-rw-r--r--  erts/doc/src/notes.xml | 35
-rw-r--r--  erts/emulator/beam/io.c | 34
-rw-r--r--  erts/emulator/beam/register.c | 19
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 30
-rw-r--r--  erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c | 46
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 20
-rw-r--r--  erts/vsn.mk | 12
-rw-r--r--  lib/inets/doc/src/http_client.xml | 29
-rw-r--r--  lib/inets/doc/src/http_server.xml | 15
-rw-r--r--  lib/inets/doc/src/httpc.xml | 3
-rw-r--r--  lib/inets/doc/src/httpd_util.xml | 4
-rw-r--r--  lib/inets/doc/src/inets.xml | 22
-rw-r--r--  lib/inets/doc/src/mod_security.xml | 17
-rw-r--r--  lib/inets/doc/src/notes.xml | 234
-rw-r--r--  lib/inets/src/http_client/httpc.erl | 8
-rw-r--r--  lib/inets/src/http_client/httpc_cookie.erl | 12
-rw-r--r--  lib/inets/src/http_client/httpc_handler.erl | 68
-rw-r--r--  lib/inets/src/http_client/httpc_manager.erl | 54
-rw-r--r--  lib/inets/src/http_lib/http_util.erl | 106
-rw-r--r--  lib/inets/src/http_server/httpd_conf.erl | 2
-rw-r--r--  lib/inets/src/http_server/httpd_instance_sup.erl | 6
-rw-r--r--  lib/inets/src/http_server/httpd_sup.erl | 10
-rw-r--r--  lib/inets/src/http_server/httpd_util.erl | 21
-rw-r--r--  lib/inets/src/http_server/mod_esi.erl | 26
-rw-r--r--  lib/inets/src/inets_app/inets.appup.src | 82
-rw-r--r--  lib/inets/src/inets_app/inets.erl | 20
-rw-r--r--  lib/inets/test/ftp_macosx_x86_test.erl | 6
-rw-r--r--  lib/inets/test/ftp_suite_lib.erl | 26
-rw-r--r--  lib/inets/test/http_format_SUITE.erl | 16
-rw-r--r--  lib/inets/test/httpc_SUITE.erl | 82
-rw-r--r--  lib/inets/vsn.mk | 100
-rw-r--r--  lib/kernel/doc/src/notes.xml | 47
-rw-r--r--  lib/kernel/src/net_kernel.erl | 110
-rw-r--r--  lib/kernel/src/os.erl | 59
-rw-r--r--  lib/kernel/test/os_SUITE.erl | 46
-rw-r--r--  lib/kernel/vsn.mk | 16
-rw-r--r--  lib/megaco/doc/src/megaco.xml | 16
-rw-r--r--  lib/megaco/doc/src/megaco_mib.xml | 12
-rw-r--r--  lib/megaco/doc/src/megaco_run.xml | 8
-rw-r--r--  lib/megaco/doc/src/megaco_user.xml | 11
-rw-r--r--  lib/megaco/doc/src/notes.xml | 62
-rw-r--r--  lib/megaco/doc/src/notes_history.xml | 10
-rw-r--r--  lib/megaco/src/app/megaco.appup.src | 23
-rw-r--r--  lib/megaco/src/app/megaco_internal.hrl | 26
-rw-r--r--  lib/megaco/src/engine/megaco_config.erl | 427
-rw-r--r--  lib/megaco/src/engine/megaco_messenger.erl | 239
-rw-r--r--  lib/megaco/src/engine/megaco_monitor.erl | 33
-rw-r--r--  lib/megaco/src/flex/Makefile.in | 2
-rw-r--r--  lib/megaco/test/megaco_app_test.erl | 14
-rw-r--r--  lib/megaco/test/megaco_config_test.erl | 362
-rw-r--r--  lib/megaco/vsn.mk | 6
-rw-r--r--  lib/mnesia/test/Makefile | 118
-rw-r--r--  lib/mnesia/test/README | 107
-rw-r--r--  lib/mnesia/test/mnesia.spec | 23
-rw-r--r--  lib/mnesia/test/mnesia.spec.vxworks | 362
-rw-r--r--  lib/mnesia/test/mnesia_SUITE.erl | 203
-rw-r--r--  lib/mnesia/test/mnesia_atomicity_test.erl | 839
-rw-r--r--  lib/mnesia/test/mnesia_config_backup.erl | 105
-rw-r--r--  lib/mnesia/test/mnesia_config_event.erl | 74
-rw-r--r--  lib/mnesia/test/mnesia_config_test.erl | 1466
-rw-r--r--  lib/mnesia/test/mnesia_consistency_test.erl | 1612
-rw-r--r--  lib/mnesia/test/mnesia_cost.erl | 222
-rw-r--r--  lib/mnesia/test/mnesia_dbn_meters.erl | 242
-rw-r--r--  lib/mnesia/test/mnesia_dirty_access_test.erl | 927
-rw-r--r--  lib/mnesia/test/mnesia_durability_test.erl | 1470
-rw-r--r--  lib/mnesia/test/mnesia_evil_backup.erl | 750
-rw-r--r--  lib/mnesia/test/mnesia_evil_coverage_test.erl | 2167
-rw-r--r--  lib/mnesia/test/mnesia_examples_test.erl | 160
-rw-r--r--  lib/mnesia/test/mnesia_frag_test.erl | 875
-rw-r--r--  lib/mnesia/test/mnesia_inconsistent_database_test.erl | 74
-rw-r--r--  lib/mnesia/test/mnesia_install_test.erl | 342
-rw-r--r--  lib/mnesia/test/mnesia_isolation_test.erl | 2419
-rw-r--r--  lib/mnesia/test/mnesia_measure_test.erl | 203
-rw-r--r--  lib/mnesia/test/mnesia_meter.erl | 465
-rw-r--r--  lib/mnesia/test/mnesia_nice_coverage_test.erl | 227
-rw-r--r--  lib/mnesia/test/mnesia_qlc_test.erl | 475
-rw-r--r--  lib/mnesia/test/mnesia_recovery_test.erl | 1701
-rw-r--r--  lib/mnesia/test/mnesia_registry_test.erl | 137
-rw-r--r--  lib/mnesia/test/mnesia_schema_recovery_test.erl | 787
-rw-r--r--  lib/mnesia/test/mnesia_test_lib.erl | 1058
-rw-r--r--  lib/mnesia/test/mnesia_test_lib.hrl | 132
-rw-r--r--  lib/mnesia/test/mnesia_tpcb.erl | 1268
-rw-r--r--  lib/mnesia/test/mnesia_trans_access_test.erl | 1254
-rwxr-xr-x  lib/mnesia/test/mt | 60
-rw-r--r--  lib/mnesia/test/mt.erl | 262
-rw-r--r--  lib/public_key/asn1/Makefile | 14
-rw-r--r--  lib/public_key/asn1/OTP-PUB-KEY.set.asn | 1
-rw-r--r--  lib/public_key/asn1/PKCS-3.asn1 | 21
-rw-r--r--  lib/public_key/doc/src/notes.xml | 21
-rw-r--r--  lib/public_key/src/pubkey_cert.erl | 35
-rw-r--r--  lib/public_key/src/pubkey_crypto.erl | 59
-rw-r--r--  lib/public_key/src/pubkey_pem.erl | 12
-rw-r--r--  lib/public_key/src/public_key.appup.src | 26
-rw-r--r--  lib/public_key/src/public_key.erl | 47
-rw-r--r--  lib/public_key/vsn.mk | 7
-rw-r--r--  lib/sasl/doc/src/notes.xml | 43
-rw-r--r--  lib/sasl/src/overload.erl | 22
-rw-r--r--  lib/sasl/src/rb.erl | 29
-rw-r--r--  lib/sasl/src/release_handler.erl | 47
-rw-r--r--  lib/sasl/src/release_handler_1.erl | 18
-rw-r--r--  lib/sasl/src/si_sasl_supp.erl | 24
-rw-r--r--  lib/sasl/vsn.mk | 2
-rw-r--r--  lib/snmp/doc/src/notes.xml | 224
-rw-r--r--  lib/snmp/doc/src/notes_history.xml | 14
-rw-r--r--  lib/snmp/doc/src/snmp_app.xml | 6
-rw-r--r--  lib/snmp/doc/src/snmp_config.xml | 4
-rw-r--r--  lib/snmp/doc/src/snmpa.xml | 36
-rw-r--r--  lib/snmp/doc/src/snmpa_mpd.xml | 42
-rw-r--r--  lib/snmp/doc/src/snmpa_network_interface_filter.xml | 8
-rw-r--r--  lib/snmp/doc/src/snmpc.xml | 152
-rw-r--r--  lib/snmp/doc/src/snmpm.xml | 10
-rw-r--r--  lib/snmp/src/agent/snmpa.erl | 23
-rw-r--r--  lib/snmp/src/agent/snmpa_agent.erl | 230
-rw-r--r--  lib/snmp/src/agent/snmpa_general_db.erl | 19
-rw-r--r--  lib/snmp/src/agent/snmpa_internal.hrl | 12
-rw-r--r--  lib/snmp/src/agent/snmpa_mib.erl | 12
-rw-r--r--  lib/snmp/src/agent/snmpa_mpd.erl | 226
-rw-r--r--  lib/snmp/src/agent/snmpa_net_if.erl | 11
-rw-r--r--  lib/snmp/src/agent/snmpa_trap.erl | 95
-rw-r--r--  lib/snmp/src/agent/snmpa_usm.erl | 82
-rw-r--r--  lib/snmp/src/app/snmp.appup.src | 230
-rw-r--r--  lib/snmp/src/compile/snmpc.erl | 187
-rw-r--r--  lib/snmp/src/compile/snmpc_lib.erl | 13
-rw-r--r--  lib/snmp/src/manager/snmpm_mpd.erl | 24
-rw-r--r--  lib/snmp/src/manager/snmpm_net_if.erl | 43
-rw-r--r--  lib/snmp/src/manager/snmpm_server.erl | 26
-rw-r--r--  lib/snmp/src/misc/snmp_pdus.erl | 51
-rw-r--r--  lib/snmp/src/misc/snmp_usm.erl | 10
-rw-r--r--  lib/snmp/test/modules.mk | 14
-rw-r--r--  lib/snmp/test/snmp_agent_test.erl | 49
-rw-r--r--  lib/snmp/test/snmp_agent_test_lib.erl | 12
-rw-r--r--  lib/snmp/test/snmp_compiler_test.erl | 70
-rw-r--r--  lib/snmp/test/snmp_manager_config_test.erl | 90
-rw-r--r--  lib/snmp/test/snmp_manager_test.erl | 29
-rw-r--r--  lib/snmp/test/snmp_manager_user_test.erl | 43
-rw-r--r--  lib/snmp/test/snmp_pdus_test.erl | 64
-rw-r--r--  lib/snmp/test/snmp_test_data/OLD-SNMPEA-MIB.mib | 18
-rw-r--r--  lib/snmp/test/snmp_test_data/OTP8574-MIB.mib | 77
-rw-r--r--  lib/snmp/test/snmp_test_data/OTP8595-MIB.mib | 45
-rw-r--r--  lib/snmp/test/snmp_test_lib.erl | 22
-rw-r--r--  lib/snmp/test/snmp_test_mgr_misc.erl | 14
-rw-r--r--  lib/snmp/vsn.mk | 181
-rw-r--r--  lib/ssh/doc/src/notes.xml | 83
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 17
-rw-r--r--  lib/ssh/examples/ssh_sample_cli.erl | 66
-rw-r--r--  lib/ssh/src/ssh.appup.src | 20
-rw-r--r--  lib/ssh/src/ssh.erl | 18
-rw-r--r--  lib/ssh/src/ssh_acceptor.erl | 14
-rw-r--r--  lib/ssh/src/ssh_channel.erl | 24
-rw-r--r--  lib/ssh/src/ssh_cli.erl | 12
-rwxr-xr-x  lib/ssh/src/ssh_connect.hrl | 13
-rw-r--r--  lib/ssh/src/ssh_connection.erl | 32
-rw-r--r--  lib/ssh/src/ssh_connection_controler.erl | 14
-rw-r--r--  lib/ssh/src/ssh_connection_manager.erl | 29
-rw-r--r--  lib/ssh/src/ssh_sftpd.erl | 19
-rw-r--r--  lib/ssh/vsn.mk | 15
-rw-r--r--  lib/ssl/doc/src/new_ssl.xml | 25
-rw-r--r--  lib/ssl/doc/src/notes.xml | 76
-rw-r--r--  lib/ssl/src/ssl.appup.src | 7
-rw-r--r--  lib/ssl/src/ssl.erl | 76
-rw-r--r--  lib/ssl/src/ssl_certificate.erl | 53
-rw-r--r--  lib/ssl/src/ssl_certificate_db.erl | 2
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 853
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 193
-rw-r--r--  lib/ssl/src/ssl_handshake.hrl | 13
-rw-r--r--  lib/ssl/src/ssl_internal.hrl | 14
-rw-r--r--  lib/ssl/src/ssl_record.erl | 64
-rw-r--r--  lib/ssl/src/ssl_record.hrl | 17
-rw-r--r--  lib/ssl/src/ssl_ssl3.erl | 18
-rw-r--r--  lib/ssl/src/ssl_tls1.erl | 20
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 936
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE_data/dHParam.pem | 5
-rw-r--r--  lib/ssl/test/ssl_packet_SUITE.erl | 174
-rw-r--r--  lib/ssl/test/ssl_test_lib.erl | 172
-rw-r--r--  lib/ssl/test/ssl_to_openssl_SUITE.erl | 362
-rw-r--r--  lib/ssl/vsn.mk | 13
176 files changed, 29166 insertions, 2529 deletions
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 65b836fc45..824ad6d94e 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -30,6 +30,41 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 5.7.5.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Driver threads, such as async threads, using <seealso
+ marker="erl_driver#ErlDrvPDL">port data locks</seealso>
+ peeked at the port status field without proper locking
+ when looking up the driver queue.</p>
+ <p>
+ Own Id: OTP-8475</p>
+ </item>
+ <item>
+ <p>
+ A call to the BIF <c>unregister(RegName)</c> when a port
+ had the name <c>RegName</c> registered in the runtime
+ system without SMP support caused a runtime system crash.
+ (Thanks to Per Hedeland for the bugfix and test case.)</p>
+ <p>
+ Own Id: OTP-8487</p>
+ </item>
+ <item>
+ <p>
+ Fix a memory management bug causing a crash of the non-SMP
+ emulator with async threads enabled. The bug first
+ appeared in R13B03.</p>
+ <p>
+ Own Id: OTP-8591 Aux Id: seq11554 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.7.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index ad0b909b2a..3309b77086 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -77,15 +77,35 @@ static ERTS_INLINE ErlIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
int ix = (int) drvport;
+ Uint32 status;
+
if (ix < 0 || erts_max_ports <= ix)
return NULL;
- if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return NULL;
- ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
- || erts_lc_mtx_is_locked(&erts_port[ix].port_data_lock->mtx));
- ERTS_SMP_LC_ASSERT(erts_port[ix].port_data_lock
- || erts_lc_is_port_locked(&erts_port[ix]));
- return &erts_port[ix].ioq;
+
+ if (erts_get_scheduler_data()) {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
+ ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
+ || erts_lc_mtx_is_locked(
+ &erts_port[ix].port_data_lock->mtx));
+
+ status = erts_port[ix].status;
+ }
+ else {
+ erts_smp_port_state_lock(&erts_port[ix]);
+ status = erts_port[ix].status;
+ erts_smp_port_state_unlock(&erts_port[ix]);
+
+ ERTS_LC_ASSERT((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ || erts_port[ix].port_data_lock);
+ ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
+ || erts_lc_mtx_is_locked(
+ &erts_port[ix].port_data_lock->mtx));
+
+ }
+
+ return ((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ ? NULL
+ : &erts_port[ix].ioq);
}
static ERTS_INLINE int
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 7ba097382a..964c10a380 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -500,8 +500,8 @@ int erts_unregister_name(Process *c_p,
if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
if (rp->pt) {
-#ifdef ERTS_SMP
if (port != rp->pt) {
+#ifdef ERTS_SMP
if (port) {
ERTS_SMP_LC_ASSERT(port != c_prt);
erts_smp_port_unlock(port);
@@ -519,10 +519,13 @@ int erts_unregister_name(Process *c_p,
port = erts_id2port(id, NULL, 0);
goto restart;
}
+#endif
port = rp->pt;
}
-#endif
- ERTS_SMP_LC_ASSERT(rp->pt == port && erts_lc_is_port_locked(port));
+
+ ASSERT(rp->pt == port);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+
rp->pt->reg = NULL;
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index f4e21bc05f..5dfd66bd7c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -287,13 +287,9 @@ check_schedule_cache_check(void)
static void
mseg_shutdown(void)
{
-#ifdef ERTS_SMP
erts_mtx_lock(&mseg_mutex);
-#endif
mseg_clear_cache();
-#ifdef ERTS_SMP
erts_mtx_unlock(&mseg_mutex);
-#endif
}
static ERTS_INLINE void *
@@ -375,8 +371,9 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
static ERTS_INLINE cache_desc_t *
alloc_cd(void)
-{
+{
cache_desc_t *cd = free_cache_descs;
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd)
free_cache_descs = cd->next;
return cd;
@@ -385,6 +382,7 @@ alloc_cd(void)
static ERTS_INLINE void
free_cd(cache_desc_t *cd)
{
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
cd->next = free_cache_descs;
free_cache_descs = cd;
}
@@ -393,6 +391,7 @@ free_cd(cache_desc_t *cd)
static ERTS_INLINE void
link_cd(cache_desc_t *cd)
{
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cache)
cache->prev = cd;
cd->next = cache;
@@ -410,6 +409,7 @@ link_cd(cache_desc_t *cd)
static ERTS_INLINE void
end_link_cd(cache_desc_t *cd)
{
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cache_end)
cache_end->next = cd;
cd->next = NULL;
@@ -427,7 +427,7 @@ end_link_cd(cache_desc_t *cd)
static ERTS_INLINE void
unlink_cd(cache_desc_t *cd)
{
-
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
if (cd->next)
cd->next->prev = cd->prev;
else
@@ -445,6 +445,7 @@ static ERTS_INLINE void
check_cache_limits(void)
{
cache_desc_t *cd;
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
max_cached_seg_size = 0;
min_cached_seg_size = ~((Uint) 0);
for (cd = cache; cd; cd = cd->next) {
@@ -463,7 +464,7 @@ adjust_cache_size(int force_check_limits)
int check_limits = force_check_limits;
Sint max_cached = ((Sint) segments.current.watermark
- (Sint) segments.current.no);
-
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
ASSERT(cache_end);
cd = cache_end;
@@ -487,9 +488,7 @@ adjust_cache_size(int force_check_limits)
static void
check_cache(void *unused)
{
-#ifdef ERTS_SMP
erts_mtx_lock(&mseg_mutex);
-#endif
is_cache_check_scheduled = 0;
@@ -502,10 +501,7 @@ check_cache(void *unused)
INC_CC(check_cache);
-#ifdef ERTS_SMP
erts_mtx_unlock(&mseg_mutex);
-#endif
-
}
static void
diff --git a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
index 2048d06123..c7a42aa687 100644
--- a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
@@ -1,19 +1,20 @@
-/* ``The contents of this file are subject to the Erlang Public License,
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
+ * retrieved online at http://www.erlang.org/.
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
+ *
+ * %CopyrightEnd%
*/
/*
@@ -77,6 +78,7 @@ typedef struct {
int ofd;
int outstanding_async_task;
long async_task;
+ ErlDrvPDL pdl;
#ifdef HAVE_POLL_H
struct erl_drv_event_data event_data;
#endif
@@ -144,6 +146,7 @@ start(ErlDrvPort port, char *command)
ddp->ofd = -1;
ddp->outstanding_async_task = 0;
ddp->async_task = -1;
+ ddp->pdl = driver_pdl_create(port);
#ifdef HAVE_POLL_H
ddp->event_data.events = (short) 0;
ddp->event_data.revents = (short) 0;
@@ -217,8 +220,9 @@ static int control(ErlDrvData drv_data,
res_str = "error: command not supported";
goto done;
}
-
+ driver_pdl_lock(ddp->pdl);
driver_enq(ddp->port, "!", 1);
+ driver_pdl_unlock(ddp->pdl);
ddp->test = (IOQExitTest) command;
res_str = "ok";
@@ -333,8 +337,11 @@ static void ready_input(ErlDrvData drv_data, ErlDrvEvent event)
ddp->ifd = -1;
if (ddp->test == IOQ_EXIT_READY_INPUT_ASYNC)
do_driver_async(ddp);
- else
+ else {
+ driver_pdl_lock(ddp->pdl);
driver_deq(ddp->port, 1);
+ driver_pdl_unlock(ddp->pdl);
+ }
}
#endif
}
@@ -352,8 +359,11 @@ static void ready_output(ErlDrvData drv_data, ErlDrvEvent event)
ddp->ofd = -1;
if (ddp->test == IOQ_EXIT_READY_OUTPUT_ASYNC)
do_driver_async(ddp);
- else
+ else {
+ driver_pdl_lock(ddp->pdl);
driver_deq(ddp->port, 1);
+ driver_pdl_unlock(ddp->pdl);
+ }
}
#endif
}
@@ -366,8 +376,11 @@ static void timeout(ErlDrvData drv_data)
if (ddp->test == IOQ_EXIT_TIMEOUT_ASYNC)
do_driver_async(ddp);
- else
+ else {
+ driver_pdl_lock(ddp->pdl);
driver_deq(ddp->port, 1);
+ driver_pdl_unlock(ddp->pdl);
+ }
}
static void ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data)
@@ -377,7 +390,9 @@ static void ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data)
PRINTF(("ready_async(%p, %p) called\r\n", drv_data, thread_data));
if (drv_data == (ErlDrvData) thread_data) {
+ driver_pdl_lock(ddp->pdl);
driver_deq(ddp->port, 1);
+ driver_pdl_unlock(ddp->pdl);
ddp->outstanding_async_task = 0;
}
}
@@ -397,8 +412,11 @@ static void event(ErlDrvData drv_data,
ddp->ofd = -1;
if (ddp->test == IOQ_EXIT_EVENT_ASYNC)
do_driver_async(ddp);
- else
+ else {
+ driver_pdl_lock(ddp->pdl);
driver_deq(ddp->port, 1);
+ driver_pdl_unlock(ddp->pdl);
+ }
}
#endif
}
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 9a09d20eab..b9100738e4 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -88,7 +88,8 @@
otp_3906/1, otp_4389/1, win_massive/1, win_massive_client/1,
mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1,
exit_status_multi_scheduling_block/1, ports/1,
- spawn_driver/1,spawn_executable/1]).
+ spawn_driver/1,spawn_executable/1,
+ unregister_name/1]).
-export([]).
@@ -112,7 +113,8 @@ all(suite) ->
otp_3906, otp_4389, win_massive, mix_up_ports,
otp_5112, otp_5119,
exit_status_multi_scheduling_block,
- ports, spawn_driver, spawn_executable
+ ports, spawn_driver, spawn_executable,
+ unregister_name
].
-define(DEFAULT_TIMEOUT, ?t:minutes(5)).
@@ -1434,6 +1436,10 @@ spawn_executable(Config) when is_list(Config) ->
?line test_server:timetrap_cancel(Dog),
ok.
+unregister_name(Config) when is_list(Config) ->
+ ?line true = register(crash, open_port({spawn, "sleep 100"}, [])),
+ ?line true = unregister(crash).
+
test_bat_file(Dir) ->
FN = "tf.bat",
Full = filename:join([Dir,FN]),
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 8f940339df..14c6a3e4dd 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -1,23 +1,23 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
-VSN = 5.7.5
+VSN = 5.7.5.1
SYSTEM_VSN = R13B04
# Port number 4365 in 4.2
diff --git a/lib/inets/doc/src/http_client.xml b/lib/inets/doc/src/http_client.xml
index 510c30eb35..ea8053cafa 100644
--- a/lib/inets/doc/src/http_client.xml
+++ b/lib/inets/doc/src/http_client.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -56,8 +56,8 @@
<pre>
[{inets, [{services, [{httpc, PropertyList}]}]}]
</pre>
- <p>For valid properties see <seealso
- marker="http">http(3)</seealso></p>
+ <p>For valid properties see
+ <seealso marker="http">httpc(3)</seealso>. </p>
</section>
<section>
@@ -71,67 +71,66 @@
but not for requests to localhost. This will apply to all subsequent
requests</p>
<code type="erl">
- 2 > http:set_options([{proxy, {{"www-proxy.mycompany.com", 8000},
+ 2 > httpc:set_options([{proxy, {{"www-proxy.mycompany.com", 8000},
["localhost"]}}]).
ok
</code>
<p>An ordinary synchronous request. </p>
<code type="erl">
3 > {ok, {{Version, 200, ReasonPhrase}, Headers, Body}} =
- http:request(get, {"http://www.erlang.org", []}, [], []).
+ httpc:request(get, {"http://www.erlang.org", []}, [], []).
</code>
<p>With all default values, as above, a get request can also be written
like this.</p>
<code type="erl">
4 > {ok, {{Version, 200, ReasonPhrase}, Headers, Body}} =
- http:request("http://www.erlang.org").
+ httpc:request("http://www.erlang.org").
</code>
<p>An ordinary asynchronous request. The result will be sent
to the calling process on the form {http, {ReqestId, Result}}</p>
<code type="erl">
5 > {ok, RequestId} =
- http:request(get, {"http://www.erlang.org", []}, [], [{sync, false}]).
+ httpc:request(get, {"http://www.erlang.org", []}, [], [{sync, false}]).
</code>
<p>In this case the calling process is the shell, so we receive the
result.</p>
<code type="erl">
- 6 > receive {http, {RequestId, Result}} -> ok after 500 -> error end.
+ 6 > receive {http, {RequestId, Result}} -> ok after 500 -> error end.
ok
</code>
<p>Send a request with a specified connection header. </p>
<code type="erl">
7 > {ok, {{NewVersion, 200, NewReasonPhrase}, NewHeaders, NewBody}} =
- http:request(get, {"http://www.erlang.org", [{"connection", "close"}]},
+ httpc:request(get, {"http://www.erlang.org", [{"connection", "close"}]},
[], []).
</code>
<p>Start a HTTP client profile. </p>
<code><![CDATA[
- 8 > {ok, Pid} = inets:start(httpc, [{profile, foo}]).
+ 8 > {ok, Pid} = inets:start(httpc, [{profile, foo}]).
{ok, <0.45.0>}
]]></code>
<p>The new profile has no proxy settings so the connection will
be refused</p>
<code type="erl">
- 9 > http:request("http://www.erlang.org", foo).
- {error,econnrefused}
+ 9 > httpc:request("http://www.erlang.org", foo).
+ {error, econnrefused}
</code>
<p>Stop a HTTP client profile. </p>
<code type="erl">
- 10 > inets:stop(httpc, foo).
+ 10 > inets:stop(httpc, foo).
ok
</code>
<p>Alternatively:</p>
<code type="erl">
- 10 > inets:stop(httpc, Pid).
+ 10 > inets:stop(httpc, Pid).
ok
</code>
-
</section>
</chapter>
diff --git a/lib/inets/doc/src/http_server.xml b/lib/inets/doc/src/http_server.xml
index 547617e2e3..68dfd1add0 100644
--- a/lib/inets/doc/src/http_server.xml
+++ b/lib/inets/doc/src/http_server.xml
@@ -30,6 +30,8 @@
<date></date>
<rev></rev>
<file>http_server.xml</file>
+
+ <marker id="intro"></marker>
</header>
<section>
@@ -65,6 +67,8 @@
Server API. This API can be used to advantage by all who wants
to enhance the server core functionality, for example custom
logging and authentication.</p>
+
+ <marker id="config"></marker>
</section>
<section>
@@ -109,6 +113,8 @@
functions or only exported functions on chosen modules.</p>
<p>{accept_timeout, integer()} sets the wanted timeout value for
the server to set up a request connection.</p>
+
+ <marker id="using_http_server_api"></marker>
</section>
<section>
@@ -173,6 +179,7 @@
the ip address reported by the info function and can
not be the hostname that is allowed when inputting bind_address.</p>
+ <marker id="htaccess"></marker>
</section>
<section>
@@ -337,6 +344,8 @@ UserName:Password
</item>
</list>
</section>
+
+ <marker id="dynamic_we_pages"></marker>
</section>
<section>
@@ -357,7 +366,7 @@ UserName:Password
<p>The mod_cgi module makes it possible to execute CGI scripts
in the server. A file that matches the definition of a
ScriptAlias config directive is treated as a CGI script. A CGI
- script is executed by the server and it's output is returned to
+ script is executed by the server and its output is returned to
the client. </p>
<p>The CGI Script response comprises a message-header and a
message-body, separated by a blank line. The message-header
@@ -434,6 +443,8 @@ http://your.server.org/eval?httpd_example:print(atom_to_list(apply(erlang,halt,[
</note>
</section>
</section>
+
+ <marker id="logging"></marker>
</section>
<section>
@@ -467,6 +478,8 @@ http://your.server.org/eval?httpd_example:print(atom_to_list(apply(erlang,halt,[
</p>
<p><em>[date]</em> access to <em>path</em> failed for
<em>remotehost</em>, reason: <em>reason</em></p>
+
+ <marker id="ssi"></marker>
</section>
<section>
diff --git a/lib/inets/doc/src/httpc.xml b/lib/inets/doc/src/httpc.xml
index ca93190f61..7430a62b1b 100644
--- a/lib/inets/doc/src/httpc.xml
+++ b/lib/inets/doc/src/httpc.xml
@@ -191,7 +191,8 @@ ssl_options() = {verify, code()} |
<v>Result = {status_line(), headers(), body()} |
{status_code(), body()} | request_id() </v>
<v>Profile = profile() </v>
- <v>Reason = term() </v>
+ <v>Reason = {connect_failed, term()} |
+ {send_failed, term()} | term() </v>
</type>
<desc>
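
The hunk above documents the new error reasons {connect_failed, term()} and {send_failed, term()}. As a minimal sketch (not part of this commit; the module and function names are made up, only httpc:request/4 and the documented error tuples come from the change), a caller could match them like this:

    %% Hypothetical helper illustrating the error reasons documented
    %% in httpc.xml above.
    -module(httpc_error_example).
    -export([fetch/1]).

    fetch(Url) ->
        case httpc:request(get, {Url, []}, [], []) of
            {ok, {{_Version, StatusCode, _Phrase}, _Headers, _Body}} ->
                {ok, StatusCode};
            {error, {connect_failed, Why}} ->
                %% the TCP/TLS connect to the server failed
                {connect_error, Why};
            {error, {send_failed, Why}} ->
                %% connected, but sending the request failed
                {send_error, Why};
            {error, Other} ->
                {error, Other}
        end.

(inets must be started, for example with inets:start(), before httpc:request/4 is called.)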
diff --git a/lib/inets/doc/src/httpd_util.xml b/lib/inets/doc/src/httpd_util.xml
index 642e5213b0..6ac2b13c72 100644
--- a/lib/inets/doc/src/httpd_util.xml
+++ b/lib/inets/doc/src/httpd_util.xml
@@ -63,7 +63,7 @@
<v>Etag = string()</v>
</type>
<desc>
- <p><c>create_etag/1</c> calculates the Etag for a file, from it's
+ <p><c>create_etag/1</c> calculates the Etag for a file, from its
size and time for last modification. fileinfo is a record defined
in <c>kernel/include/file.hrl</c></p>
@@ -78,7 +78,7 @@
<v>HexValue = DecValue = string()</v>
</type>
<desc>
- <p>Converts the hexadecimal value <c>HexValue</c> into it's
+ <p>Converts the hexadecimal value <c>HexValue</c> into its
decimal equivalent (<c>DecValue</c>).</p>
<marker id="day"></marker>
diff --git a/lib/inets/doc/src/inets.xml b/lib/inets/doc/src/inets.xml
index 81dfe7e944..c367d7fa77 100644
--- a/lib/inets/doc/src/inets.xml
+++ b/lib/inets/doc/src/inets.xml
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>inets</title>
@@ -40,13 +40,13 @@
<title>COMMON DATA TYPES </title>
<p>Type definitions that are used more than once in
this module: </p>
- <p><c> service() = ftpc | tfptd | httpc | httpd</c></p>
+ <p><c> service() = ftpc | tftp | httpc | httpd</c></p>
<p><c> property() = atom() </c></p>
</section>
<funcs>
<func>
<name>services() -> [{Service, Pid}]</name>
- <fsummary>Returns a list of currently running services. </fsummary>
+ <fsummary>Returns a list of currently running services. </fsummary>
<type>
<v>Service = service()</v>
<v>Pid = pid()</v>
@@ -97,7 +97,7 @@
<name>start(Type) -> ok | {error, Reason}</name>
<fsummary>Starts the Inets application. </fsummary>
<type>
- <v>Type = permanent | transient | temporary</v>
+ <v>Type = permanent | transient | temporary</v>
</type>
<desc>
<p>Starts the Inets application. Default type
@@ -115,11 +115,9 @@
</func>
<func>
<name>start(Service, ServiceConfig) -> {ok, Pid} | {error, Reason}</name>
- <name>start(Service, ServiceConfig, How) -> {ok, Pid} |
- {error, Reason}</name>
+ <name>start(Service, ServiceConfig, How) -> {ok, Pid} | {error, Reason}</name>
<fsummary>Dynamically starts an inets
- service after the inets application has been
- started. </fsummary>
+ service after the inets application has been started. </fsummary>
<type>
<v>Service = service()</v>
<v>ServiceConfig = [{Option, Value}]</v>
@@ -153,9 +151,9 @@
<fsummary>Stops a started service of the inets application or takes
down a "stand_alone-service" gracefully.</fsummary>
<type>
- <v>Service = service() | stand_alone</v>
+ <v>Service = service() | stand_alone</v>
<v>Reference = pid() | term() - service specified reference</v>
- <v>Reason = term()</v>
+ <v>Reason = term()</v>
</type>
<desc>
<p>Stops a started service of the inets application or takes
@@ -169,7 +167,7 @@
<section>
<title>SEE ALSO</title>
<p><seealso marker="ftp">ftp(3)</seealso>,
- <seealso marker="http">http(3)</seealso>,
+ <seealso marker="httpc">httpc(3)</seealso>,
<seealso marker="httpd">httpd(3)</seealso>,
<seealso marker="tftp">tftp(3)</seealso></p>
</section>
diff --git a/lib/inets/doc/src/mod_security.xml b/lib/inets/doc/src/mod_security.xml
index 5f9f88071e..2a871d29d8 100644
--- a/lib/inets/doc/src/mod_security.xml
+++ b/lib/inets/doc/src/mod_security.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1998</year><year>2009</year>
+ <year>1998</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>mod_security</title>
@@ -117,11 +117,14 @@
<section>
<marker id="callback_module"></marker>
<title>The SecurityCallbackModule</title>
- <p>The SecurityCallbackModule is a user written module that can receive events from
- the mod_security Erlang Webserver API module. This module only exports one function,
- <seealso marker="#callback_module_event">event/4</seealso>, which is described below.
+ <p>The SecurityCallbackModule is a user-written module that can receive
+ events from the mod_security Erlang Webserver API module.
+ This module only exports the function(s),
+ <seealso marker="#callback_module_event">event/4,5</seealso>,
+ which are described below.
</p>
</section>
+
<funcs>
<func>
<name>event(What, Port, Dir, Data) -> ignored</name>
@@ -131,7 +134,7 @@
<v>What = atom()</v>
<v>Port = integer()</v>
<v>Address = {A,B,C,D} | string() &lt;v>Dir = string()</v>
- <v>What = [Info]</v>
+ <v>Data = [Info]</v>
<v>Info = {Name, Value}</v>
</type>
<desc>
diff --git a/lib/inets/doc/src/notes.xml b/lib/inets/doc/src/notes.xml
index 0c524f00d1..9ab35ff38b 100644
--- a/lib/inets/doc/src/notes.xml
+++ b/lib/inets/doc/src/notes.xml
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>Inets Release Notes</title>
@@ -32,6 +32,236 @@
<file>notes.xml</file>
</header>
+ <section><title>Inets 5.3.4</title>
+
+ <section><title>Improvements and New Features</title>
+ <p>-</p>
+
+<!--
+ <list>
+ <item>
+ <p>[httpc] - Allow users to pass socket options to the transport
+ module when making requests. </p>
+ <p>See the <c>socket_opts</c> option in the
+ <seealso marker="httpc#request2">request/4</seealso> or
+ <seealso marker="httpc#set_options">set_options/1,2</seealso>
+ for more info, </p>
+ <p>Own Id: OTP-8352</p>
+ </item>
+
+ </list>
+-->
+ </section>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+
+<!--
+ <p>-</p>
+-->
+
+ <list>
+ <item>
+ <p>[httpc] - If a request times out (not during connect), the
+ handler process exited (normal) but neglected to inform
+ the manager process. For this reason, the manager
+ did not clean up the request table, resulting in a
+ memory leak. Also the manager did not create a monitor
+ for the handler, so in an unforeseen handler crash, this
+ could also create a memory leak. </p>
+ <p>Own Id: OTP-8739</p>
+ </item>
+
+ <item>
+ <p>[tftp] - The service name was spelled wrong in the documentation
+ and in some parts of the code. It should be tftp. </p>
+ <p>Own Id: OTP-8741</p>
+ </item>
+
+ <item>
+ <p>[httpc] - Replaced the old http client api module (http)
+ with the new http client in the
+ <seealso marker="http_client">Users Guide</seealso>. </p>
+ <p>Own Id: OTP-8742</p>
+ <p>Ryan Zezeski</p>
+ </item>
+
+ </list>
+ </section>
+
+ </section> <!-- 5.3.4 -->
+
+
+ <section><title>Inets 5.3.3</title>
+
+ <section><title>Improvements and New Features</title>
+ <p>-</p>
+
+<!--
+ <list>
+ <item>
+ <p>[httpc] - Allow users to pass socket options to the transport
+ module when making requests. </p>
+ <p>See the <c>socket_opts</c> option in the
+ <seealso marker="httpc#request2">request/4</seealso> or
+ <seealso marker="httpc#set_options">set_options/1,2</seealso>
+ for more info, </p>
+ <p>Own Id: OTP-8352</p>
+ </item>
+
+ </list>
+-->
+ </section>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+
+<!--
+ <p>-</p>
+-->
+
+ <list>
+ <item>
+ <p>[httpc] - Made cookie handling more case insensitive.</p>
+ <p>Own Id: OTP-8609</p>
+ <p>Nicolas Thauvin</p>
+ </item>
+
+ <item>
+ <p>[httpc|httpd] - Netscape cookie dates can also be given with a
+ 2-digit year (e.g. 06 = 2006). </p>
+ <p>Own Id: OTP-8610</p>
+ <p>Nicolas Thauvin</p>
+ </item>
+
+ <item>
+ <p>[httpd] - Added support (again) for the documented debugging
+ features. See the User's Guide
+ <seealso marker="http_server#config">Configuration</seealso>
+ chapter for more info. </p>
+ <p>Own Id: OTP-8624</p>
+ </item>
+
+ </list>
+ </section>
+
+ </section> <!-- 5.3.3 -->
+
+
+ <section><title>Inets 5.3.2</title>
+
+ <section><title>Improvements and New Features</title>
+ <p>-</p>
+
+<!--
+ <list>
+ <item>
+ <p>[httpc] - Allow users to pass socket options to the transport
+ module when making requests. </p>
+ <p>See the <c>socket_opts</c> option in the
+ <seealso marker="httpc#request2">request/4</seealso> or
+ <seealso marker="httpc#set_options">set_options/1,2</seealso>
+ for more info, </p>
+ <p>Own Id: OTP-8352</p>
+ </item>
+
+ </list>
+-->
+ </section>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+
+<!--
+ <p>-</p>
+-->
+
+ <list>
+ <item>
+ <p>[httpc] - Memory leak plugged.
+ The profile manager never cleaned up in its handler database.
+ This meant that with each new request handler, another entry
+ was created that was never deleted. Eventually the request
+ id counter (used as a key) would wrap, but the machine would
+ most likely run out of memory before that happened.</p>
+ <p>Own Id: OTP-8542</p>
+ <p>Lev Walkin</p>
+ </item>
+
+ <item>
+ <p>[httpc] - https requests with default port (443) not handled
+ properly. </p>
+ <p>Own Id: OTP-8607</p>
+ <p>jebu ittiachen</p>
+ </item>
+
+ </list>
+ </section>
+
+ </section> <!-- 5.3.2 -->
+
+
+ <section><title>Inets 5.3.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <p>-</p>
+
+<!--
+ <list>
+ <item>
+ <p>[httpc] - Allow users to pass socket options to the transport
+ module when making requests. </p>
+ <p>See the <c>socket_opts</c> option in the
+ <seealso marker="httpc#request2">request/4</seealso> or
+ <seealso marker="httpc#set_options">set_options/1,2</seealso>
+ for more info, </p>
+ <p>Own Id: OTP-8352</p>
+ </item>
+
+ </list>
+-->
+ </section>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+
+<!--
+ <p>-</p>
+-->
+
+ <list>
+ <item>
+ <p>[httpc] - Badly formatted error reason for errors occurring
+ during the initial connect to a server.
+ Also, the possible error reasons were
+ not properly documented.</p>
+ <p>Own Id: OTP-8508</p>
+ <p>Aux Id: seq11407</p>
+ </item>
+
+ <item>
+ <p>[httpd] - Issues with ESI erl_script_timeout. </p>
+ <p>
+ <list type="bulleted">
+ <item>
+ <p>The <c>erl_script_timeout</c> config option is documented
+ as a number of seconds. But when parsing the config, in the
+ new format (not a config file), it was handled as if it was a
+ number of milliseconds. </p>
+ </item>
+ <item>
+ <p>When the erl-script-timeout time was exceeded, the server
+ incorrectly marked the answer as sent, thereby leaving
+ the client hanging (with an incomplete answer).
+ This has been changed, so that now the socket will be
+ closed. </p>
+ </item>
+ </list>
+ </p>
+ <p>Own Id: OTP-8509</p>
+ </item>
+ </list>
+ </section>
+
+ </section> <!-- 5.3.1 -->
+
+
<section><title>Inets 5.3</title>
<section><title>Improvements and New Features</title>
diff --git a/lib/inets/src/http_client/httpc.erl b/lib/inets/src/http_client/httpc.erl
index 5205605e0a..6deeab6948 100644
--- a/lib/inets/src/http_client/httpc.erl
+++ b/lib/inets/src/http_client/httpc.erl
@@ -432,7 +432,7 @@ handle_request(Method, Url,
Options = request_options(Options0),
Sync = proplists:get_value(sync, Options),
Stream = proplists:get_value(stream, Options),
- Host2 = header_host(Host, Port),
+ Host2 = header_host(Scheme, Host, Port),
HeadersRecord = header_record(NewHeaders, Host2, HTTPOptions),
Receiver = proplists:get_value(receiver, Options),
SocketOpts = proplists:get_value(socket_opts, Options),
@@ -895,9 +895,11 @@ bad_option(Option, BadValue) ->
throw({error, {bad_option, Option, BadValue}}).
-header_host(Host, 80 = _Port) ->
+header_host(https, Host, 443 = _Port) ->
Host;
-header_host(Host, Port) ->
+header_host(http, Host, 80 = _Port) ->
+ Host;
+header_host(_Scheme, Host, Port) ->
Host ++ ":" ++ integer_to_list(Port).
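
For readability, here is a standalone restatement of the header_host/3 clauses added above (illustration only; the module name is invented): the default port is now omitted from the Host header for https (443) as well as http (80), and any other port is appended.

    -module(host_header_example).
    -export([header_host/3]).

    %% Mirrors the header_host/3 logic in the hunk above.
    header_host(https, Host, 443 = _Port) ->
        Host;
    header_host(http, Host, 80 = _Port) ->
        Host;
    header_host(_Scheme, Host, Port) ->
        Host ++ ":" ++ integer_to_list(Port).

    %% header_host(https, "www.erlang.org", 443)  -> "www.erlang.org"
    %% header_host(https, "www.erlang.org", 8443) -> "www.erlang.org:8443"
    %% header_host(http,  "www.erlang.org", 80)   -> "www.erlang.org"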
diff --git a/lib/inets/src/http_client/httpc_cookie.erl b/lib/inets/src/http_client/httpc_cookie.erl
index 586701b4a1..4d61f82b5a 100644
--- a/lib/inets/src/http_client/httpc_cookie.erl
+++ b/lib/inets/src/http_client/httpc_cookie.erl
@@ -476,13 +476,13 @@ path_sort(Cookies)->
lists:reverse(lists:keysort(#http_cookie.path, Cookies)).
-%% Informally, the Set-Cookie response header comprises the token
-%% Set-Cookie:, followed by a comma-separated list of one or more
-%% cookies. Netscape cookies expires attribute may also have a
-%% , in this case the header list will have been incorrectly split
-%% in parse_set_cookies/2 this functions fixs that problem.
+%% Informally, the Set-Cookie response header comprises the token
+%% Set-Cookie:, followed by a comma-separated list of one or more
+%% cookies. The expires attribute of Netscape cookies may also contain a
+%% comma; in that case the header list will have been incorrectly split
+%% in parse_set_cookies/2. This function fixes that problem.
fix_netscape_cookie([Cookie1, Cookie2 | Rest], Acc) ->
- case inets_regexp:match(Cookie1, "expires=") of
+ case inets_regexp:match(string:to_lower(Cookie1), "expires=") of
{_, _, _} ->
fix_netscape_cookie(Rest, [Cookie1 ++ Cookie2 | Acc]);
nomatch ->
diff --git a/lib/inets/src/http_client/httpc_handler.erl b/lib/inets/src/http_client/httpc_handler.erl
index 31585537d4..db5ff3036a 100644
--- a/lib/inets/src/http_client/httpc_handler.erl
+++ b/lib/inets/src/http_client/httpc_handler.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2002-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -280,7 +280,7 @@ handle_call({connect_and_send, #request{address = Address0,
{ok, NewState} ->
{reply, ok, NewState};
{stop, Error, NewState} ->
- {stop, Error, Error, NewState}
+ {stop, normal, Error, NewState}
end
end;
@@ -607,24 +607,29 @@ handle_info({ssl_error, _, _} = Reason, State) ->
%% Internally, to a request handling process, a request timeout is
%% seen as a canceled request.
handle_info({timeout, RequestId},
- #state{request = #request{id = RequestId} = Request,
- canceled = Canceled} = State) ->
+ #state{request = #request{id = RequestId} = Request,
+ canceled = Canceled,
+ profile_name = ProfileName} = State) ->
?hcri("timeout of current request", [{id, RequestId}]),
httpc_response:send(Request#request.from,
httpc_response:error(Request, timeout)),
+ httpc_manager:request_done(RequestId, ProfileName),
?hcrv("response (timeout) sent - now terminate", []),
{stop, normal,
State#state{request = Request#request{from = answer_sent},
canceled = [RequestId | Canceled]}};
-handle_info({timeout, RequestId}, #state{canceled = Canceled} = State) ->
+handle_info({timeout, RequestId},
+ #state{canceled = Canceled,
+ profile_name = ProfileName} = State) ->
?hcri("timeout", [{id, RequestId}]),
Filter =
fun(#request{id = Id, from = From} = Request) when Id =:= RequestId ->
?hcrv("found request", [{id, Id}, {from, From}]),
%% Notify the owner
- Response = httpc_response:error(Request, timeout),
- httpc_response:send(From, Response),
+ httpc_response:send(From,
+ httpc_response:error(Request, timeout)),
+ httpc_manager:request_done(RequestId, ProfileName),
?hcrv("response (timeout) sent", []),
[Request#request{from = answer_sent}];
(_) ->
@@ -675,6 +680,24 @@ handle_info({'EXIT', _, _}, State) ->
%%--------------------------------------------------------------------
%% Init error there is no socket to be closed.
+terminate(normal,
+ #state{request = Request,
+ session = {send_failed, AReason} = Reason} = State) ->
+ ?hcrd("terminate", [{send_reason, AReason}, {request, Request}]),
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
+terminate(normal,
+ #state{request = Request,
+ session = {connect_failed, AReason} = Reason} = State) ->
+ ?hcrd("terminate", [{connect_reason, AReason}, {request, Request}]),
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
terminate(normal, #state{session = undefined}) ->
ok;
@@ -886,18 +909,17 @@ connect_and_send_first_request(Address,
NewState = activate_request_timeout(TmpState),
{ok, NewState};
- {error, Reason} ->
+ {error, Reason} = Error ->
?hcrv("failed sending request", [{reason, Reason}]),
- Error = {error, {send_failed,
- httpc_response:error(Request, Reason)}},
- {stop, Error, State#state{request = Request}}
+ {stop, Error,
+ State#state{session = {send_failed, Reason},
+ request = Request}}
end;
- {error, Reason} ->
+ {error, Reason} = Error ->
?hcri("connect failed", [{reason, Reason}]),
- Error = {error, {connect_failed,
- httpc_response:error(Request, Reason)}},
- {stop, Error, State#state{request = Request}}
+ {stop, Error, State#state{session = {connect_failed, Reason},
+ request = Request}}
end.
@@ -1407,14 +1429,16 @@ try_to_enable_pipeline_or_keep_alive(
State#state{status = close}
end.
-answer_request(Request, Msg, #state{timers = Timers} = State) ->
+answer_request(#request{id = RequestId, from = From} = Request, Msg,
+ #state{timers = Timers, profile_name = ProfileName} = State) ->
?hcrt("answer request", [{request, Request}]),
- httpc_response:send(Request#request.from, Msg),
+ httpc_response:send(From, Msg),
RequestTimers = Timers#timers.request_timers,
TimerRef =
- proplists:get_value(Request#request.id, RequestTimers, undefined),
- Timer = {Request#request.id, TimerRef},
+ proplists:get_value(RequestId, RequestTimers, undefined),
+ Timer = {RequestId, TimerRef},
cancel_timer(TimerRef, {timeout, Request#request.id}),
+ httpc_manager:request_done(RequestId, ProfileName),
State#state{request = Request#request{from = answer_sent},
timers =
Timers#timers{request_timers =
diff --git a/lib/inets/src/http_client/httpc_manager.erl b/lib/inets/src/http_client/httpc_manager.erl
index f3cd81f4a7..4738517210 100644
--- a/lib/inets/src/http_client/httpc_manager.erl
+++ b/lib/inets/src/http_client/httpc_manager.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2002-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -30,6 +30,7 @@
request/2,
cancel_request/2,
request_canceled/2,
+ request_done/2,
retry_request/2,
redirect_request/2,
insert_session/2,
@@ -65,15 +66,6 @@
state % State of the handler: initiating | started | operational | canceled
}).
-%% Entries in the handler / request cross-ref table
-%% -record(request_info,
-%% {
-%% id, % Id of the request
-%% handler, % Pid of the handler process
-%% from, % The From value for the caller
-%% mref % Monitor ref for the caller
-%% }).
-
%%====================================================================
%% Internal Application API
@@ -171,6 +163,18 @@ request_canceled(RequestId, ProfileName) ->
%%--------------------------------------------------------------------
+%% Function: request_done(RequestId, ProfileName) -> ok
+%% RequestId - ref()
+%% ProfileName = atom()
+%%
+%% Description: Inform the manager that a request has been completed.
+%%--------------------------------------------------------------------
+
+request_done(RequestId, ProfileName) ->
+ cast(ProfileName, {request_done, RequestId}).
+
+
+%%--------------------------------------------------------------------
%% Function: insert_session(Session, ProfileName) -> _
%% Session - #tcp_session{}
%% ProfileName - atom()
@@ -486,6 +490,11 @@ handle_cast({request_canceled, RequestId}, State) ->
{noreply, State}
end;
+handle_cast({request_done, RequestId}, State) ->
+ ?hcrv("request done", [{request_id, RequestId}]),
+ ets:delete(State#state.handler_db, RequestId),
+ {noreply, State};
+
handle_cast({set_options, Options}, State = #state{options = OldOptions}) ->
?hcrv("set options", [{options, Options}, {old_options, OldOptions}]),
NewOptions =
@@ -559,7 +568,9 @@ handle_info({'EXIT', Pid, Reason}, #state{handler_db = HandlerDb} = State) ->
handle_info({'DOWN', _, _, Pid, _}, State) ->
%%
- %% Check what happens to waiting requests! Chall we not send a reply?
+ %% Normally this should have been cleaned up already
+ %% (when receiving {request_done, RequestId}), but
+ %% just in case there is a glitch, clean up anyway.
%%
Pattern = #handler_info{handler = Pid, _ = '_'},
@@ -631,7 +642,16 @@ get_handler_info(Tab) ->
Acc
end,
Handlers2 = lists:foldl(F, [], Handlers1),
- Handlers3 = [{Pid, State, httpc_handler:info(Pid)} ||
+ Handlers3 = [{Pid, State,
+ case (catch httpc_handler:info(Pid)) of
+ {'EXIT', _} ->
+ %% Why would this crash?
+ %% Only if the process has died, but we don't
+ %% know about it?
+ [];
+ Else ->
+ Else
+ end} ||
{Pid, State} <- Handlers2],
Handlers3.
@@ -648,6 +668,10 @@ handle_started(StarterPid, ReqId, HandlerPid,
case ets:lookup(HandlerDb, ReqId) of
[#handler_info{state = initiating} = HandlerInfo] ->
?hcri("received started ack for initiating handler", []),
+ %% As a last resort, make sure we know when it exits,
+ %% in case it forgets to notify us.
+ %% We don't need to know the ref id?
+ erlang:monitor(process, HandlerPid),
HandlerInfo2 = HandlerInfo#handler_info{handler = HandlerPid,
state = started},
ets:insert(HandlerDb, HandlerInfo2),
diff --git a/lib/inets/src/http_lib/http_util.erl b/lib/inets/src/http_lib/http_util.erl
index ddb58c7116..4f1147176c 100644
--- a/lib/inets/src/http_lib/http_util.erl
+++ b/lib/inets/src/http_lib/http_util.erl
@@ -38,13 +38,79 @@ to_upper(Str) ->
to_lower(Str) ->
string:to_lower(Str).
-convert_netscapecookie_date([_D,_A,_Y, $,, _SP,
- D1,D2,_DA,
- M,O,N,_DA,
- Y1,Y2,Y3,Y4,_SP,
- H1,H2,_Col,
- M1,M2,_Col,
+%% Example: Mon, 09-Dec-2002 13:46:00 GMT
+convert_netscapecookie_date([_D,_A,_Y, $,, $ ,
+ D1,D2, $-,
+ M,O,N, $-,
+ Y1,Y2,Y3,Y4, $ ,
+ H1,H2, $:,
+ M1,M2, $:,
+ S1,S2|_Rest]) ->
+ Year = list_to_integer([Y1,Y2,Y3,Y4]),
+ Day = list_to_integer([D1,D2]),
+ Month = convert_month([M,O,N]),
+ Hour = list_to_integer([H1,H2]),
+ Min = list_to_integer([M1,M2]),
+ Sec = list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}};
+
+convert_netscapecookie_date([_D,_A,_Y, $,, $ ,
+ D1,D2, $-,
+ M,O,N, $-,
+ Y3,Y4, $ ,
+ H1,H2, $:,
+ M1,M2, $:,
+ S1,S2|_Rest]) ->
+ {CurrentYear, _, _} = date(),
+ [Y1,Y2|_] = integer_to_list(CurrentYear),
+ Year = list_to_integer([Y1,Y2,Y3,Y4]),
+ Day = list_to_integer([D1,D2]),
+ Month = convert_month([M,O,N]),
+ Hour = list_to_integer([H1,H2]),
+ Min = list_to_integer([M1,M2]),
+ Sec = list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}};
+
+convert_netscapecookie_date([_D,_A,_Y, $ ,
+ D1,D2, $-,
+ M,O,N, $-,
+ Y1,Y2,Y3,Y4, $ ,
+ H1,H2, $:,
+ M1,M2, $:,
S1,S2|_Rest]) ->
+ Year = list_to_integer([Y1,Y2,Y3,Y4]),
+ Day = list_to_integer([D1,D2]),
+ Month = convert_month([M,O,N]),
+ Hour = list_to_integer([H1,H2]),
+ Min = list_to_integer([M1,M2]),
+ Sec = list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}};
+
+convert_netscapecookie_date([_D,_A,_Y, $ ,
+ D1,D2, $-,
+ M,O,N, $-,
+ Y3,Y4, $ ,
+ H1,H2, $:,
+ M1,M2, $:,
+ S1,S2|_Rest]) ->
+ {CurrentYear, _, _} = date(),
+ [Y1,Y2|_] = integer_to_list(CurrentYear),
+ Year = list_to_integer([Y1,Y2,Y3,Y4]),
+ Day = list_to_integer([D1,D2]),
+ Month = convert_month([M,O,N]),
+ Hour = list_to_integer([H1,H2]),
+ Min = list_to_integer([M1,M2]),
+ Sec = list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}};
+
+%% Sloppy...
+convert_netscapecookie_date([_D,_A,_Y, $,, _SP,
+ D1,D2,_DA,
+ M,O,N,_DA,
+ Y1,Y2,Y3,Y4,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2|_Rest]) ->
Year=list_to_integer([Y1,Y2,Y3,Y4]),
Day=list_to_integer([D1,D2]),
Month=convert_month([M,O,N]),
@@ -54,12 +120,12 @@ convert_netscapecookie_date([_D,_A,_Y, $,, _SP,
{{Year,Month,Day},{Hour,Min,Sec}};
convert_netscapecookie_date([_D,_A,_Y, _SP,
- D1,D2,_DA,
- M,O,N,_DA,
- Y1,Y2,Y3,Y4,_SP,
- H1,H2,_Col,
- M1,M2,_Col,
- S1,S2|_Rest]) ->
+ D1,D2,_DA,
+ M,O,N,_DA,
+ Y1,Y2,Y3,Y4,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2|_Rest]) ->
Year=list_to_integer([Y1,Y2,Y3,Y4]),
Day=list_to_integer([D1,D2]),
Month=convert_month([M,O,N]),
@@ -68,17 +134,17 @@ convert_netscapecookie_date([_D,_A,_Y, _SP,
Sec=list_to_integer([S1,S2]),
{{Year,Month,Day},{Hour,Min,Sec}}.
-hexlist_to_integer([])->
+hexlist_to_integer([]) ->
empty;
%%When the string only contains one value its eaasy done.
%% 0-9
-hexlist_to_integer([Size]) when Size >= 48 , Size =< 57 ->
+hexlist_to_integer([Size]) when (Size >= 48) andalso (Size =< 57) ->
Size - 48;
%% A-F
-hexlist_to_integer([Size]) when Size >= 65 , Size =< 70 ->
+hexlist_to_integer([Size]) when (Size >= 65) andalso (Size =< 70) ->
Size - 55;
%% a-f
-hexlist_to_integer([Size]) when Size >= 97 , Size =< 102 ->
+hexlist_to_integer([Size]) when (Size >= 97) andalso (Size =< 102) ->
Size - 87;
hexlist_to_integer([_Size]) ->
not_a_num;
@@ -141,7 +207,7 @@ hexlist_to_integer2([HexVal | HexString], Pos, Sum)
hexlist_to_integer2(_AfterHexString, _Pos, Sum)->
Sum.
-integer_to_hexlist(Num, Pot, Res) when Pot<0 ->
+integer_to_hexlist(Num, Pot, Res) when Pot < 0 ->
convert_to_ascii([Num | Res]);
integer_to_hexlist(Num,Pot,Res) ->
@@ -163,7 +229,9 @@ convert_to_ascii(RevesedNum) ->
convert_to_ascii([], Num)->
Num;
-convert_to_ascii([Num | Reversed], Number) when Num > -1, Num < 10 ->
+convert_to_ascii([Num | Reversed], Number)
+ when (Num > -1) andalso (Num < 10) ->
convert_to_ascii(Reversed, [Num + 48 | Number]);
-convert_to_ascii([Num | Reversed], Number) when Num > 9, Num < 16 ->
+convert_to_ascii([Num | Reversed], Number)
+ when (Num > 9) andalso (Num < 16) ->
convert_to_ascii(Reversed, [Num + 55 | Number]).
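
A rough sketch of the date formats the new clauses above accept, assuming http_util:convert_netscapecookie_date/1 is exported (it is called from httpc_cookie) and that convert_month/1 maps "Dec" to 12 as elsewhere in this module; for 2-digit years the century is taken from the current date:

    1> http_util:convert_netscapecookie_date("Mon, 09-Dec-2002 13:46:00 GMT").
    {{2002,12,9},{13,46,0}}
    2> http_util:convert_netscapecookie_date("Mon, 09-Dec-02 13:46:00 GMT").
    {{2002,12,9},{13,46,0}}
    3> http_util:convert_netscapecookie_date("Mon 09-Dec-02 13:46:00 GMT").
    {{2002,12,9},{13,46,0}}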
diff --git a/lib/inets/src/http_server/httpd_conf.erl b/lib/inets/src/http_server/httpd_conf.erl
index 3e498d1db7..5ca2e47eb5 100644
--- a/lib/inets/src/http_server/httpd_conf.erl
+++ b/lib/inets/src/http_server/httpd_conf.erl
@@ -354,7 +354,7 @@ load("DocumentRoot " ++ DocumentRoot,[]) ->
{ok, Directory} ->
{ok, [], {document_root,string:strip(Directory,right,$/)}};
{error, _} ->
- {error, ?NICE(clean(DocumentRoot)++"is an invalid DocumentRoot")}
+ {error, ?NICE(clean(DocumentRoot)++" is an invalid DocumentRoot")}
end;
load("DefaultType " ++ DefaultType, []) ->
{ok, [], {default_type,clean(DefaultType)}};
diff --git a/lib/inets/src/http_server/httpd_instance_sup.erl b/lib/inets/src/http_server/httpd_instance_sup.erl
index 0aaeb838c2..baa60d318c 100644
--- a/lib/inets/src/http_server/httpd_instance_sup.erl
+++ b/lib/inets/src/http_server/httpd_instance_sup.erl
@@ -97,14 +97,16 @@ start_link(ConfigFile, AcceptTimeout, ListenInfo, Debug) ->
%%%=========================================================================
%%% Supervisor callback
%%%=========================================================================
-init([ConfigFile, ConfigList, AcceptTimeout, _Debug, Address, Port]) ->
+init([ConfigFile, ConfigList, AcceptTimeout, Debug, Address, Port]) ->
+ httpd_util:enable_debug(Debug),
Flags = {one_for_one, 0, 1},
Children = [sup_spec(httpd_acceptor_sup, Address, Port),
sup_spec(httpd_misc_sup, Address, Port),
worker_spec(httpd_manager, Address, Port,
ConfigFile, ConfigList,AcceptTimeout)],
{ok, {Flags, Children}};
-init([ConfigFile, ConfigList, AcceptTimeout, _Debug, Address, Port, ListenInfo]) ->
+init([ConfigFile, ConfigList, AcceptTimeout, Debug, Address, Port, ListenInfo]) ->
+ httpd_util:enable_debug(Debug),
Flags = {one_for_one, 0, 1},
Children = [sup_spec(httpd_acceptor_sup, Address, Port),
sup_spec(httpd_misc_sup, Address, Port),
diff --git a/lib/inets/src/http_server/httpd_sup.erl b/lib/inets/src/http_server/httpd_sup.erl
index 3399f78b53..1507c6852a 100644
--- a/lib/inets/src/http_server/httpd_sup.erl
+++ b/lib/inets/src/http_server/httpd_sup.erl
@@ -185,14 +185,14 @@ httpd_child_spec(ConfigFile, AcceptTimeout, Debug) ->
httpd_child_spec(Config, AcceptTimeout, Debug, Addr, 0) ->
case start_listen(Addr, 0, Config) of
{Pid, {NewPort, NewConfig, ListenSocket}} ->
- Name = {httpd_instance_sup, Addr, NewPort},
+ Name = {httpd_instance_sup, Addr, NewPort},
StartFunc = {httpd_instance_sup, start_link,
[NewConfig, AcceptTimeout,
{Pid, ListenSocket}, Debug]},
- Restart = permanent,
- Shutdown = infinity,
- Modules = [httpd_instance_sup],
- Type = supervisor,
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [httpd_instance_sup],
+ Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules};
{Pid, {error, Reason}} ->
exit(Pid, normal),
diff --git a/lib/inets/src/http_server/httpd_util.erl b/lib/inets/src/http_server/httpd_util.erl
index b59fd861dc..cfad79638f 100644
--- a/lib/inets/src/http_server/httpd_util.erl
+++ b/lib/inets/src/http_server/httpd_util.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -755,23 +755,18 @@ do_enable_debug([{Level,Modules}|Rest])
when is_atom(Level) andalso is_list(Modules) ->
case Level of
all_functions ->
- io:format("Tracing on all functions set on modules: ~p~n",
- [Modules]),
lists:foreach(
- fun(X)->
+ fun(X) ->
dbg:tpl(X, [{'_', [], [{return_trace}]}])
end, Modules);
exported_functions ->
- io:format("Tracing on exported functions set on "
- "modules: ~p~n",[Modules]),
lists:foreach(
- fun(X)->
+ fun(X) ->
dbg:tp(X, [{'_', [], [{return_trace}]}])
end, Modules);
disable ->
- io:format("Tracing disabled on modules: ~p~n", [Modules]),
lists:foreach(
- fun(X)->
+ fun(X) ->
dbg:ctp(X)
end, Modules);
_ ->
diff --git a/lib/inets/src/http_server/mod_esi.erl b/lib/inets/src/http_server/mod_esi.erl
index 484d4b3fb4..cb33544540 100644
--- a/lib/inets/src/http_server/mod_esi.erl
+++ b/lib/inets/src/http_server/mod_esi.erl
@@ -33,6 +33,7 @@
-define(VMODULE,"ESI").
-define(DEFAULT_ERL_TIMEOUT,15000).
+
%%%=========================================================================
%%% API
%%%=========================================================================
@@ -52,6 +53,7 @@ deliver(SessionID, Data) when is_pid(SessionID) ->
deliver(_SessionID, _Data) ->
{error, bad_sessionID}.
+
%%%=========================================================================
%%% CALLBACK API
%%%=========================================================================
@@ -74,6 +76,8 @@ do(ModData) ->
{proceed, ModData#mod.data}
end
end.
+
+
%%--------------------------------------------------------------------------
%% load(Line, Context) -> eof | ok | {ok, NewContext} |
%% {ok, NewContext, Directive} |
@@ -127,6 +131,7 @@ load("ErlScriptNoCache " ++ CacheArg, [])->
" is an invalid ErlScriptNoCache directive")}
end.
+
%%--------------------------------------------------------------------------
%% store(Directive, DirectiveList) -> {ok, NewDirective} |
%% {ok, [NewDirective]} |
@@ -163,16 +168,18 @@ store({eval_script_alias, {Name, Modules}} = Conf, _)
store({erl_script_alias, Value}, _) ->
{error, {wrong_type, {erl_script_alias, Value}}};
-store({erl_script_timeout, Value} = Conf, _)
- when is_integer(Value), Value >= 0 ->
- {ok, Conf};
+store({erl_script_timeout, TimeoutSec}, _)
+ when is_integer(TimeoutSec) andalso (TimeoutSec >= 0) ->
+ {ok, {erl_script_timeout, TimeoutSec * 1000}};
store({erl_script_timeout, Value}, _) ->
{error, {wrong_type, {erl_script_timeout, Value}}};
-store({erl_script_nocache, Value} = Conf, _) when Value == true;
- Value == false ->
+store({erl_script_nocache, Value} = Conf, _)
+ when (Value =:= true) orelse (Value =:= false) ->
{ok, Conf};
store({erl_script_nocache, Value}, _) ->
{error, {wrong_type, {erl_script_nocache, Value}}}.
+
+
%%%========================================================================
%%% Internal functions
%%%========================================================================
@@ -227,7 +234,7 @@ alias_match_str(Alias, eval_script_alias) ->
%%------------------------ Erl mechanism --------------------------------
erl(#mod{method = Method} = ModData, ESIBody, Modules)
- when Method == "GET"; Method == "HEAD"->
+ when (Method =:= "GET") orelse (Method =:= "HEAD") ->
case httpd_util:split(ESIBody,":|%3A|/",2) of
{ok, [ModuleName, FuncAndInput]} ->
case httpd_util:split(FuncAndInput,"[\?/]",2) of
@@ -347,7 +354,7 @@ erl_scheme_webpage_chunk(Mod, Func, Env, Input, ModData) ->
Pid = spawn_link(
fun() ->
case catch Mod:Func(Self, Env, Input) of
- {'EXIT',{undef,_}} ->
+ {'EXIT', {undef,_}} ->
%% Will force fallback on the old API
exit(erl_scheme_webpage_chunk_undefined);
_ ->
@@ -430,11 +437,12 @@ handle_body(Pid, ModData, Body, Timeout, Size, IsDisableChunkedSend) ->
{proceed, [{response, {already_sent, 200, Size}} |
ModData#mod.data]};
{'EXIT', Pid, Reason} when is_pid(Pid) ->
+ httpd_response:send_final_chunk(ModData, IsDisableChunkedSend),
exit({mod_esi_linked_process_died, Pid, Reason})
after Timeout ->
process_flag(trap_exit,false),
- {proceed,[{response, {already_sent, 200, Size}} |
- ModData#mod.data]}
+ httpd_response:send_final_chunk(ModData, IsDisableChunkedSend),
+ exit({mod_esi_linked_process_timeout, Pid})
end.
erl_script_timeout(Db) ->
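%% A minimal sketch (not part of the patch) of the changed erl_script_timeout
%% handling in the store/2 clause above: the directive value (assumed to be
%% given in seconds) is now stored in milliseconds, while non-integer and
%% negative values are still rejected. Assuming store/2 is exported as an
%% httpd configuration callback, a shell session would behave roughly as:
%%
%%   1> mod_esi:store({erl_script_timeout, 10}, []).
%%   {ok,{erl_script_timeout,10000}}
%%   2> mod_esi:store({erl_script_timeout, -1}, []).
%%   {error,{wrong_type,{erl_script_timeout,-1}}}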
diff --git a/lib/inets/src/inets_app/inets.appup.src b/lib/inets/src/inets_app/inets.appup.src
index 2efa7ccb60..d86e998f01 100644
--- a/lib/inets/src/inets_app/inets.appup.src
+++ b/lib/inets/src/inets_app/inets.appup.src
@@ -1,23 +1,60 @@
%% This is an -*- erlang -*- file.
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
{"%VSN%",
[
+ {"5.3.3",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, []},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3.2",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, []},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3.1",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, [httpc_manager]},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, [httpc_manager]},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []},
+ {load_module, mod_esi, soft_purge, soft_purge, []}
+ ]
+ },
{"5.2",
[
{restart_application, inets}
@@ -35,6 +72,43 @@
}
],
[
+ {"5.3.3",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, []},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3.2",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, []},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3.1",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, [httpc_manager]},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.3",
+ [
+ {load_module, inets, soft_purge, soft_purge, []},
+ {load_module, http_util, soft_purge, soft_purge, []},
+ {load_module, httpc, soft_purge, soft_purge, []},
+ {load_module, httpc_cookie, soft_purge, soft_purge, []},
+ {update, httpc_handler, soft, soft_purge, soft_purge, [httpc_manager]},
+ {update, httpc_manager, soft, soft_purge, soft_purge, []},
+ {load_module, mod_esi, soft_purge, soft_purge, []}
+ ]
+ },
{"5.2",
[
{restart_application, inets}
diff --git a/lib/inets/src/inets_app/inets.erl b/lib/inets/src/inets_app/inets.erl
index 7e3f862ee7..054468e445 100644
--- a/lib/inets/src/inets_app/inets.erl
+++ b/lib/inets/src/inets_app/inets.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2006-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -57,7 +57,7 @@ start(Type) ->
%% Function: start(Service, ServiceConfig [, How]) -> {ok, Pid} |
%% {error, Reason}
%%
-%% Service = - ftpc | tftpd | httpc | httpd
+%% Service = ftpc | tftpd | tftpc | tftp | httpc | httpd
%% ServiceConfig = ConfPropList | ConfFile
%% ConfPropList = [{Property, Value}] according to service
%% ConfFile = Path - when service is httpd
@@ -100,7 +100,7 @@ stop() ->
%%--------------------------------------------------------------------
%% Function: stop(Service, Pid) -> ok
%%
-%% Service - ftp | tftpd | http | httpd | stand_alone
+%% Service - ftpc | ftp | tftpd | tftpc | tftp | httpc | httpd | stand_alone
%%
%% Description: Stops a started service of the inets application or takes
%% down a stand alone "service" gracefully.
@@ -382,7 +382,7 @@ key1search(Key, Vals, Def) ->
%% Description: Returns a list of supported services
%%-------------------------------------------------------------------
service_names() ->
- [ftpc, tftpd, httpc, httpd].
+ [ftpc, tftp, httpc, httpd].
%%-----------------------------------------------------------------
@@ -533,7 +533,7 @@ error_to_exit(Where, {error, Reason}) ->
%%-----------------------------------------------------------------
-%% report_event(Serverity, Label, Service, Content)
+%% report_event(Severity, Label, Service, Content)
%%
%% Parameters:
%% Severity -> 0 =< integer() =< 100
@@ -725,8 +725,8 @@ call_service(Service, Call, Args) ->
service_module(tftpd) ->
tftp;
-service_module(httpc) ->
- httpc;
+service_module(tftpc) ->
+ tftp;
service_module(ftpc) ->
ftp;
service_module(Service) ->
diff --git a/lib/inets/test/ftp_macosx_x86_test.erl b/lib/inets/test/ftp_macosx_x86_test.erl
index c59a992421..5566d4feaa 100644
--- a/lib/inets/test/ftp_macosx_x86_test.erl
+++ b/lib/inets/test/ftp_macosx_x86_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -108,7 +108,7 @@ passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
passive_cd(X) -> ?LIB_MOD:passive_cd(X).
passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
+passive_nlist(X) -> ?LIB_MOD:passive_nlist([{wildcard_support, false} | X]).
passive_rename(X) -> ?LIB_MOD:passive_rename(X).
passive_delete(X) -> ?LIB_MOD:passive_delete(X).
passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
@@ -129,7 +129,7 @@ active_pwd(X) -> ?LIB_MOD:active_pwd(X).
active_cd(X) -> ?LIB_MOD:active_cd(X).
active_lcd(X) -> ?LIB_MOD:active_lcd(X).
active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
+active_nlist(X) -> ?LIB_MOD:active_nlist([{wildcard_support, false} | X]).
active_rename(X) -> ?LIB_MOD:active_rename(X).
active_delete(X) -> ?LIB_MOD:active_delete(X).
active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
diff --git a/lib/inets/test/ftp_suite_lib.erl b/lib/inets/test/ftp_suite_lib.erl
index 75e1a5a7f9..b3c4ff2657 100644
--- a/lib/inets/test/ftp_suite_lib.erl
+++ b/lib/inets/test/ftp_suite_lib.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -497,7 +497,8 @@ passive_nlist(suite) ->
[];
passive_nlist(Config) when is_list(Config) ->
Pid = ?config(ftp, Config),
- do_nlist(Pid).
+ WildcardSupport = ?config(wildcard_support, Config),
+ do_nlist(Pid, WildcardSupport).
%%-------------------------------------------------------------------------
@@ -757,7 +758,8 @@ active_nlist(suite) ->
[];
active_nlist(Config) when is_list(Config) ->
Pid = ?config(ftp, Config),
- do_nlist(Pid).
+ WildcardSupport = ?config(wildcard_support, Config),
+ do_nlist(Pid, WildcardSupport).
%%-------------------------------------------------------------------------
@@ -1218,16 +1220,20 @@ do_ls(Pid) ->
{ok, _} = ftp:ls(Pid, "incom*"),
ok.
-do_nlist(Pid) ->
+do_nlist(Pid, WildcardSupport) ->
{ok, _} = ftp:nlist(Pid),
{ok, _} = ftp:nlist(Pid, "incoming"),
%% neither nlist nor ls operates on a directory
%% they operate on a pathname, which *can* be a
%% directory, but can also be a filename or a group
%% of files (including wildcards).
- {ok, _} = ftp:nlist(Pid, "incom*"),
-%% {error, epath} = ftp:nlist(Pid, ?BAD_DIR),
- ok.
+ case WildcardSupport of
+ true ->
+ {ok, _} = ftp:nlist(Pid, "incom*"),
+ ok;
+ _ ->
+ ok
+ end.
do_rename(Pid, Config) ->
PrivDir = ?config(priv_dir, Config),
diff --git a/lib/inets/test/http_format_SUITE.erl b/lib/inets/test/http_format_SUITE.erl
index 9559317640..79945f0f4d 100644
--- a/lib/inets/test/http_format_SUITE.erl
+++ b/lib/inets/test/http_format_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -567,6 +567,12 @@ convert_netscapecookie_date(Config) when is_list(Config) ->
http_util:convert_netscapecookie_date("Sun, 12-Nov-2006 08:59:38 GMT"),
{{2006,12,12},{8,59,38}} =
http_util:convert_netscapecookie_date("Sun, 12-Dec-2006 08:59:38 GMT"),
+ {{2006,12,12},{8,59,38}} =
+ http_util:convert_netscapecookie_date("Sun 12-Dec-2006 08:59:38 GMT"),
+ {{2006,12,12},{8,59,38}} =
+ http_util:convert_netscapecookie_date("Sun, 12-Dec-06 08:59:38 GMT"),
+ {{2006,12,12},{8,59,38}} =
+ http_util:convert_netscapecookie_date("Sun 12-Dec-06 08:59:38 GMT"),
ok.
%%--------------------------------------------------------------------
diff --git a/lib/inets/test/httpc_SUITE.erl b/lib/inets/test/httpc_SUITE.erl
index aa65fb1197..f2e8bebe07 100644
--- a/lib/inets/test/httpc_SUITE.erl
+++ b/lib/inets/test/httpc_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%
@@ -284,7 +284,8 @@ tickets(suite) ->
otp_8106,
otp_8056,
otp_8352,
- otp_8371
+ otp_8371,
+ otp_8739
].
@@ -2440,7 +2441,78 @@ otp_8371(Config) when is_list(Config) ->
ok.
+%%-------------------------------------------------------------------------
+otp_8739(doc) ->
+ ["OTP-8739"];
+otp_8739(suite) ->
+ [];
+otp_8739(Config) when is_list(Config) ->
+ {_DummyServerPid, Port} = otp_8739_dummy_server(),
+ URL = ?URL_START ++ integer_to_list(Port) ++ "/dummy.html",
+ Method = get,
+ Request = {URL, []},
+ HttpOptions = [{connect_timeout, 500}, {timeout, 1}],
+ Options = [{sync, true}],
+ case http:request(Method, Request, HttpOptions, Options) of
+ {error, timeout} ->
+ %% And now we check the size of the handler db
+ Info = httpc:info(),
+ tsp("Info: ~p", [Info]),
+ {value, {handlers, Handlers}} =
+ lists:keysearch(handlers, 1, Info),
+ case Handlers of
+ [] ->
+ ok;
+ _ ->
+ tsf({unexpected_handlers, Handlers})
+ end;
+ Unexpected ->
+ tsf({unexpected, Unexpected})
+ end.
+
+
+otp_8739_dummy_server() ->
+ Parent = self(),
+ Pid = spawn_link(fun() -> otp_8739_dummy_server_init(Parent) end),
+ receive
+ {port, Port} ->
+ {Pid, Port}
+ end.
+
+otp_8739_dummy_server_init(Parent) ->
+ {ok, ListenSocket} =
+ gen_tcp:listen(0, [binary, inet, {packet, 0},
+ {reuseaddr,true},
+ {active, false}]),
+ {ok, Port} = inet:port(ListenSocket),
+ Parent ! {port, Port},
+ otp_8739_dummy_server_main(Parent, ListenSocket).
+
+otp_8739_dummy_server_main(Parent, ListenSocket) ->
+ case gen_tcp:accept(ListenSocket) of
+ {ok, Sock} ->
+ %% Ignore the request, and simply wait for the socket to close
+ receive
+ {tcp_closed, Sock} ->
+ (catch gen_tcp:close(ListenSocket)),
+ exit(normal);
+ {tcp_error, Sock, Reason} ->
+ tsp("socket error: ~p", [Reason]),
+ (catch gen_tcp:close(ListenSocket)),
+ exit(normal)
+ after 10000 ->
+ %% Just in case
+ (catch gen_tcp:close(Sock)),
+ (catch gen_tcp:close(ListenSocket)),
+ exit(timeout)
+ end;
+ Error ->
+ exit(Error)
+ end.
+
+
+
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
diff --git a/lib/inets/vsn.mk b/lib/inets/vsn.mk
index 23de8906e7..d70a0234f6 100644
--- a/lib/inets/vsn.mk
+++ b/lib/inets/vsn.mk
@@ -1,28 +1,43 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
# %CopyrightBegin%
-#
+#
# Copyright Ericsson AB 1997-2010. All Rights Reserved.
-#
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
APPLICATION = inets
-INETS_VSN = 5.3
+INETS_VSN = 5.3.4
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(INETS_VSN)$(PRE_VSN)"
-TICKETS = \
+TICKETS = OTP-8739 OTP-8741 OTP-8742
+
+TICKETS_5_3_3 = \
+ OTP-8609 \
+ OTP-8610 \
+ OTP-8624
+
+TICKETS_5_3_2 = \
+ OTP-8542 \
+ OTP-8607
+
+TICKETS_5_3_1 = \
+ OTP-8508 \
+ OTP-8509
+
+TICKETS_5_3 = \
OTP-8016 \
OTP-8056 \
OTP-8103 \
@@ -36,17 +51,40 @@ TICKETS = \
OTP-8359 \
OTP-8371
-TICKETS_5_2 = OTP-8204 OTP-8206 OTP-8247 OTP-8248 OTP-8249 OTP-8258 OTP-8280
+TICKETS_5_2 = \
+ OTP-8204 \
+ OTP-8206 \
+ OTP-8247 \
+ OTP-8248 \
+ OTP-8249 \
+ OTP-8258 \
+ OTP-8280
-TICKETS_5_1_3 = OTP-8154
+TICKETS_5_1_3 = \
+ OTP-8154
-TICKETS_5_1_2 = OTP-7298 OTP-8101 OTP-8118
+TICKETS_5_1_2 = \
+ OTP-7298 \
+ OTP-8101 \
+ OTP-8118
-TICKETS_5_1_1 = OTP-8052 OTP-8069
+TICKETS_5_1_1 = \
+ OTP-8052 \
+ OTP-8069
-TICKETS_5_1 = OTP-7994 OTP-7998 OTP-8001 OTP-8004 OTP-8005
+TICKETS_5_1 = \
+ OTP-7994 \
+ OTP-7998 \
+ OTP-8001 \
+ OTP-8004 \
+ OTP-8005
-TICKETS_5_0_14 = OTP-7882 OTP-7883 OTP-7888 OTP-7950 OTP-7976
+TICKETS_5_0_14 = \
+ OTP-7882 \
+ OTP-7883 \
+ OTP-7888 \
+ OTP-7950 \
+ OTP-7976
TICKETS_5.0.13 = \
OTP-7723 \
@@ -56,41 +94,3 @@ TICKETS_5.0.13 = \
OTP-7815 \
OTP-7857
-# TICKETS_5.0.12 = \
-# OTP-7636
-#
-# TICKETS_5.0.11 = \
-# OTP-7574 \
-# OTP-7597 \
-# OTP-7598 \
-# OTP-7605
-#
-# TICKETS_5.0.10 = \
-# OTP-7450 \
-# OTP-7454 \
-# OTP-7490 \
-# OTP-7512
-#
-# TICKETS_5.0.9 = \
-# OTP-7257 \
-# OTP-7323 \
-# OTP-7341
-#
-# TICKETS_5.0.8 = \
-# OTP-7315 \
-# OTP-7321
-#
-# TICKETS_5.0.7 = \
-# OTP-7304
-#
-# TICKETS_5.0.6 = \
-# OTP-7266
-#
-# TICKETS_5.0.5 = \
-# OTP-7220 \
-# OTP-7221
-#
-# TICKETS_5.0.4 = \
-# OTP-7173
-#
-
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 7bb6aea40e..aa652020d9 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -30,6 +30,53 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 2.13.5.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A bug introduced in Kernel 2.13.5.2 has been fixed.</p>
+ <p>
+ Own Id: OTP-8686 Aux Id: OTP-8643</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 2.13.5.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Under certain circumstances the net kernel could hang.
+ (Thanks to Scott Lystig Fritchie.)</p>
+ <p>
+ Own Id: OTP-8643 Aux Id: seq11584</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 2.13.5.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition in <c>os:cmd/1</c> could cause the
+ caller to get stuck in <c>os:cmd/1</c> forever.</p>
+ <p>
+ Own Id: OTP-8502</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 2.13.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
index 3afaedf274..1353ac65c6 100644
--- a/lib/kernel/src/net_kernel.erl
+++ b/lib/kernel/src/net_kernel.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(net_kernel).
@@ -354,13 +354,13 @@ init({Name, LongOrShortNames, TickT}) ->
%% The response is delayed until the connection is up and
%% running.
%%
-handle_call({connect, _, Node}, _From, State) when Node =:= node() ->
- {reply, true, State};
+handle_call({connect, _, Node}, From, State) when Node =:= node() ->
+ async_reply({reply, true, State}, From);
handle_call({connect, Type, Node}, From, State) ->
verbose({connect, Type, Node}, 1, State),
case ets:lookup(sys_dist, Node) of
[Conn] when Conn#connection.state =:= up ->
- {reply, true, State};
+ async_reply({reply, true, State}, From);
[Conn] when Conn#connection.state =:= pending ->
Waiting = Conn#connection.waiting,
ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
@@ -376,19 +376,19 @@ handle_call({connect, Type, Node}, From, State) ->
{noreply,State#state{conn_owners=Owners}};
_ ->
?connect_failure(Node, {setup_call, failed}),
- {reply, false, State}
+ async_reply({reply, false, State}, From)
end
end;
%%
%% Close the connection to Node.
%%
-handle_call({disconnect, Node}, _From, State) when Node =:= node() ->
- {reply, false, State};
-handle_call({disconnect, Node}, _From, State) ->
+handle_call({disconnect, Node}, From, State) when Node =:= node() ->
+ async_reply({reply, false, State}, From);
+handle_call({disconnect, Node}, From, State) ->
verbose({disconnect, Node}, 1, State),
{Reply, State1} = do_disconnect(Node, State),
- {reply, Reply, State1};
+ async_reply({reply, Reply, State1}, From);
%%
%% The spawn/4 BIF ends up here.
@@ -411,39 +411,40 @@ handle_call({spawn_opt,M,F,A,O,L,Gleader},{From,Tag},State) when is_pid(From) ->
%%
%% Only allow certain nodes.
%%
-handle_call({allow, Nodes}, _From, State) ->
+handle_call({allow, Nodes}, From, State) ->
case all_atoms(Nodes) of
true ->
Allowed = State#state.allowed,
- {reply,ok,State#state{allowed = Allowed ++ Nodes}};
+ async_reply({reply,ok,State#state{allowed = Allowed ++ Nodes}},
+ From);
false ->
- {reply,error,State}
+ async_reply({reply,error,State}, From)
end;
%%
%% authentication, used by auth. Simply works as this:
%% if the message comes through, the other node IS authorized.
%%
-handle_call({is_auth, _Node}, _From, State) ->
- {reply,yes,State};
+handle_call({is_auth, _Node}, From, State) ->
+ async_reply({reply,yes,State}, From);
%%
%% Not applicable any longer !?
%%
handle_call({apply,_Mod,_Fun,_Args}, {From,Tag}, State)
when is_pid(From), node(From) =:= node() ->
- gen_server:reply({From,Tag}, not_implemented),
+ async_gen_server_reply({From,Tag}, not_implemented),
% Port = State#state.port,
% catch apply(Mod,Fun,[Port|Args]),
{noreply,State};
-handle_call(longnames, _From, State) ->
- {reply, get(longnames), State};
+handle_call(longnames, From, State) ->
+ async_reply({reply, get(longnames), State}, From);
-handle_call({update_publish_nodes, Ns}, _From, State) ->
- {reply, ok, State#state{publish_on_nodes = Ns}};
+handle_call({update_publish_nodes, Ns}, From, State) ->
+ async_reply({reply, ok, State#state{publish_on_nodes = Ns}}, From);
-handle_call({publish_on_node, Node}, _From, State) ->
+handle_call({publish_on_node, Node}, From, State) ->
NewState = case State#state.publish_on_nodes of
undefined ->
State#state{publish_on_nodes =
@@ -457,11 +458,12 @@ handle_call({publish_on_node, Node}, _From, State) ->
Nodes ->
lists:member(Node, Nodes)
end,
- {reply, Publish, NewState};
+ async_reply({reply, Publish, NewState}, From);
-handle_call({verbose, Level}, _From, State) ->
- {reply, State#state.verbose, State#state{verbose = Level}};
+handle_call({verbose, Level}, From, State) ->
+ async_reply({reply, State#state.verbose, State#state{verbose = Level}},
+ From);
%%
%% Set new ticktime
@@ -471,16 +473,16 @@ handle_call({verbose, Level}, _From, State) ->
%% #tick_change{} record if the ticker process has been upgraded;
%% otherwise, an integer or an atom.
-handle_call(ticktime, _, #state{tick = #tick{time = T}} = State) ->
- {reply, T, State};
-handle_call(ticktime, _, #state{tick = #tick_change{time = T}} = State) ->
- {reply, {ongoing_change_to, T}, State};
+handle_call(ticktime, From, #state{tick = #tick{time = T}} = State) ->
+ async_reply({reply, T, State}, From);
+handle_call(ticktime, From, #state{tick = #tick_change{time = T}} = State) ->
+ async_reply({reply, {ongoing_change_to, T}, State}, From);
-handle_call({new_ticktime,T,_TP}, _, #state{tick = #tick{time = T}} = State) ->
+handle_call({new_ticktime,T,_TP}, From, #state{tick = #tick{time = T}} = State) ->
?tckr_dbg(no_tick_change),
- {reply, unchanged, State};
+ async_reply({reply, unchanged, State}, From);
-handle_call({new_ticktime,T,TP}, _, #state{tick = #tick{ticker = Tckr,
+handle_call({new_ticktime,T,TP}, From, #state{tick = #tick{ticker = Tckr,
time = OT}} = State) ->
?tckr_dbg(initiating_tick_change),
start_aux_ticker(T, OT, TP),
@@ -493,14 +495,15 @@ handle_call({new_ticktime,T,TP}, _, #state{tick = #tick{ticker = Tckr,
?tckr_dbg(shorter_ticktime),
shorter
end,
- {reply, change_initiated, State#state{tick = #tick_change{ticker = Tckr,
- time = T,
- how = How}}};
+ async_reply({reply, change_initiated,
+ State#state{tick = #tick_change{ticker = Tckr,
+ time = T,
+ how = How}}}, From);
 handle_call({new_ticktime,_,_},
-	    _,
+	    From,
 	    #state{tick = #tick_change{time = T}} = State) ->
-    {reply, {ongoing_change_to, T}, State}.
+    async_reply({reply, {ongoing_change_to, T}, State}, From).
%% ------------------------------------------------------------
%% handle_cast.
@@ -1063,11 +1066,12 @@ safesend(Pid, Mess) -> Pid ! Mess.
-endif.
do_spawn(SpawnFuncArgs, SpawnOpts, State) ->
+ [_,From|_] = SpawnFuncArgs,
case catch spawn_opt(?MODULE, spawn_func, SpawnFuncArgs, SpawnOpts) of
- {'EXIT', {Reason,_}} ->
- {reply, {'EXIT', {Reason,[]}}, State};
- {'EXIT', Reason} ->
- {reply, {'EXIT', {Reason,[]}}, State};
+ {'EXIT', {Reason,_}} ->
+ async_reply({reply, {'EXIT', {Reason,[]}}, State}, From);
+ {'EXIT', Reason} ->
+ async_reply({reply, {'EXIT', {Reason,[]}}, State}, From);
_ ->
{noreply,State}
end.
@@ -1409,7 +1413,7 @@ reply_waiting(_Node, Waiting, Rep) ->
reply_waiting1(lists:reverse(Waiting), Rep).
reply_waiting1([From|W], Rep) ->
- gen_server:reply(From, Rep),
+ async_gen_server_reply(From, Rep),
reply_waiting1(W, Rep);
reply_waiting1([], _) ->
ok.
@@ -1511,3 +1515,21 @@ verbose(_, _, _) ->
getnode(P) when is_pid(P) -> node(P);
getnode(P) -> P.
+
+async_reply({reply, Msg, State}, From) ->
+ async_gen_server_reply(From, Msg),
+ {noreply, State}.
+
+async_gen_server_reply(From, Msg) ->
+ {Pid, Tag} = From,
+ M = {Tag, Msg},
+ case catch erlang:send(Pid, M, [nosuspend, noconnect]) of
+ ok ->
+ ok;
+ nosuspend ->
+ spawn(fun() -> catch erlang:send(Pid, M, [noconnect]) end);
+ noconnect ->
+ ok; % The gen module takes care of this case.
+ {'EXIT', _}=EXIT ->
+ EXIT
+ end.
diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl
index 196e6cdeb2..d0b498edc9 100644
--- a/lib/kernel/src/os.erl
+++ b/lib/kernel/src/os.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(os).
@@ -169,6 +169,7 @@ unix_cmd(Cmd) ->
%% $1 parameter for easy identification of the resident shell.
%%
-define(SHELL, "/bin/sh -s unix:cmd 2>&1").
+-define(PORT_CREATOR_NAME, os_cmd_port_creator).
%%
%% Serializing open_port through a process to avoid smp lock contention
@@ -176,18 +177,37 @@ unix_cmd(Cmd) ->
%%
-spec start_port() -> port().
start_port() ->
- {Ref,Client} = {make_ref(),self()},
- try (os_cmd_port_creator ! {Ref,Client})
- catch
- error:_ -> spawn(fun() -> start_port_srv({Ref,Client}) end)
- end,
+ Ref = make_ref(),
+ Request = {Ref,self()},
+ {Pid, Mon} = case whereis(?PORT_CREATOR_NAME) of
+ undefined ->
+ spawn_monitor(fun() ->
+ start_port_srv(Request)
+ end);
+ P ->
+ P ! Request,
+ M = erlang:monitor(process, P),
+ {P, M}
+ end,
receive
- {Ref,Port} when is_port(Port) -> Port;
- {Ref,Error} -> exit(Error)
+ {Ref, Port} when is_port(Port) ->
+ erlang:demonitor(Mon, [flush]),
+ Port;
+ {Ref, Error} ->
+ erlang:demonitor(Mon, [flush]),
+ exit(Error);
+ {'DOWN', Mon, process, Pid, _Reason} ->
+ start_port()
end.
start_port_srv(Request) ->
- StayAlive = try register(os_cmd_port_creator, self())
+ %% We don't want a group leader of some random application. Use
+ %% kernel_sup's group leader.
+ {group_leader, GL} = process_info(whereis(kernel_sup),
+ group_leader),
+ true = group_leader(GL, self()),
+ process_flag(trap_exit, true),
+ StayAlive = try register(?PORT_CREATOR_NAME, self())
catch
error:_ -> false
end,
@@ -196,7 +216,7 @@ start_port_srv(Request) ->
start_port_srv_loop({Ref,Client}, StayAlive) ->
Reply = try open_port({spawn, ?SHELL},[stream]) of
Port when is_port(Port) ->
- port_connect(Port, Client),
+ (catch port_connect(Port, Client)),
unlink(Port),
Port
catch
@@ -205,10 +225,19 @@ start_port_srv_loop({Ref,Client}, StayAlive) ->
end,
Client ! {Ref,Reply},
case StayAlive of
- true -> start_port_srv_loop(receive Msg -> Msg end, true);
+ true -> start_port_srv_loop(get_open_port_request(), true);
false -> exiting
end.
+get_open_port_request() ->
+ receive
+ {Ref, Client} = Request when is_reference(Ref),
+ is_pid(Client) ->
+ Request;
+ _Junk ->
+ get_open_port_request()
+ end.
+
%%
%% unix_get_data(Port) -> Result
%%
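%% A minimal sketch (not part of the patch) of the monitor-based request
%% pattern that the reworked start_port/0 above relies on: the caller
%% monitors the (possibly registered) server so that a crash between the
%% whereis/1 lookup and the reply shows up as a 'DOWN' message instead of
%% leaving the caller stuck in a receive. All names below are illustrative
%% only and are not taken from os.erl; unlike start_port/0, this sketch
%% returns an error instead of retrying on 'DOWN'.
-module(monitored_call_sketch).
-export([call/2]).

call(Name, Req) ->
    Ref = make_ref(),
    case whereis(Name) of
        undefined ->
            {error, no_server};
        Pid ->
            Mon = erlang:monitor(process, Pid),
            Pid ! {Ref, self(), Req},
            receive
                {Ref, Reply} ->
                    erlang:demonitor(Mon, [flush]),
                    {ok, Reply};
                {'DOWN', Mon, process, Pid, Reason} ->
                    {error, Reason}
            end
    end.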
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index 1673b33010..6a3534b094 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -20,13 +20,13 @@
-export([all/1]).
-export([space_in_cwd/1, quoting/1, space_in_name/1, bad_command/1,
- find_executable/1, unix_comment_in_command/1]).
+ find_executable/1, unix_comment_in_command/1, evil/1]).
-include("test_server.hrl").
all(suite) ->
[space_in_cwd, quoting, space_in_name, bad_command, find_executable,
- unix_comment_in_command].
+ unix_comment_in_command, evil].
space_in_cwd(doc) ->
"Test that executing a command in a current working directory "
@@ -186,6 +186,48 @@ unix_comment_in_command(Config) when is_list(Config) ->
?line test_server:timetrap_cancel(Dog),
ok.
+-define(EVIL_PROCS, 100).
+-define(EVIL_LOOPS, 100).
+-define(PORT_CREATOR, os_cmd_port_creator).
+evil(Config) when is_list(Config) ->
+ Dog = test_server:timetrap(test_server:minutes(5)),
+ Parent = self(),
+ Ps = lists:map(fun (N) ->
+ spawn_link(fun () ->
+ evil_loop(Parent, ?EVIL_LOOPS,N)
+ end)
+ end, lists:seq(1, ?EVIL_PROCS)),
+ Devil = spawn(fun () -> devil(hd(Ps), hd(lists:reverse(Ps))) end),
+ lists:foreach(fun (P) -> receive {P, done} -> ok end end, Ps),
+ exit(Devil, kill),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+devil(P1, P2) ->
+ erlang:display({?PORT_CREATOR, whereis(?PORT_CREATOR)}),
+ (catch ?PORT_CREATOR ! lists:seq(1,1000000)),
+ (catch ?PORT_CREATOR ! lists:seq(1,666)),
+ (catch ?PORT_CREATOR ! grrrrrrrrrrrrrrrr),
+ (catch ?PORT_CREATOR ! {'EXIT', P1, buhuuu}),
+ (catch ?PORT_CREATOR ! {'EXIT', hd(erlang:ports()), buhuuu}),
+ (catch ?PORT_CREATOR ! {'EXIT', P2, arggggggg}),
+ receive after 500 -> ok end,
+ (catch exit(whereis(?PORT_CREATOR), kill)),
+ (catch ?PORT_CREATOR ! ">8|"),
+ receive after 500 -> ok end,
+ (catch exit(whereis(?PORT_CREATOR), diiiiiiiiiiiiiiiiiiiie)),
+ receive after 100 -> ok end,
+ devil(P1, P2).
+
+evil_loop(Parent, Loops, N) ->
+ Res = integer_to_list(N),
+ evil_loop(Parent, Loops, Res, "echo " ++ Res).
+
+evil_loop(Parent, 0, _Res, _Cmd) ->
+ Parent ! {self(), done};
+evil_loop(Parent, Loops, Res, Cmd) ->
+ comp(Res, os:cmd(Cmd)),
+ evil_loop(Parent, Loops-1, Res, Cmd).
comp(Expected, Got) ->
case strip_nl(Got) of
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index 5b4369740d..fafd1d2c60 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1,20 +1,20 @@
-#
+#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
-#
+#
-KERNEL_VSN = 2.13.5
+KERNEL_VSN = 2.13.5.3
diff --git a/lib/megaco/doc/src/megaco.xml b/lib/megaco/doc/src/megaco.xml
index 0fb9d5aac6..ae9e250965 100644
--- a/lib/megaco/doc/src/megaco.xml
+++ b/lib/megaco/doc/src/megaco.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2000</year><year>2009</year>
+ <year>2000</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>megaco</title>
@@ -40,6 +40,16 @@
<section>
<title>DATA TYPES</title>
<code type="none"><![CDATA[
+megaco_mid() = ip4Address() | ip6Address() |
+ domainName() | deviceName() |
+ mtpAddress()
+ip4Address() = #'IP4Address'{}
+ip6Address() = #'IP6Address'{}
+domainName() = #'DomainName'{}
+deviceName() = pathName()
+pathName() = ia5String(1..64)
+mtpAddress() = octetString(2..4)
+
action_request() = #'ActionRequest'{}
action_reply() = #'ActionReply'{}
error_desc() = #'ErrorDescriptor'{}
diff --git a/lib/megaco/doc/src/megaco_mib.xml b/lib/megaco/doc/src/megaco_mib.xml
index 3c0a549590..f1abe08fb5 100644
--- a/lib/megaco/doc/src/megaco_mib.xml
+++ b/lib/megaco/doc/src/megaco_mib.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2002</year><year>2009</year>
+ <year>2002</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>Megaco mib</title>
@@ -50,15 +50,15 @@
lightweight. I.e. the statistic counters are handled
separately by different entities of the application. For
instance our two transport module(s) (see <seealso marker="megaco_tcp#stats">megaco_tcp</seealso> and <seealso marker="megaco_udp#stats">megaco_udp</seealso>) maintain their
- own counters and the application engine (see <seealso marker="megaco#stats">megaco</seealso>) maintain it's own
+      own counters and the application engine (see <seealso marker="megaco#stats">megaco</seealso>) maintains its own
counters.</p>
     <p>This also means that if a user implements their own transport
- service then it has to maintain it's own statistics.</p>
+ service then it has to maintain its own statistics.</p>
</section>
<section>
<title>Distribution</title>
- <p>Each megaco application maintains it's own set of counters. So
+ <p>Each megaco application maintains its own set of counters. So
in a large (distributed) MG/MGC it could be necessary to
collect the statistics from several nodes (each) running the
megaco application (only one of them with the transport).</p>
diff --git a/lib/megaco/doc/src/megaco_run.xml b/lib/megaco/doc/src/megaco_run.xml
index 3afc638bcf..9ed589b079 100644
--- a/lib/megaco/doc/src/megaco_run.xml
+++ b/lib/megaco/doc/src/megaco_run.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2000</year><year>2009</year>
+ <year>2000</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>Running the stack</title>
@@ -365,7 +365,7 @@
then the specified max message size (see the
<seealso marker="megaco#user_info">max_pdu_size</seealso> option).
Finally, if segmentation is decided, then each action reply
- will make up it's own (segment) message.</p>
+ will make up its own (segment) message.</p>
</item>
</list>
</section>
diff --git a/lib/megaco/doc/src/megaco_user.xml b/lib/megaco/doc/src/megaco_user.xml
index 37942007bc..7332fa684d 100644
--- a/lib/megaco/doc/src/megaco_user.xml
+++ b/lib/megaco/doc/src/megaco_user.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2000</year><year>2009</year>
+ <year>2000</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>megaco_user</title>
@@ -471,7 +471,7 @@ protocol_version() = integer() ]]></code>
<v>transaction_result() = action_reps()</v>
<v>segment_result() = {segment_no(), last_segment(), action_reps()}</v>
<v>action_reps() = [action_reply()]</v>
- <v>failure() = {error, reason()}</v>
+ <v>failure() = {error, reason()} | {error, ReplyNo, reason()}</v>
<v>reason() = transaction_reason() | segment_reason() | user_cancel_reason() | send_reason() | other_reason()</v>
<v>transaction_reason() = error_desc()</v>
<v>segment_reason() = {segment_no(), last_segment(), error_desc()}</v>
@@ -486,6 +486,7 @@ protocol_version() = integer() ]]></code>
<v>send_failed_reason() = {send_message_failed, reason_for_send_failure()}</v>
<v>reason_for_send_failure() = term()</v>
<v>ReplyData = reply_data()</v>
+ <v>ReplyNo = integer() > 0</v>
<v>reply_data() = term()</v>
<v>Extra = term()</v>
</type>
@@ -669,7 +670,7 @@ protocol_version() = integer() ]]></code>
           <p>Invoked when an unexpected message is received.</p>
<p>If a reply to a request is not received in time, the
megaco stack removes all info about the request from
- it's tables. If a reply should arrive after this has been
+ its tables. If a reply should arrive after this has been
done the app has no way of knowing where to send this message.
The message is delivered to the "user" by calling this
function on the local node (the node which has the link).</p>
diff --git a/lib/megaco/doc/src/notes.xml b/lib/megaco/doc/src/notes.xml
index 65bf0345f5..99a3784402 100644
--- a/lib/megaco/doc/src/notes.xml
+++ b/lib/megaco/doc/src/notes.xml
@@ -36,6 +36,68 @@
section is the version number of Megaco.</p>
<section>
+ <title>Megaco 3.14.1</title>
+
+ <p>Version 3.14.1 supports code replacement in runtime from/to
+ version 3.14, 3.13, 3.12 and 3.11.3.</p>
+
+ <section>
+ <title>Improvements and new features</title>
+
+<!--
+ <p>-</p>
+-->
+
+ <list type="bulleted">
+ <item>
+          <p>A minor compiler-related performance improvement. </p>
+ <p>Own Id: OTP-8561</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Fixed bugs and malfunctions</title>
+<!--
+ <p>-</p>
+-->
+
+ <list type="bulleted">
+ <item>
+            <p>A race condition could, during high load, cause both
+              the original and a resent message to be processed and
+              delivered as two separate messages to the user. </p>
+ <p>Note that this solution only protects against multiple
+ reply deliveries! </p>
+ <p>Own Id: OTP-8529</p>
+ <p>Aux Id: Seq 10915</p>
+ </item>
+
+ <item>
+          <p>Fixed shared library installation. </p>
+ <p>The flex shared lib(s) were incorrectly installed as data
+ files. </p>
+ <p>Peter Lemenkov</p>
+ <p>Own Id: OTP-8627</p>
+ </item>
+
+ <item>
+          <p>Eliminated a possible race condition while creating
+ pending counters. </p>
+ <p>Own Id: OTP-8634</p>
+ <p>Aux Id: Seq 11579</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ </section> <!-- 3.14.1 -->
+
+
+ <section>
<title>Megaco 3.14</title>
<p>Version 3.14 supports code replacement in runtime from/to
diff --git a/lib/megaco/doc/src/notes_history.xml b/lib/megaco/doc/src/notes_history.xml
index 97aa4c66a5..640b62230f 100644
--- a/lib/megaco/doc/src/notes_history.xml
+++ b/lib/megaco/doc/src/notes_history.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2006</year><year>2009</year>
+ <year>2006</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>Megaco Release Notes history</title>
@@ -2764,7 +2764,7 @@
<p>Added a new configuration parameter, threaded.
This tells the megaco app, that all
transaction requests in a message should be executed
- in parallel (e.g. each in it's own process).
+ in parallel (e.g. each in its own process).
<br></br>See the
<seealso marker="megaco#user_info">threaded</seealso> parameter
of the user_info function (also conn_info).</p>
@@ -2911,7 +2911,7 @@
<title>Improvements and new features</title>
<list type="bulleted">
<item>
- <p>This is just a code up-/downgrade cleanup release. I.e. It's the
+ <p>This is just a code up-/downgrade cleanup release. I.e. it's the
same as version 1.2 minus the ugly stuff needed to handle up-/downgrade
from/to version 1.1.2, 1.1.1 and 1.1.0.</p>
</item>
diff --git a/lib/megaco/src/app/megaco.appup.src b/lib/megaco/src/app/megaco.appup.src
index 4f781478ef..f939f5e6cf 100644
--- a/lib/megaco/src/app/megaco.appup.src
+++ b/lib/megaco/src/app/megaco.appup.src
@@ -124,14 +124,25 @@
%% |
%% v
%% 3.14
+%% |
+%% v
+%% 3.14.1
%%
%%
{"%VSN%",
[
+ {"3.14",
+ [
+ {load_module, megaco_messenger, soft_purge, soft_purge, [megaco_monitor]},
+ {update, megaco_monitor, soft, soft_purge, soft_purge, []},
+ {update, megaco_config, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"3.13",
[
- {load_module, megaco_messenger, soft_purge, soft_purge, []},
+ {load_module, megaco_messenger, soft_purge, soft_purge, [megaco_monitor]},
{load_module, megaco_filter, soft_purge, soft_purge, []},
+ {update, megaco_monitor, soft, soft_purge, soft_purge, []},
{update, megaco_config, soft, soft_purge, soft_purge, []},
{update, megaco_flex_scanner_handler, {advanced, downgrade_to_pre_3_13_1},
soft_purge, soft_purge, []}
@@ -163,10 +174,18 @@
}
],
[
+ {"3.14",
+ [
+ {load_module, megaco_messenger, soft_purge, soft_purge, [megaco_monitor]},
+ {update, megaco_monitor, soft, soft_purge, soft_purge, []},
+ {update, megaco_config, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"3.13",
[
- {load_module, megaco_messenger, soft_purge, soft_purge, []},
+ {load_module, megaco_messenger, soft_purge, soft_purge, [megaco_monitor]},
{load_module, megaco_filter, soft_purge, soft_purge, []},
+ {update, megaco_monitor, soft, soft_purge, soft_purge, []},
{update, megaco_config, soft, soft_purge, soft_purge, []},
{update, megaco_flex_scanner_handler, {advanced, upgrade_from_pre_3_13_1},
soft_purge, soft_purge, []}
diff --git a/lib/megaco/src/app/megaco_internal.hrl b/lib/megaco/src/app/megaco_internal.hrl
index adbaacacef..2c124e9060 100644
--- a/lib/megaco/src/app/megaco_internal.hrl
+++ b/lib/megaco/src/app/megaco_internal.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -139,6 +139,22 @@
[?APPLICATION, ?MODULE, self()|A]))).
+-define(megaco_ereport(Label, Report),
+ ?megaco_report(error_report, Label, Report)).
+
+-define(megaco_wreport(Label, Report),
+ ?megaco_report(warning_report, Label, Report)).
+
+-define(megaco_ireport(Label, Report),
+ ?megaco_report(info_report, Label, Report)).
+
+-define(megaco_report(Func, Label, Report),
+ (catch error_logger:Func([{label, Label},
+ {application, ?APPLICATION},
+ {module, ?MODULE},
+ {process, self()} | Report]))).
+
+
%%%----------------------------------------------------------------------
%%% Default (ignore) value of the Extra argument to the
%%% megaco:receive_message/5 and process_received_message functions/5.
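%% A minimal usage sketch (not part of the patch) for the report macros
%% added above. Assuming ?APPLICATION expands to megaco, as elsewhere in
%% this header, a call such as
%%
%%   ?megaco_ereport(connection_failed, [{conn_handle, CH}, {reason, R}])
%%
%% expands, roughly, to
%%
%%   catch error_logger:error_report([{label,       connection_failed},
%%                                    {application, megaco},
%%                                    {module,      ?MODULE},
%%                                    {process,     self()},
%%                                    {conn_handle, CH},
%%                                    {reason,      R}])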
diff --git a/lib/megaco/src/engine/megaco_config.erl b/lib/megaco/src/engine/megaco_config.erl
index 1c7a141be7..6805db790d 100644
--- a/lib/megaco/src/engine/megaco_config.erl
+++ b/lib/megaco/src/engine/megaco_config.erl
@@ -224,149 +224,278 @@ update_user_info(UserMid, orig_pending_limit, Val) ->
update_user_info(UserMid, Item, Val) ->
call({update_user_info, UserMid, Item, Val}).
-conn_info(CH, Item)
- when is_record(CH, megaco_conn_handle) andalso (Item /= cancel) ->
- case Item of
- conn_handle ->
- CH;
- mid ->
- CH#megaco_conn_handle.local_mid;
- local_mid ->
- CH#megaco_conn_handle.local_mid;
- remote_mid ->
- CH#megaco_conn_handle.remote_mid;
- conn_data ->
- case lookup_local_conn(CH) of
- [] ->
- exit({no_such_connection, CH});
- [ConnData] ->
- ConnData
- end;
- _ ->
- case lookup_local_conn(CH) of
- [] ->
- exit({no_such_connection, CH});
- [ConnData] ->
- conn_info(ConnData, Item)
- end
+
+conn_info(Data, Item) ->
+ %% The purpose of this is a compiler optimization...
+ %% Args are processed from left to right.
+ do_conn_info(Item, Data).
+
+do_conn_info(mid = _Item, #megaco_conn_handle{local_mid = Mid}) ->
+ Mid;
+do_conn_info(local_mid = _Item, #megaco_conn_handle{local_mid = LMid}) ->
+ LMid;
+do_conn_info(remote_mid = _Item, #megaco_conn_handle{remote_mid = RMid}) ->
+ RMid;
+do_conn_info(conn_handle = _Item, CH) when is_record(CH, megaco_conn_handle) ->
+ CH;
+do_conn_info(conn_data = _Item, CH) when is_record(CH, megaco_conn_handle) ->
+ case lookup_local_conn(CH) of
+ [] ->
+ exit({no_such_connection, CH});
+ [ConnData] ->
+ ConnData
+ end;
+do_conn_info(Item, CH) when is_record(CH, megaco_conn_handle) ->
+ case lookup_local_conn(CH) of
+ [] ->
+ exit({no_such_connection, CH});
+ [ConnData] ->
+ do_conn_info(Item, ConnData)
end;
-conn_info(#conn_data{conn_handle = CH}, cancel) ->
+
+do_conn_info(cancel = _Item, #conn_data{conn_handle = CH}) ->
+    %% To minimise the race-condition probability,
+ %% we always look in the table instead of
+ %% in the record for this one
+ ets:lookup_element(megaco_local_conn, CH, #conn_data.cancel);
+do_conn_info(cancel = _Item, CH) when is_record(CH, megaco_conn_handle) ->
     %% To minimise the race-condition probability,
%% we always look in the table instead of
%% in the record for this one
ets:lookup_element(megaco_local_conn, CH, #conn_data.cancel);
-conn_info(CD, Item) when is_record(CD, conn_data) ->
- case Item of
- all ->
- Tags0 = record_info(fields, conn_data),
- Tags1 = replace(serial, trans_id, Tags0),
- Tags = [mid, local_mid, remote_mid] ++
- replace(max_serial, max_trans_id, Tags1),
- [{Tag, conn_info(CD,Tag)} || Tag <- Tags,
- Tag /= conn_data,
- Tag /= trans_sender,
- Tag /= cancel];
- conn_data -> CD;
- conn_handle -> CD#conn_data.conn_handle;
- mid -> (CD#conn_data.conn_handle)#megaco_conn_handle.local_mid;
- local_mid -> (CD#conn_data.conn_handle)#megaco_conn_handle.local_mid;
- remote_mid -> (CD#conn_data.conn_handle)#megaco_conn_handle.remote_mid;
- trans_id -> CH = CD#conn_data.conn_handle,
- LocalMid = CH#megaco_conn_handle.local_mid,
- Item2 = {LocalMid, trans_id_counter},
- case (catch ets:lookup(megaco_config, Item2)) of
- {'EXIT', _} ->
- undefined_serial;
- [] ->
- user_info(LocalMid, min_trans_id);
- [{_, Serial}] ->
- Max = CD#conn_data.max_serial,
- if
- ((Max =:= infinity) andalso
- is_integer(Serial) andalso
- (Serial < 4294967295)) ->
- Serial + 1;
- (Max =:= infinity) andalso
- is_integer(Serial) andalso
- (Serial =:= 4294967295) ->
- user_info(LocalMid,
- min_trans_id);
- Serial < Max ->
- Serial + 1;
- Serial =:= Max ->
- user_info(LocalMid,
- min_trans_id);
- Serial =:= 4294967295 ->
- user_info(LocalMid,
- min_trans_id);
- true ->
- undefined_serial
- end
- end;
- max_trans_id -> CD#conn_data.max_serial;
- request_timer -> CD#conn_data.request_timer;
- long_request_timer -> CD#conn_data.long_request_timer;
-
- auto_ack -> CD#conn_data.auto_ack;
-
- trans_ack -> CD#conn_data.trans_ack;
- trans_ack_maxcount -> CD#conn_data.trans_ack_maxcount;
-
- trans_req -> CD#conn_data.trans_req;
- trans_req_maxcount -> CD#conn_data.trans_req_maxcount;
- trans_req_maxsize -> CD#conn_data.trans_req_maxsize;
-
- trans_timer -> CD#conn_data.trans_timer;
-
- pending_timer -> CD#conn_data.pending_timer;
- orig_pending_limit -> CD#conn_data.sent_pending_limit;
- sent_pending_limit -> CD#conn_data.sent_pending_limit;
- recv_pending_limit -> CD#conn_data.recv_pending_limit;
- reply_timer -> CD#conn_data.reply_timer;
- control_pid -> CD#conn_data.control_pid;
- monitor_ref -> CD#conn_data.monitor_ref;
- send_mod -> CD#conn_data.send_mod;
- send_handle -> CD#conn_data.send_handle;
- encoding_mod -> CD#conn_data.encoding_mod;
- encoding_config -> CD#conn_data.encoding_config;
- protocol_version -> CD#conn_data.protocol_version;
- auth_data -> CD#conn_data.auth_data;
- user_mod -> CD#conn_data.user_mod;
- user_args -> CD#conn_data.user_args;
- reply_action -> CD#conn_data.reply_action;
- reply_data -> CD#conn_data.reply_data;
- threaded -> CD#conn_data.threaded;
- strict_version -> CD#conn_data.strict_version;
- long_request_resend -> CD#conn_data.long_request_resend;
- call_proxy_gc_timeout -> CD#conn_data.call_proxy_gc_timeout;
- cancel -> CD#conn_data.cancel;
- resend_indication -> CD#conn_data.resend_indication;
- segment_reply_ind -> CD#conn_data.segment_reply_ind;
- segment_recv_acc -> CD#conn_data.segment_recv_acc;
- segment_recv_timer -> CD#conn_data.segment_recv_timer;
- segment_send -> CD#conn_data.segment_send;
- segment_send_timer -> CD#conn_data.segment_send_timer;
- max_pdu_size -> CD#conn_data.max_pdu_size;
- request_keep_alive_timeout -> CD#conn_data.request_keep_alive_timeout;
- receive_handle ->
- LocalMid = (CD#conn_data.conn_handle)#megaco_conn_handle.local_mid,
- #megaco_receive_handle{local_mid = LocalMid,
- encoding_mod = CD#conn_data.encoding_mod,
- encoding_config = CD#conn_data.encoding_config,
- send_mod = CD#conn_data.send_mod};
- _ ->
- exit({no_such_item, Item})
+do_conn_info(all = _Item,
+ #conn_data{conn_handle = CH,
+ serial = TransId,
+ max_serial = MaxTransId,
+ request_timer = ReqTmr,
+ long_request_timer = LongReqTmr,
+ auto_ack = AutoAck,
+ trans_ack = TransAck,
+ trans_ack_maxcount = TransAckMaxCount,
+ trans_req = TransReq,
+ trans_req_maxcount = TransReqMaxCount,
+ trans_req_maxsize = TransReqMaxSz,
+ trans_timer = TransTmr,
+ %% trans_sender,
+ pending_timer = PendingTmr,
+ sent_pending_limit = SentPendingLimit,
+ recv_pending_limit = RecvPendingLimit,
+ reply_timer = ReplyTmr,
+ control_pid = CtrlPid,
+ monitor_ref = MonRef,
+ send_mod = SendMod,
+ send_handle = SendHandle,
+ encoding_mod = EncodingMod,
+ encoding_config = EncodingConf,
+ protocol_version = ProtoVersion,
+ auth_data = AuthData,
+ user_mod = UserMod,
+ user_args = UserArgs,
+ reply_action = ReplyAction,
+ reply_data = ReplyData,
+ threaded = Threaded,
+ strict_version = StrictVersion,
+ long_request_resend = LongReqResend,
+ call_proxy_gc_timeout = CallProxyGCTimeout,
+ %% cancel,
+ resend_indication = ResendInd,
+ segment_reply_ind = SegReplyInd,
+ segment_recv_acc = SegRecvAcc,
+ segment_recv_timer = SegRecvTmr,
+ segment_send = SegSend,
+ segment_send_timer = SegSendTmr,
+ max_pdu_size = MaxPduSz,
+ request_keep_alive_timeout = RequestKeepAliveTmr}) ->
+ [{conn_handle, CH},
+ {trans_id, TransId},
+ {max_trans_id, MaxTransId},
+ {request_timer, ReqTmr},
+ {long_request_timer, LongReqTmr},
+ {mid, CH#megaco_conn_handle.local_mid},
+ {local_mid, CH#megaco_conn_handle.local_mid},
+ {remote_mid, CH#megaco_conn_handle.remote_mid},
+ {auto_ack, AutoAck},
+ {trans_ack, TransAck},
+ {trans_ack_maxcount, TransAckMaxCount},
+ {trans_req, TransReq},
+ {trans_req_maxcount, TransReqMaxCount},
+ {trans_req_maxsize, TransReqMaxSz},
+ {trans_timer, TransTmr},
+ {pending_timer, PendingTmr},
+ {sent_pending_limit, SentPendingLimit},
+ {recv_pending_limit, RecvPendingLimit},
+ {reply_timer, ReplyTmr},
+ {control_pid, CtrlPid},
+ {monitor_ref, MonRef},
+ {send_mod, SendMod},
+ {send_handle, SendHandle},
+ {encoding_mod, EncodingMod},
+ {encoding_config, EncodingConf},
+ {protocol_version, ProtoVersion},
+ {auth_data, AuthData},
+ {user_mod, UserMod},
+ {user_args, UserArgs},
+ {reply_action, ReplyAction},
+ {reply_data, ReplyData},
+ {threaded, Threaded},
+ {strict_version, StrictVersion},
+ {long_request_resend, LongReqResend},
+ {call_proxy_gc_timeout, CallProxyGCTimeout},
+ {resend_indication, ResendInd},
+ {segment_reply_ind, SegReplyInd},
+ {segment_recv_acc, SegRecvAcc},
+ {segment_recv_timer, SegRecvTmr},
+ {segment_send, SegSend},
+ {segment_send_timer, SegSendTmr},
+ {max_pdu_size, MaxPduSz},
+ {request_keep_alive_timeout, RequestKeepAliveTmr}];
+
+do_conn_info(conn_data = _Item, CD) ->
+ CD;
+do_conn_info(conn_handle = _Item, #conn_data{conn_handle = Val}) ->
+ Val;
+do_conn_info(mid = _Item,
+ #conn_data{conn_handle = #megaco_conn_handle{local_mid = Val}}) ->
+ Val;
+do_conn_info(local_mid = _Item,
+ #conn_data{conn_handle = #megaco_conn_handle{local_mid = Val}}) ->
+ Val;
+do_conn_info(remote_mid = _Item,
+ #conn_data{conn_handle = #megaco_conn_handle{remote_mid = Val}}) ->
+ Val;
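+%% Note on trans_id below: the next transaction id is taken from the
+%% per-MID counter {LocalMid, trans_id_counter} in the megaco_config
+%% table. If that counter has not been created yet, the user's
+%% min_trans_id is returned; if the counter has reached max_serial
+%% (or the 32-bit limit 4294967295) the id wraps back to
+%% min_trans_id; otherwise the next id is the counter value plus one.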
+do_conn_info(trans_id = _Item,
+ #conn_data{conn_handle = #megaco_conn_handle{local_mid = LMid},
+ max_serial = Max}) ->
+ Item2 = {LMid, trans_id_counter},
+ case (catch ets:lookup(megaco_config, Item2)) of
+ {'EXIT', _} ->
+ undefined_serial;
+ [] ->
+ user_info(LMid, min_trans_id);
+ [{_, Serial}] ->
+ if
+ ((Max =:= infinity) andalso
+ is_integer(Serial) andalso
+ (Serial < 4294967295)) ->
+ Serial + 1;
+ ((Max =:= infinity) andalso
+ is_integer(Serial) andalso
+ (Serial =:= 4294967295)) ->
+ user_info(LMid, min_trans_id);
+ Serial < Max ->
+ Serial + 1;
+ Serial =:= Max ->
+ user_info(LMid, min_trans_id);
+ Serial =:= 4294967295 ->
+ user_info(LMid, min_trans_id);
+ true ->
+ undefined_serial
+ end
end;
-conn_info(BadHandle, _Item) ->
- {error, {no_such_connection, BadHandle}}.
-
-replace(_, _, []) ->
- [];
-replace(Item, WithItem, [Item|List]) ->
- [WithItem|List];
-replace(Item, WithItem, [OtherItem|List]) ->
- [OtherItem | replace(Item, WithItem, List)].
+do_conn_info(max_trans_id = _Item, #conn_data{max_serial = Val}) ->
+ Val;
+do_conn_info(request_timer = _Item, #conn_data{request_timer = Val}) ->
+ Val;
+do_conn_info(long_request_timer = _Item, #conn_data{long_request_timer = Val}) ->
+ Val;
+do_conn_info(auto_ack = _Item, #conn_data{auto_ack = Val}) ->
+ Val;
+do_conn_info(trans_ack = _Item, #conn_data{trans_ack = Val}) ->
+ Val;
+do_conn_info(trans_ack_maxcount = _Item, #conn_data{trans_ack_maxcount = Val}) ->
+ Val;
+do_conn_info(trans_req = _Item, #conn_data{trans_req = Val}) ->
+ Val;
+do_conn_info(trans_req_maxcount = _Item, #conn_data{trans_req_maxcount = Val}) ->
+ Val;
+do_conn_info(trans_req_maxsize = _Item, #conn_data{trans_req_maxsize = Val}) ->
+ Val;
+do_conn_info(trans_timer = _Item, #conn_data{trans_timer = Val}) ->
+ Val;
+do_conn_info(pending_timer = _Item, #conn_data{pending_timer = Val}) ->
+ Val;
+do_conn_info(orig_pending_limit = _Item, #conn_data{sent_pending_limit = Val}) ->
+ Val;
+do_conn_info(sent_pending_limit = _Item, #conn_data{sent_pending_limit = Val}) ->
+ Val;
+do_conn_info(recv_pending_limit = _Item, #conn_data{recv_pending_limit = Val}) ->
+ Val;
+do_conn_info(reply_timer = _Item, #conn_data{reply_timer = Val}) ->
+ Val;
+do_conn_info(control_pid = _Item, #conn_data{control_pid = Val}) ->
+ Val;
+do_conn_info(send_mod = _Item, #conn_data{send_mod = Val}) ->
+ Val;
+do_conn_info(send_handle = _Item, #conn_data{send_handle = Val}) ->
+ Val;
+do_conn_info(encoding_mod = _Item, #conn_data{encoding_mod = Val}) ->
+ Val;
+do_conn_info(encoding_config = _Item, #conn_data{encoding_config = Val}) ->
+ Val;
+do_conn_info(protocol_version = _Item, #conn_data{protocol_version = Val}) ->
+ Val;
+do_conn_info(auth_data = _Item, #conn_data{auth_data = Val}) ->
+ Val;
+do_conn_info(user_mod = _Item, #conn_data{user_mod = Val}) ->
+ Val;
+do_conn_info(user_args = _Item, #conn_data{user_args = Val}) ->
+ Val;
+do_conn_info(reply_action = _Item, #conn_data{reply_action = Val}) ->
+ Val;
+do_conn_info(reply_data = _Item, #conn_data{reply_data = Val}) ->
+ Val;
+do_conn_info(threaded = _Item, #conn_data{threaded = Val}) ->
+ Val;
+do_conn_info(strict_version = _Item, #conn_data{strict_version = Val}) ->
+ Val;
+do_conn_info(long_request_resend = _Item,
+ #conn_data{long_request_resend = Val}) ->
+ Val;
+do_conn_info(call_proxy_gc_timeout = _Item,
+ #conn_data{call_proxy_gc_timeout = Val}) ->
+ Val;
+do_conn_info(resend_indication = _Item, #conn_data{resend_indication = Val}) ->
+ Val;
+do_conn_info(segment_reply_ind = _Item, #conn_data{segment_reply_ind = Val}) ->
+ Val;
+do_conn_info(segment_recv_acc = _Item, #conn_data{segment_recv_acc = Val}) ->
+ Val;
+do_conn_info(segment_recv_timer = _Item,
+ #conn_data{segment_recv_timer = Val}) ->
+ Val;
+do_conn_info(segment_send = _Item, #conn_data{segment_send = Val}) ->
+ Val;
+do_conn_info(segment_send_timer = _Item,
+ #conn_data{segment_send_timer = Val}) ->
+ Val;
+do_conn_info(max_pdu_size = _Item, #conn_data{max_pdu_size = Val}) ->
+ Val;
+do_conn_info(request_keep_alive_timeout = _Item,
+ #conn_data{request_keep_alive_timeout = Val}) ->
+ Val;
+do_conn_info(receive_handle = _Item,
+ #conn_data{conn_handle = #megaco_conn_handle{local_mid = LMid},
+ encoding_mod = EM,
+ encoding_config = EC,
+ send_mod = SM}) ->
+ #megaco_receive_handle{local_mid = LMid,
+ encoding_mod = EM,
+ encoding_config = EC,
+ send_mod = SM};
+do_conn_info(Item, Data)
+ when is_record(Data, conn_data) orelse is_record(Data, megaco_conn_handle) ->
+ exit({no_such_item, Item});
+do_conn_info(_Item, BadData) ->
+ {error, {no_such_connection, BadData}}.
+
+
+%% replace(_, _, []) ->
+%% [];
+%% replace(Item, WithItem, [Item|List]) ->
+%% [WithItem|List];
+%% replace(Item, WithItem, [OtherItem|List]) ->
+%% [OtherItem | replace(Item, WithItem, List)].
update_conn_info(#conn_data{conn_handle = CH}, Item, Val) ->
@@ -499,31 +628,19 @@ incr_counter(Item, Incr) ->
end
catch
error:_ ->
+                %% Counter does not exist, so try to create it
try
begin
cre_counter(Item, Incr)
end
catch
exit:_ ->
- %% Ok, some other process got there before us,
- %% so try again
+                        %% This is a race condition.
+ %% When we tried to update the counter above, it
+ %% did not exist, but now it does...
ets:update_counter(megaco_config, Item, Incr)
end
end.
-%% incr_counter(Item, Incr) ->
-%% case (catch ets:update_counter(megaco_config, Item, Incr)) of
-%% {'EXIT', _} ->
-%% case (catch cre_counter(Item, Incr)) of
-%% {'EXIT', _} ->
-%% %% Ok, some other process got there before us,
-%% %% so try again
-%% ets:update_counter(megaco_config, Item, Incr);
-%% NewVal ->
-%% NewVal
-%% end;
-%% NewVal ->
-%% NewVal
-%% end.
cre_counter(Item, Initial) ->
case whereis(?SERVER) =:= self() of
@@ -531,8 +648,8 @@ cre_counter(Item, Initial) ->
case call({cre_counter, Item, Initial}) of
{ok, Value} ->
Value;
- Error ->
- exit(Error)
+ {error, Reason} ->
+ exit({failed_creating_counter, Item, Initial, Reason})
end;
true ->
            %% Check that the counter does not already exist
@@ -542,7 +659,7 @@ cre_counter(Item, Initial) ->
ets:insert(megaco_config, {Item, Initial}),
{ok, Initial};
[_] ->
- %% Ouch, now what?
+                    %% Possibly a race condition
{error, already_exists}
end
diff --git a/lib/megaco/src/engine/megaco_messenger.erl b/lib/megaco/src/engine/megaco_messenger.erl
index 5756e8e896..5fad29931b 100644
--- a/lib/megaco/src/engine/megaco_messenger.erl
+++ b/lib/megaco/src/engine/megaco_messenger.erl
@@ -1541,30 +1541,6 @@ check_pending_limit(Limit, Direction, TransId) ->
aborted
end.
-%% check_pending_limit(infinity, _, _) ->
-%% {ok, 0};
-%% check_pending_limit(Limit, Direction, TransId) ->
-%% ?rt2("check pending limit", [Direction, Limit, TransId]),
-%% case (catch megaco_config:get_pending_counter(Direction, TransId)) of
-%% {'EXIT', _} ->
-%% %% This function is only called when we "know" the
-%% %% counter to exist. So, the only reason that this
-%% %% would happen is of the counter has been removed.
-%% %% This only happen if the pending limit has been
-%% %% reached. In any case, this is basically the same
-%% %% as aborted!
-%% ?rt2("check pending limit - exit", []),
-%% aborted;
-%% Val when Val =< Limit ->
-%% %% Since we have no intention to increment here, it
-%% %% is ok to be _at_ the limit
-%% ?rt2("check pending limit - ok", [Val]),
-%% {ok, Val};
-%% _Val ->
-%% ?rt2("check pending limit - aborted", [_Val]),
-%% aborted
-%% end.
-
check_and_maybe_incr_pending_limit(infinity, _, _) ->
ok;
@@ -1572,59 +1548,42 @@ check_and_maybe_incr_pending_limit(Limit, Direction, TransId) ->
%%
%% We need this kind of test to detect when we _pass_ the limit
%%
- ?rt2("check and maybe incr pending limit", [Direction, Limit, TransId]),
+ ?rt2("check and maybe incr pending limit", [{direction, Direction},
+ {transaction_id, TransId},
+ {counter_limit, Limit}]),
try megaco_config:get_pending_counter(Direction, TransId) of
Val when Val > Limit ->
- ?rt2("check and maybe incr - aborted", [Direction, Val, Limit]),
+ ?rt2("check and maybe incr - aborted", [{counter_value, Val}]),
aborted; % Already passed the limit
Val ->
- ?rt2("check and maybe incr - incr", [Direction, Val, Limit]),
+ ?rt2("check and maybe incr - incr", [{counter_value, Val}]),
megaco_config:incr_pending_counter(Direction, TransId),
if
Val < Limit ->
ok; % Still within the limit
true ->
?rt2("check and maybe incr - error",
- [Direction, Val, Limit]),
+ [{counter_value, Val}]),
error % Passed the limit
end
catch
_:_ ->
%% Has not been created yet (connect).
- megaco_config:cre_pending_counter(Direction, TransId, 1),
- ok
+            %% Try to create it, but beware of a possible race condition
+ try
+ begin
+ megaco_config:cre_pending_counter(Direction, TransId, 1),
+ ok
+ end
+ catch
+ _:_ ->
+                    %% Ouch, race condition; increment instead...
+ megaco_config:incr_pending_counter(Direction, TransId),
+ ok
+ end
end.
-%% check_and_maybe_incr_pending_limit(infinity, _, _) ->
-%% ok;
-%% check_and_maybe_incr_pending_limit(Limit, Direction, TransId) ->
-%% %%
-%% %% We need this kind of test to detect when we _pass_ the limit
-%% %%
-%% ?rt2("check and maybe incr pending limit", [Direction, Limit, TransId]),
-%% case (catch megaco_config:get_pending_counter(Direction, TransId)) of
-%% {'EXIT', _} ->
-%% %% Has not been created yet (connect).
-%% megaco_config:cre_pending_counter(Direction, TransId, 1),
-%% ok;
-%% Val when Val > Limit ->
-%% ?rt2("check and maybe incr - aborted", [Direction, Val, Limit]),
-%% aborted; % Already passed the limit
-%% Val ->
-%% ?rt2("check and maybe incr - incr", [Direction, Val, Limit]),
-%% megaco_config:incr_pending_counter(Direction, TransId),
-%% if
-%% Val < Limit ->
-%% ok; % Still within the limit
-%% true ->
-%% ?rt2("check and maybe incr - error",
-%% [Direction, Val, Limit]),
-%% error % Passed the limit
-%% end
-%% end.
-
-
%% BUGBUG BUGBUG BUGBUG
%%
%% Do we know that the Rep is still valid? A previous transaction
@@ -2648,33 +2607,84 @@ handle_reply(
handle_reply(#conn_data{conn_handle = CH} = CD, T, Extra) ->
TransId = to_local_trans_id(CD),
?rt2("handle reply", [T, TransId]),
- case megaco_monitor:lookup_request(TransId) of
- [Req] when (is_record(Req, request) andalso
- (CD#conn_data.cancel =:= true)) ->
+ case {megaco_monitor:request_lockcnt_inc(TransId),
+ megaco_monitor:lookup_request(TransId)} of
+ {_Cnt, [Req]} when (is_record(Req, request) andalso
+ (CD#conn_data.cancel =:= true)) ->
?TC_AWAIT_REPLY_EVENT(true),
+ ?report_trace(CD, "trans reply - cancel(1)", [T]),
do_handle_reply_cancel(CD, Req, T);
- [#request{remote_mid = RMid} = Req] when ((RMid =:= preliminary_mid) orelse
- (RMid =:= CH#megaco_conn_handle.remote_mid)) ->
+ {Cnt, [#request{remote_mid = RMid} = Req]} when
+ ((Cnt =:= 1) andalso
+ ((RMid =:= preliminary_mid) orelse
+ (RMid =:= CH#megaco_conn_handle.remote_mid))) ->
+ ?TC_AWAIT_REPLY_EVENT(false),
+            %% Just in case conn_data got updated after our lookup
+ %% but before we looked up the request record, we
+ %% check the cancel field again.
+ case megaco_config:conn_info(CD, cancel) of
+ true ->
+ ?report_trace(CD, "trans reply - cancel(2)", [T]),
+ megaco_monitor:request_lockcnt_del(TransId),
+ do_handle_reply_cancel(CD, Req, T);
+ false ->
+ ?report_trace(CD, "trans reply", [T]),
+ do_handle_reply(CD, Req, TransId, T, Extra)
+ end;
+
+ {Cnt, [#request{remote_mid = RMid} = _Req]} when
+ (is_integer(Cnt) andalso
+ ((RMid =:= preliminary_mid) orelse
+ (RMid =:= CH#megaco_conn_handle.remote_mid))) ->
+ ?TC_AWAIT_REPLY_EVENT(false),
+ %% Ok, someone got there before me, now what?
+            %% This is a plain old race condition
+            ?report_important(CD, "trans reply - race condition",
+ [T, {request_lockcnt, Cnt}]),
+ megaco_monitor:request_lockcnt_dec(TransId);
+
+ %% no counter
+ {_Cnt, [#request{remote_mid = RMid} = Req]} when
+ ((RMid =:= preliminary_mid) orelse
+ (RMid =:= CH#megaco_conn_handle.remote_mid)) ->
?TC_AWAIT_REPLY_EVENT(false),
+ %% The counter does not exist.
+            %% This can only mean a code upgrade race condition.
+            %% That is, this request record was created before
+            %% this feature (the counters) was introduced.
+            %% The simplest solution to this is to behave exactly as
+            %% before, that is, pass it along and leave it to the
+            %% user to figure out.
+
%% Just in case conn_data got update after our lookup
%% but before we looked up the request record, we
%% check the cancel field again.
+ ?report_verbose(CD, "trans reply - old style", [T]),
case megaco_config:conn_info(CD, cancel) of
true ->
+ megaco_monitor:request_lockcnt_del(TransId),
do_handle_reply_cancel(CD, Req, T);
false ->
do_handle_reply(CD, Req, TransId, T, Extra)
end;
- [#request{user_mod = UserMod,
- user_args = UserArgs,
- reply_action = Action,
- reply_data = UserData,
- remote_mid = RMid}] ->
+ {Cnt, [#request{user_mod = UserMod,
+ user_args = UserArgs,
+ reply_action = Action,
+ reply_data = UserData,
+ remote_mid = RMid}]} ->
?report_trace(CD,
"received trans reply with invalid remote mid",
- [T, RMid]),
+ [{transaction, T},
+ {remote_mid, RMid},
+ {request_lockcnt, Cnt}]),
+ if
+ is_integer(Cnt) ->
+ megaco_monitor:request_lockcnt_dec(TransId);
+ true ->
+ ok
+ end,
WrongMid = CH#megaco_conn_handle.remote_mid,
T2 = transform_transaction_reply_enc(CD#conn_data.protocol_version,
T),
@@ -2685,7 +2695,15 @@ handle_reply(#conn_data{conn_handle = CH} = CD, T, Extra) ->
reply_data = UserData},
return_reply(CD2, TransId, UserReply, Extra);
- [] ->
+ {Cnt, []} when is_integer(Cnt) ->
+ ?TC_AWAIT_REPLY_EVENT(undefined),
+ ?report_trace(CD, "trans reply (no receiver)",
+ [T, {request_lockcnt, Cnt}]),
+ megaco_monitor:request_lockcnt_dec(TransId),
+ return_unexpected_trans(CD, T, Extra);
+
+ %% No counter
+ {_Cnt, []} ->
?TC_AWAIT_REPLY_EVENT(undefined),
?report_trace(CD, "trans reply (no receiver)", [T]),
return_unexpected_trans(CD, T, Extra)
@@ -2716,6 +2734,7 @@ do_handle_reply(CD,
%% This is the first reply (maybe of many)
megaco_monitor:delete_request(TransId),
+ megaco_monitor:request_lockcnt_del(TransId),
megaco_monitor:cancel_apply_after(Ref), % OTP-4843
megaco_config:del_pending_counter(recv, TransId), % OTP-7189
@@ -3739,6 +3758,11 @@ insert_requests(ConnData, ConnHandle,
insert_request(ConnData, ConnHandle, TransId,
Action, Data, InitTimer, LongTimer) ->
+    %% We don't check the result of the lock-counter creation because
+ %% the only way it could already exist is if the transaction-id
+ %% range has wrapped and an old counter was not deleted.
+ megaco_monitor:request_lockcnt_cre(TransId),
+
#megaco_conn_handle{remote_mid = RemoteMid} = ConnHandle,
#conn_data{protocol_version = Version,
user_mod = UserMod,
@@ -4323,6 +4347,7 @@ cancel_request(ConnData, Req, Reason) ->
cancel_request2(ConnData, TransId, UserReply) ->
megaco_monitor:delete_request(TransId),
+ megaco_monitor:request_lockcnt_del(TransId),
megaco_config:del_pending_counter(recv, TransId), % OTP-7189
Serial = TransId#trans_id.serial,
ConnData2 = ConnData#conn_data{serial = Serial},
@@ -4380,29 +4405,67 @@ receive_reply_remote(ConnData, UserReply) ->
receive_reply_remote(ConnData, UserReply, Extra) ->
TransId = to_local_trans_id(ConnData),
- case (catch megaco_monitor:lookup_request(TransId)) of
- [#request{timer_ref = {_Type, Ref}} = Req] -> %% OTP-4843
+ case {megaco_monitor:request_lockcnt_inc(TransId),
+ (catch megaco_monitor:lookup_request(TransId))} of
+ {Cnt, [Req]} when (Cnt =:= 1) andalso is_record(Req, request) ->
%% Don't care about Req and Rep version diff
- megaco_monitor:delete_request(TransId),
- megaco_monitor:cancel_apply_after(Ref), % OTP-4843
- megaco_config:del_pending_counter(recv, TransId), % OTP-7189
-
- UserMod = Req#request.user_mod,
- UserArgs = Req#request.user_args,
- Action = Req#request.reply_action,
- UserData = Req#request.reply_data,
- ConnData2 = ConnData#conn_data{user_mod = UserMod,
- user_args = UserArgs,
- reply_action = Action,
- reply_data = UserData},
- return_reply(ConnData2, TransId, UserReply, Extra);
-
+ do_receive_reply_remote(ConnData, TransId, Req, UserReply, Extra);
+
+ {Cnt, [Req]} when is_integer(Cnt) andalso is_record(Req, request) ->
+            %% Another process is accessing it; handle as unexpected
+            %% (so it has a possibility of being logged).
+ ?report_important(ConnData, "trans reply (no receiver)",
+ [{user_reply, UserReply},
+ {request_lockcnt, Cnt}]),
+ megaco_monitor:request_lockcnt_dec(TransId),
+ return_unexpected_trans_reply(ConnData, TransId, UserReply, Extra);
+
+ %% no counter
+ {_Cnt, [Req]} when is_record(Req, request) ->
+ %% The counter does not exist.
+            %% This can only mean a code upgrade race condition.
+            %% That is, this request record was created before
+            %% this feature (the counters) was introduced.
+            %% The simplest solution to this is to behave exactly as
+ %% before, that is, pass it along, and leave it to the
+ %% user to figure out.
+ ?report_trace(ConnData,
+ "remote reply - "
+ "code upgrade raise condition",
+                          "code upgrade race condition",
+ do_receive_reply_remote(ConnData, TransId, Req, UserReply, Extra);
+
+ {Cnt, _} when is_integer(Cnt) ->
+ ?report_trace(ConnData, "trans reply (no receiver)",
+ [{user_reply, UserReply}, {request_lockcnt, Cnt}]),
+ megaco_monitor:request_lockcnt_dec(TransId),
+ return_unexpected_trans_reply(ConnData, TransId, UserReply, Extra);
+
_ ->
?report_trace(ConnData, "remote reply (no receiver)",
- [UserReply]),
+ [{user_reply, UserReply}]),
return_unexpected_trans_reply(ConnData, TransId, UserReply, Extra)
end.
+do_receive_reply_remote(ConnData, TransId,
+ #request{timer_ref = {_Type, Ref},
+ user_mod = UserMod,
+ user_args = UserArgs,
+ reply_action = Action,
+ reply_data = UserData} = _Req,
+ UserReply, Extra) ->
+ megaco_monitor:delete_request(TransId),
+ megaco_monitor:request_lockcnt_del(TransId),
+ megaco_monitor:cancel_apply_after(Ref), % OTP-4843
+ megaco_config:del_pending_counter(recv, TransId), % OTP-7189
+
+ ConnData2 = ConnData#conn_data{user_mod = UserMod,
+ user_args = UserArgs,
+ reply_action = Action,
+ reply_data = UserData},
+ return_reply(ConnData2, TransId, UserReply, Extra).
+
+
cancel_reply(ConnData, #reply{state = waiting_for_ack,
user_mod = UserMod,
user_args = UserArgs} = Rep, Reason) ->
diff --git a/lib/megaco/src/engine/megaco_monitor.erl b/lib/megaco/src/engine/megaco_monitor.erl
index f95a20cf58..29275371be 100644
--- a/lib/megaco/src/engine/megaco_monitor.erl
+++ b/lib/megaco/src/engine/megaco_monitor.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2000-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -51,6 +51,11 @@
update_request_field/3, update_request_fields/2,
delete_request/1,
+ request_lockcnt_cre/1,
+ request_lockcnt_del/1,
+ request_lockcnt_inc/1,
+ request_lockcnt_dec/1,
+
lookup_reply/1,
lookup_reply_field/2,
match_replies/1,
@@ -115,6 +120,24 @@ update_request_fields(Key, NewFields) when is_list(NewFields) ->
delete_request(Key) ->
ets:delete(megaco_requests, Key).
+
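+%% Lock counters for request records. Each counter is stored in the
+%% megaco_requests table under the key {TransId, lockcnt} and is used
+%% by megaco_messenger (handle_reply and receive_reply_remote) to
+%% detect when several processes race for the same request record.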
+request_lockcnt_cre(TransId) ->
+ Key = {TransId, lockcnt},
+ ets:insert_new(megaco_requests, {Key, 1}).
+
+request_lockcnt_del(TransId) ->
+ Key = {TransId, lockcnt},
+ ets:delete(megaco_requests, Key).
+
+request_lockcnt_inc(TransId) ->
+ Key = {TransId, lockcnt},
+ (catch ets:update_counter(megaco_requests, Key, 1)).
+
+request_lockcnt_dec(TransId) ->
+ Key = {TransId, lockcnt},
+ (catch ets:update_counter(megaco_requests, Key, -1)).
+
+
lookup_reply(Key) ->
ets:lookup(megaco_replies, Key).
diff --git a/lib/megaco/src/flex/Makefile.in b/lib/megaco/src/flex/Makefile.in
index 6ce9b34617..5af651d89b 100644
--- a/lib/megaco/src/flex/Makefile.in
+++ b/lib/megaco/src/flex/Makefile.in
@@ -280,7 +280,7 @@ release_spec: opt
$(INSTALL_DATA) $(TARGET_FILES) $(RELSYSDIR)/ebin
ifeq ($(ENABLE_MEGACO_FLEX_SCANNER),true)
$(INSTALL_DATA) $(FLEX_FILES) $(C_TARGETS) $(RELSYSDIR)/src/flex
- $(INSTALL_DATA) $(SOLIBS) $(RELSYSDIR)/priv/lib
+ $(INSTALL_PROGRAM) $(SOLIBS) $(RELSYSDIR)/priv/lib
endif
diff --git a/lib/megaco/test/megaco_app_test.erl b/lib/megaco/test/megaco_app_test.erl
index 8e2148c236..597ec26338 100644
--- a/lib/megaco/test/megaco_app_test.erl
+++ b/lib/megaco/test/megaco_app_test.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2002-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2002-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -87,6 +87,10 @@ is_app(App) ->
case file:consult(File) of
{ok, [{application, App, AppFile}]} ->
{ok, AppFile};
+ {error, {LineNo, Mod, Code}} ->
+ IoList = lists:concat([File, ":", LineNo, ": ",
+ Mod:format_error(Code)]),
+ {error, list_to_atom(lists:flatten(IoList))};
Error ->
{error, {invalid_format, Error}}
end.
diff --git a/lib/megaco/test/megaco_config_test.erl b/lib/megaco/test/megaco_config_test.erl
index 453c1b8964..9ab1a7d90d 100644
--- a/lib/megaco/test/megaco_config_test.erl
+++ b/lib/megaco/test/megaco_config_test.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2000-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -106,166 +106,197 @@ config(Config) when is_list(Config) ->
Evil = 400,
End = 500,
+ InitialCmd =
+ fun(No, Desc, Cmd, VerifyVal) ->
+ initial_command(Initial + No, Desc, Cmd, VerifyVal)
+ end,
+
+ VerifyCmd =
+ fun(M, No, Key, V) ->
+ verify_user_default_command(M, Verify + No, Key, V)
+ end,
+
+ NiceCmd =
+ fun(M, No, Key, Val) ->
+ nice_user_update_command(M, Nice + No, Key, Val)
+ end,
+
+ EvilCmd =
+ fun(M, No, Key, Val) ->
+ evil_user_update_command(M, Evil + No, Key, Val)
+ end,
+
+ %% End commands
+ ExitCmd =
+ fun(No, Desc, Cmd) ->
+ exit_command(End + No, Desc, Cmd)
+ end,
+ ErrorCmd =
+ fun(No, Desc, Cmd, MainReason, TS) ->
+ error_command(End + No, Desc, Cmd, MainReason, TS)
+ end,
+ PlainCmd =
+ fun(No, Desc, Cmd, V) ->
+ command(End + No, Desc, Cmd, V)
+ end,
+
Commands =
[
%% Initial commands
- initial_command( Initial + 0,
- "enable trace",
- fun() -> megaco:enable_trace(100, io) end, ok),
- initial_command( Initial + 1,
- "start",
- fun() -> megaco:start() end, ok),
- initial_command( Initial + 2,
- "Verify no active requests",
- fun() -> megaco:system_info(n_active_requests) end,
- 0),
- initial_command( Initial + 3,
- "Verify no active replies",
- fun() -> megaco:system_info(n_active_replies) end,
- 0),
- initial_command( Initial + 4,
- "Verify no active connections",
- fun() ->
- megaco:system_info(n_active_connections)
- end,
- 0),
- initial_command( Initial + 5,
- "Verify no connections",
- fun() -> megaco:system_info(connections) end, []),
- initial_command( Initial + 6,
- "Verify no users",
- fun() -> megaco:system_info(users) end, []),
- initial_command( Initial + 7,
- "Start user",
- fun() -> megaco:start_user(Mid, []) end, ok),
-
+ InitialCmd(0,
+ "enable trace",
+ fun() -> megaco:enable_trace(100, io) end,
+ ok),
+ InitialCmd(1,
+ "start",
+ fun() -> megaco:start() end,
+ ok),
+ InitialCmd(2,
+ "Verify no active requests",
+ fun() -> megaco:system_info(n_active_requests) end,
+ 0),
+ InitialCmd(3,
+ "Verify no active replies",
+ fun() -> megaco:system_info(n_active_replies) end,
+ 0),
+ InitialCmd(4,
+ "Verify no active connections",
+ fun() -> megaco:system_info(n_active_connections) end,
+ 0),
+ InitialCmd(5,
+ "Verify no connections",
+ fun() -> megaco:system_info(connections) end,
+ []),
+ InitialCmd(6,
+ "Verify no users",
+ fun() -> megaco:system_info(users) end,
+ []),
+ InitialCmd(7,
+ "Start user",
+ fun() -> megaco:start_user(Mid, []) end,
+ ok),
+
%% Verify user defaults
- verify_user_default_command(Mid, Verify + 1, connections, []),
- verify_user_default_command(Mid, Verify + 2, min_trans_id, 1),
- verify_user_default_command(Mid, Verify + 3, max_trans_id, infinity),
- verify_user_default_command(Mid, Verify + 4, request_timer,
- #megaco_incr_timer{}),
- verify_user_default_command(Mid, Verify + 5, long_request_timer, timer:seconds(60)),
- verify_user_default_command(Mid, Verify + 6, auto_ack, false),
- verify_user_default_command(Mid, Verify + 7, pending_timer, 30000),
- verify_user_default_command(Mid, Verify + 8, reply_timer, 30000),
- verify_user_default_command(Mid, Verify + 9, send_mod, megaco_tcp),
- verify_user_default_command(Mid, Verify + 10, encoding_mod,
- megaco_pretty_text_encoder),
- verify_user_default_command(Mid, Verify + 11, encoding_config, []),
- verify_user_default_command(Mid, Verify + 12, protocol_version, 1),
- verify_user_default_command(Mid, Verify + 13, reply_data, undefined),
- verify_user_default_command(Mid, Verify + 14, receive_handle,
- fun(H) when is_record(H, megaco_receive_handle) -> {ok, H};
- (R) -> {error, R}
- end),
+ VerifyCmd(Mid, 1, connections, []),
+ VerifyCmd(Mid, 2, min_trans_id, 1),
+ VerifyCmd(Mid, 3, max_trans_id, infinity),
+ VerifyCmd(Mid, 4, request_timer, #megaco_incr_timer{}),
+ VerifyCmd(Mid, 5, long_request_timer, timer:seconds(60)),
+ VerifyCmd(Mid, 6, auto_ack, false),
+ VerifyCmd(Mid, 7, pending_timer, 30000),
+ VerifyCmd(Mid, 8, reply_timer, 30000),
+ VerifyCmd(Mid, 9, send_mod, megaco_tcp),
+ VerifyCmd(Mid, 10, encoding_mod, megaco_pretty_text_encoder),
+ VerifyCmd(Mid, 11, encoding_config, []),
+ VerifyCmd(Mid, 12, protocol_version, 1),
+ VerifyCmd(Mid, 13, reply_data, undefined),
+ VerifyCmd(Mid, 14, receive_handle,
+ fun(H) when is_record(H, megaco_receive_handle) ->
+ {ok, H};
+ (R) ->
+ {error, R}
+ end),
%% Nice update
- nice_user_update_command(Mid, Nice + 1, min_trans_id, Int),
- nice_user_update_command(Mid, Nice + 2, max_trans_id, Int),
- nice_user_update_command(Mid, Nice + 3, max_trans_id, infinity),
- nice_user_update_command(Mid, Nice + 4, request_timer, Int),
- nice_user_update_command(Mid, Nice + 5, request_timer, infinity),
- nice_user_update_command(Mid, Nice + 6, request_timer, IT),
- nice_user_update_command(Mid, Nice + 7, long_request_timer, Int),
- nice_user_update_command(Mid, Nice + 8, long_request_timer, infinity),
- nice_user_update_command(Mid, Nice + 9, long_request_timer, IT),
- nice_user_update_command(Mid, Nice + 10, auto_ack, true),
- nice_user_update_command(Mid, Nice + 11, auto_ack, false),
- nice_user_update_command(Mid, Nice + 12, pending_timer, Int),
- nice_user_update_command(Mid, Nice + 13, pending_timer, infinity),
- nice_user_update_command(Mid, Nice + 14, pending_timer, IT),
- nice_user_update_command(Mid, Nice + 15, reply_timer, Int),
- nice_user_update_command(Mid, Nice + 16, reply_timer, infinity),
- nice_user_update_command(Mid, Nice + 17, reply_timer, IT),
- nice_user_update_command(Mid, Nice + 18, send_mod, an_atom),
- nice_user_update_command(Mid, Nice + 19, encoding_mod, an_atom),
- nice_user_update_command(Mid, Nice + 20, encoding_config, []),
- nice_user_update_command(Mid, Nice + 21, protocol_version, Int),
- nice_user_update_command(Mid, Nice + 23, reply_data, IT),
- nice_user_update_command(Mid, Nice + 23, resend_indication, true),
- nice_user_update_command(Mid, Nice + 24, resend_indication, false),
- nice_user_update_command(Mid, Nice + 25, resend_indication, flag),
+ NiceCmd(Mid, 1, min_trans_id, Int),
+ NiceCmd(Mid, 2, max_trans_id, Int),
+ NiceCmd(Mid, 3, max_trans_id, infinity),
+ NiceCmd(Mid, 4, request_timer, Int),
+ NiceCmd(Mid, 5, request_timer, infinity),
+ NiceCmd(Mid, 6, request_timer, IT),
+ NiceCmd(Mid, 7, long_request_timer, Int),
+ NiceCmd(Mid, 8, long_request_timer, infinity),
+ NiceCmd(Mid, 9, long_request_timer, IT),
+ NiceCmd(Mid, 10, auto_ack, true),
+ NiceCmd(Mid, 11, auto_ack, false),
+ NiceCmd(Mid, 12, pending_timer, Int),
+ NiceCmd(Mid, 13, pending_timer, infinity),
+ NiceCmd(Mid, 14, pending_timer, IT),
+ NiceCmd(Mid, 15, reply_timer, Int),
+ NiceCmd(Mid, 16, reply_timer, infinity),
+ NiceCmd(Mid, 17, reply_timer, IT),
+ NiceCmd(Mid, 18, send_mod, an_atom),
+ NiceCmd(Mid, 19, encoding_mod, an_atom),
+ NiceCmd(Mid, 20, encoding_config, []),
+ NiceCmd(Mid, 21, protocol_version, Int),
+ NiceCmd(Mid, 23, reply_data, IT),
+ NiceCmd(Mid, 23, resend_indication, true),
+ NiceCmd(Mid, 24, resend_indication, false),
+ NiceCmd(Mid, 25, resend_indication, flag),
%% Evil update
- evil_user_update_command(Mid, Evil + 1, min_trans_id, NonInt),
- evil_user_update_command(Mid, Evil + 2, max_trans_id, NonInt),
- evil_user_update_command(Mid, Evil + 3, max_trans_id, non_infinity),
- evil_user_update_command(Mid, Evil + 4, request_timer, NonInt),
- evil_user_update_command(Mid, Evil + 5, request_timer, non_infinity),
- evil_user_update_command(Mid, Evil + 6, request_timer, IT2),
- evil_user_update_command(Mid, Evil + 7, request_timer, IT3),
- evil_user_update_command(Mid, Evil + 8, request_timer, IT4),
- evil_user_update_command(Mid, Evil + 9, request_timer, IT5),
- evil_user_update_command(Mid, Evil + 10, long_request_timer, NonInt),
- evil_user_update_command(Mid, Evil + 11, long_request_timer, non_infinity),
- evil_user_update_command(Mid, Evil + 12, long_request_timer, IT2),
- evil_user_update_command(Mid, Evil + 13, long_request_timer, IT3),
- evil_user_update_command(Mid, Evil + 14, long_request_timer, IT4),
- evil_user_update_command(Mid, Evil + 15, long_request_timer, IT5),
- evil_user_update_command(Mid, Evil + 16, auto_ack, non_bool),
- evil_user_update_command(Mid, Evil + 17, pending_timer, NonInt),
- evil_user_update_command(Mid, Evil + 18, pending_timer, non_infinity),
- evil_user_update_command(Mid, Evil + 19, pending_timer, IT2),
- evil_user_update_command(Mid, Evil + 20, pending_timer, IT3),
- evil_user_update_command(Mid, Evil + 21, pending_timer, IT4),
- evil_user_update_command(Mid, Evil + 22, pending_timer, IT5),
- evil_user_update_command(Mid, Evil + 23, reply_timer, NonInt),
- evil_user_update_command(Mid, Evil + 24, reply_timer, non_infinity),
- evil_user_update_command(Mid, Evil + 25, reply_timer, IT2),
- evil_user_update_command(Mid, Evil + 26, reply_timer, IT3),
- evil_user_update_command(Mid, Evil + 27, reply_timer, IT4),
- evil_user_update_command(Mid, Evil + 28, reply_timer, IT5),
- evil_user_update_command(Mid, Evil + 29, send_mod, {non_atom}),
- evil_user_update_command(Mid, Evil + 30, encoding_mod, {non_atom}),
- evil_user_update_command(Mid, Evil + 31, encoding_config, non_list),
- evil_user_update_command(Mid, Evil + 32, protocol_version, NonInt),
- evil_user_update_command(Mid, Evil + 33, resend_indication, flagg),
-
-
- exit_command(End + 1,
- "Verify non-existing system info",
- fun() -> megaco:system_info(non_exist) end),
- exit_command(End + 2,
- "Verify non-existing user user info",
- fun() -> megaco:user_info(non_exist, trans_id) end),
- exit_command(End + 3, "Verify non-existing user info",
- fun() -> megaco:user_info(Mid, non_exist) end),
-
- error_command(End + 4,
- "Try updating user info for non-existing user",
- fun() ->
- megaco:update_user_info(non_exist, trans_id, 1)
- end,
- no_such_user, 2),
- error_command(End + 11,
- "Try updating non-existing user info",
- fun() ->
- megaco:update_user_info(Mid, trans_id, 4711)
- end,
- bad_user_val, 4),
- error_command(End + 12,
- "Try start already started user",
- fun() ->
- megaco:start_user(Mid, [])
- end,
- user_already_exists, 2),
-
- command(End + 13, "Verify started users",
- fun() -> megaco:system_info(users) end, [Mid]),
- command(End + 14, "Stop user", fun() -> megaco:stop_user(Mid) end, ok),
- command(End + 15, "Verify started users",
- fun() -> megaco:system_info(users) end, []),
- error_command(End + 16, "Try stop not started user",
- fun() -> megaco:stop_user(Mid) end, no_such_user, 2),
- error_command(End + 17, "Try start megaco (it's already started)",
- fun() -> megaco:start() end, already_started, 2),
- command(End + 18, "Stop megaco", fun() -> megaco:stop() end, ok),
- error_command(End + 19, "Try stop megaco (it's not running)",
- fun() -> megaco:stop() end, not_started, 2)
+ EvilCmd(Mid, 1, min_trans_id, NonInt),
+ EvilCmd(Mid, 2, max_trans_id, NonInt),
+ EvilCmd(Mid, 3, max_trans_id, non_infinity),
+ EvilCmd(Mid, 4, request_timer, NonInt),
+ EvilCmd(Mid, 5, request_timer, non_infinity),
+ EvilCmd(Mid, 6, request_timer, IT2),
+ EvilCmd(Mid, 7, request_timer, IT3),
+ EvilCmd(Mid, 8, request_timer, IT4),
+ EvilCmd(Mid, 9, request_timer, IT5),
+ EvilCmd(Mid, 10, long_request_timer, NonInt),
+ EvilCmd(Mid, 11, long_request_timer, non_infinity),
+ EvilCmd(Mid, 12, long_request_timer, IT2),
+ EvilCmd(Mid, 13, long_request_timer, IT3),
+ EvilCmd(Mid, 14, long_request_timer, IT4),
+ EvilCmd(Mid, 15, long_request_timer, IT5),
+ EvilCmd(Mid, 16, auto_ack, non_bool),
+ EvilCmd(Mid, 17, pending_timer, NonInt),
+ EvilCmd(Mid, 18, pending_timer, non_infinity),
+ EvilCmd(Mid, 19, pending_timer, IT2),
+ EvilCmd(Mid, 20, pending_timer, IT3),
+ EvilCmd(Mid, 21, pending_timer, IT4),
+ EvilCmd(Mid, 22, pending_timer, IT5),
+ EvilCmd(Mid, 23, reply_timer, NonInt),
+ EvilCmd(Mid, 24, reply_timer, non_infinity),
+ EvilCmd(Mid, 25, reply_timer, IT2),
+ EvilCmd(Mid, 26, reply_timer, IT3),
+ EvilCmd(Mid, 27, reply_timer, IT4),
+ EvilCmd(Mid, 28, reply_timer, IT5),
+ EvilCmd(Mid, 29, send_mod, {non_atom}),
+ EvilCmd(Mid, 30, encoding_mod, {non_atom}),
+ EvilCmd(Mid, 31, encoding_config, non_list),
+ EvilCmd(Mid, 32, protocol_version, NonInt),
+ EvilCmd(Mid, 33, resend_indication, flagg),
+
+
+ %% End
+ ExitCmd(1, "Verify non-existing system info",
+ fun() -> megaco:system_info(non_exist) end),
+ ExitCmd(2, "Verify non-existing user user info",
+ fun() -> megaco:user_info(non_exist, trans_id) end),
+ ExitCmd(3, "Verify non-existing user info",
+ fun() -> megaco:user_info(Mid, non_exist) end),
+
+ ErrorCmd(4, "Try updating user info for non-existing user",
+ fun() ->
+ megaco:update_user_info(non_exist, trans_id, 1)
+ end,
+ no_such_user, 2),
+ ErrorCmd(11, "Try updating non-existing user info",
+ fun() ->
+ megaco:update_user_info(Mid, trans_id, 4711)
+ end,
+ bad_user_val, 4),
+ ErrorCmd(12, "Try start already started user",
+ fun() -> megaco:start_user(Mid, []) end,
+ user_already_exists, 2),
+
+ PlainCmd(13, "Verify started users",
+ fun() -> megaco:system_info(users) end, [Mid]),
+ PlainCmd(14, "Stop user", fun() -> megaco:stop_user(Mid) end, ok),
+ PlainCmd(15, "Verify started users",
+ fun() -> megaco:system_info(users) end, []),
+ ErrorCmd(16, "Try stop not started user",
+ fun() -> megaco:stop_user(Mid) end, no_such_user, 2),
+ ErrorCmd(17, "Try start megaco (it's already started)",
+ fun() -> megaco:start() end, already_started, 2),
+ PlainCmd(18, "Stop megaco", fun() -> megaco:stop() end, ok),
+ ErrorCmd(19, "Try stop megaco (it's not running)",
+ fun() -> megaco:stop() end, not_started, 2)
],
@@ -279,7 +310,7 @@ exec([#command{id = No,
desc = Desc,
cmd = Cmd,
verify = Verify}|Commands]) ->
- io:format("Executing command ~2w: ~s: ", [No, Desc]),
+ io:format("Executing command ~3w: ~s: ", [No, Desc]),
case (catch Verify((catch Cmd()))) of
{ok, OK} ->
io:format("ok => ~p~n", [OK]),
@@ -320,7 +351,7 @@ nice_user_update_command(Mid, No, Key, Val) ->
evil_user_update_command(Mid, No, Key, Val) ->
- Desc = lists:flatten(io_lib:format("Evil: Update ~w", [Key])),
+ Desc = lists:flatten(io_lib:format("Evil - Update ~w", [Key])),
Cmd = fun() ->
case (catch megaco:user_info(Mid, Key)) of
{'EXIT', R} ->
@@ -371,14 +402,19 @@ error_command(No, Desc, Cmd, MainReason, TS) when is_function(Cmd) ->
end,
command(No, Desc, Cmd, Verify).
-command(No, Desc, Cmd, Verify) when is_integer(No) and is_list(Desc) and
- is_function(Cmd) and is_function(Verify) ->
+command(No, Desc, Cmd, Verify)
+ when (is_integer(No) andalso
+ is_list(Desc) andalso
+ is_function(Cmd) andalso
+ is_function(Verify)) ->
#command{id = No,
desc = Desc,
cmd = Cmd,
verify = Verify};
-command(No, Desc, Cmd, VerifyVal) when is_integer(No) and is_list(Desc) and
- is_function(Cmd) ->
+command(No, Desc, Cmd, VerifyVal)
+ when (is_integer(No) andalso
+ is_list(Desc) andalso
+ is_function(Cmd)) ->
Verify = fun(Val) ->
case Val of
VerifyVal ->
@@ -881,6 +917,12 @@ otp_8167(Config) when is_list(Config) ->
p("connect ok: CD = ~n~p", [CD]),
CH = CD#conn_data.conn_handle,
+ p("get value for item cancel from connection: ~p", [CH]),
+ false = megaco_config:conn_info(CH, cancel),
+
+ p("get value for item cancel from connection data", []),
+ false = megaco_config:conn_info(CD, cancel),
+
p("get value for item call_proxy_gc_timeout for connection: ~p", [CH]),
10101 = megaco_config:conn_info(CH, call_proxy_gc_timeout),
diff --git a/lib/megaco/vsn.mk b/lib/megaco/vsn.mk
index 19eca6d309..4ef0ed8f18 100644
--- a/lib/megaco/vsn.mk
+++ b/lib/megaco/vsn.mk
@@ -18,11 +18,13 @@
# %CopyrightEnd%
APPLICATION = megaco
-MEGACO_VSN = 3.14
+MEGACO_VSN = 3.14.1
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(MEGACO_VSN)$(PRE_VSN)"
-TICKETS = OTP-8317 OTP-8323 OTP-8328 OTP-8362 OTP-8403
+TICKETS = OTP-8529 OTP-8561 OTP-8627 OTP-8634
+
+TICKETS_3_14 = OTP-8317 OTP-8323 OTP-8328 OTP-8362 OTP-8403
TICKETS_3_13 = OTP-8205 OTP-8239 OTP-8249
diff --git a/lib/mnesia/test/Makefile b/lib/mnesia/test/Makefile
new file mode 100644
index 0000000000..a4f32e3f78
--- /dev/null
+++ b/lib/mnesia/test/Makefile
@@ -0,0 +1,118 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+
+MODULES= \
+ mt \
+ mnesia_SUITE \
+ mnesia_test_lib \
+ mnesia_install_test \
+ mnesia_registry_test \
+ mnesia_config_test \
+ mnesia_frag_test \
+ mnesia_inconsistent_database_test \
+ mnesia_config_backup \
+ mnesia_config_event \
+ mnesia_examples_test \
+ mnesia_nice_coverage_test \
+ mnesia_evil_coverage_test \
+ mnesia_evil_backup \
+ mnesia_trans_access_test \
+ mnesia_dirty_access_test \
+ mnesia_atomicity_test \
+ mnesia_consistency_test \
+ mnesia_isolation_test \
+ mnesia_durability_test \
+ mnesia_recovery_test \
+ mnesia_qlc_test \
+ mnesia_schema_recovery_test \
+ mnesia_measure_test \
+ mnesia_cost \
+ mnesia_dbn_meters
+
+MnesiaExamplesDir := ../examples
+
+ExampleModules = \
+ company \
+ company_o \
+ bup \
+ mnesia_meter \
+ mnesia_tpcb
+ExamplesHrl = \
+ company.hrl \
+ company_o.hrl
+
+ERL_FILES= $(MODULES:%=%.erl) $(ExampleModules:%=$(MnesiaExamplesDir)/%.erl)
+
+HRL_FILES= mnesia_test_lib.hrl $(ExamplesHrl:%=$(MnesiaExamplesDir)/%)
+
+TARGET_FILES= \
+ $(MODULES:%=$(EBIN)/%.$(EMULATOR)) $(ExampleModules:%=$(EBIN)/%.$(EMULATOR))
+
+INSTALL_PROGS= $(TARGET_FILES)
+
+# ----------------------------------------------------
+# Release directory specification
+# ----------------------------------------------------
+RELSYSDIR = $(RELEASE_PATH)/mnesia_test
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+#ERL_COMPILE_FLAGS +=
+
+EBIN = .
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+
+tests debug opt: $(TARGET_FILES)
+
+$(EBIN)/%.beam: $(MnesiaExamplesDir)/%.erl
+ $(ERLC) -bbeam $(ERL_COMPILE_FLAGS) -o$(EBIN) $<
+
+clean:
+ rm -f $(TARGET_FILES)
+ rm -f core
+
+docs:
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+release_spec: opt
+
+release_tests_spec: opt
+ $(INSTALL_DIR) $(RELSYSDIR)
+ $(INSTALL_DATA) mnesia.spec mnesia.spec.vxworks $(ERL_FILES) $(HRL_FILES) $(RELSYSDIR)
+ $(INSTALL_PROGRAM) mt $(INSTALL_PROGS) $(RELSYSDIR)
+# chmod -f -R u+w $(RELSYSDIR)
+# @tar cf - *_SUITE_data | (cd $(RELSYSDIR); tar xf -)
+
+release_docs_spec:
+
+
diff --git a/lib/mnesia/test/README b/lib/mnesia/test/README
new file mode 100644
index 0000000000..e0ced7399d
--- /dev/null
+++ b/lib/mnesia/test/README
@@ -0,0 +1,107 @@
+This directory contains the test suite of Mnesia.
+Compile it with "erl -make".
+
+Test cases are identified with a {Mod, Fun} tuple that maps
+to a function Mod:Fun(Config), where the test case hopefully
+is implemented. The test suite is organized in a hierarchy
+with {mnesia_SUITE, all} as the top.
+
+The module called mt implements various convenience functions
+to ease the execution of test cases. It also provides
+aliases for some test cases. For example, the atom Mod is an
+alias for {Mod, all}, the atom all for {mnesia_SUITE, all},
+evil for mnesia_evil_coverage_test etc.
+
+ mt:struct(TestCase)
+
+ Displays the test case structure from TestCase
+    and downwards in the hierarchy. E.g. mt:struct(all)
+ will display the entire test suite.
+
+ mt:t(TestCase), mt:t(TestCase, Config)
+
+ Runs a single test case or a hierarchy of test cases.
+    mt:t(silly) is a good starter, but you may also
+ try mt:t(all) directly if you feel lucky.
+
+    The identity of the last run test case and its outcome are
+    stored on file. mt:t() will re-run the last test case.
+
+ The Config argument contains various configuration
+    parameters for the test cases, such as which nodes
+    are available for running the test suite. The default
+    settings should be enough for most. Use mt:read_config()
+ to get the current default setting and change it with
+ mt:write_config(Config).
+
+ mt:doc(TestCase)
+
+ Generates html documentation for the test suite.
+
+In order to be able to run the test suite, the Erlang node must
+be started with the distribution enabled and the code path must
+be set to the mnesia/ebin, mnesia/examples, and mnesia/test
+directories. E.g. the following would do:
+
+ erl -sname a -pa $top/examples -pa $top/src -pa $top/ebin
+
+where $top is the path to the Mnesia installation. Many test
+cases need 2 or 3 nodes. The node names may explicitly be
+stated as test suite configuration parameters, but by default
+the extra node names are generated. In this example the names
+will be: a, a1 and a2. It is enough to start the first node
+manually; the extra nodes will automatically be started if
+necessary.
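+
+For example, once such a node is running, a short session could
+look like this (a sketch only; output omitted):
+
+ 1> mt:struct(all).
+ 2> mt:t(silly).
+ 3> mt:t().         %% re-runs the last test case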
+
+The attached UNIX shell script mt does not work on all
+platforms, but it may be used as a source of inspiration. It
+starts three Erlang nodes in one xterm each. The main xterm
+(a@localhost) logs all output in the Erlang shell to a
+file. The file is piped through grep to easily find unsuccessful
+test cases (i.e. test cases that encountered an error).
+
+During development we want to be able to run the test cases
+in the debugger. This demands a little bit of preparation:
+
+  - Start the necessary number of nodes (normally 3).
+    This may either be done by running the mt script or
+    by starting the main node and then invoking mt:start_nodes()
+ to start the extra nodes with slave.
+
+ - Ensure that the nodes are connected. The easiest way to do
+ this is by invoking mt:ping().
+
+  - Load all files that need to be interpreted. This is typically
+ all Mnesia files plus the test case. By invoking mnesia:ni()
+    and mnesia:ni([TestModule]) the necessary modules will be
+ loaded on all CONNECTED nodes.
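+
+    For example, a hypothetical sequence on the main node covering
+    these three steps (a sketch only) could be:
+
+      1> mt:start_nodes().
+      2> mt:ping().
+      3> mnesia:ni().
+      4> mnesia:ni([mnesia_config_test]).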
+
+The test case execution is supervised in order to ensure that no test
+case exceeds its maximum time limit, which by default is 5 minutes.
+When the limit is reached, the running test case gets aborted and the
+test server runs the next test case in line. This behaviour is useful
+when running the entire test suite during the night, but it is really
+annoying during debugging.
+
+ Use the "erl -mnesia_test_timeout" flag to disable the test case
+ time limit mechanism.
+
+Some mechanisms in Mnesia are almost impossible to test with a
+white box technique. In order to be able to write predictable
+test cases which test the same thing every time they are run,
+Mnesia has been instrumented with debug functions. These may be
+controlled from a test program. For example, to verify that the
+commit protocols work it is essential to be able to kill Mnesia
+in the most critical situations. Normally Mnesia is compiled
+with the debug functions disabled, which means that test cases
+that require this functionality will be skipped. The mnesia:ni()
+functions, mentioned above, ensure that the interpreted code is
+instrumented with Mnesia's debug functionality. The mnesia:nc()
+functions compile Mnesia with the debug setting enabled.
+
+Happy bug hunting!
+
+ Hakan Mattsson <[email protected]>
+
+
diff --git a/lib/mnesia/test/mnesia.spec b/lib/mnesia/test/mnesia.spec
new file mode 100644
index 0000000000..596f8b917d
--- /dev/null
+++ b/lib/mnesia/test/mnesia.spec
@@ -0,0 +1,23 @@
+{topcase, {dir, "../mnesia_test"}}.
+{require_nodenames, 2}.
+{skip, {mnesia_measure_test, ram_meter, "Takes too long"}}.
+{skip, {mnesia_measure_test, disc_meter, "Takes too long"}}.
+{skip, {mnesia_measure_test, disc_only_meter, "Takes too long"}}.
+{skip, {mnesia_measure_test, cost, "Takes too long"}}.
+{skip, {mnesia_measure_test, dbn_meters, "Takes too long"}}.
+{skip, {mnesia_measure_test, tpcb, "Takes too long"}}.
+{skip, {mnesia_measure_test, prediction, "Not yet implemented"}}.
+{skip, {mnesia_measure_test, consumption, "Not yet implemented"}}.
+{skip, {mnesia_measure_test, scalability, "Not yet implemented"}}.
+{skip, {mnesia_measure_test, tpcb, "Takes too much time and memory"}}.
+{skip, {mnesia_measure_test, measure_all_api_functions, "Not yet implemented"}}.
+{skip, {mnesia_measure_test, mnemosyne_vs_mnesia_kernel, "Not yet implemented"}}.
+{skip, {mnesia_examples_test, company, "Not yet implemented"}}.
+{skip, {mnesia_config_test, ignore_fallback_at_startup, "Not yet implemented"}}.
+{skip, {mnesia_evil_backup, local_backup_checkpoint, "Not yet implemented"}}.
+{skip, {mnesia_config_test, max_wait_for_decision, "Not yet implemented"}}.
+{skip, {mnesia_recovery_test, after_full_disc_partition, "Not yet implemented"}}.
+{skip, {mnesia_recovery_test, system_upgrade, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, consistency_after_change_table_copy_type, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, consistency_after_transform_table, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, consistency_after_rename_of_node, "Not yet implemented"}}.
diff --git a/lib/mnesia/test/mnesia.spec.vxworks b/lib/mnesia/test/mnesia.spec.vxworks
new file mode 100644
index 0000000000..11c01ea3fe
--- /dev/null
+++ b/lib/mnesia/test/mnesia.spec.vxworks
@@ -0,0 +1,362 @@
+{topcase, {dir, "../mnesia_test"}}.
+{require_nodenames, 3}.
+{diskless, true}.
+{skip, {mnesia_measure_test, all, "Too heavy"}}.
+%{mnesia_install_test, silly_durability} 'IMPL'
+%{mnesia_install_test, silly_move} 'IMPL'
+{skip, {mnesia_install_test, silly_upgrade, "Uses disk"}}.
+%{mnesia_install_test, conflict} 'IMPL'
+%{mnesia_install_test, dist} 'IMPL'
+{skip, {mnesia_examples_test, all, "Uses disk"}}.
+{skip, {mnesia_nice_coverage_test, all, "Uses disk"}}.
+
+%{mnesia_evil_coverage_test, system_info} 'IMPL'
+%{mnesia_evil_coverage_test, table_info} 'IMPL'
+%{mnesia_evil_coverage_test, error_description} 'IMPL'
+{skip, {mnesia_evil_coverage_test, db_node_lifecycle, "Uses disk"}}.
+{skip, {mnesia_evil_coverage_test, local_content, "Uses disk"}}.
+%{mnesia_evil_coverage_test, start_and_stop} 'IMPL'
+%{mnesia_evil_coverage_test, transaction} 'IMPL'
+{skip, {mnesia_evil_coverage_test, checkpoint, "Uses disk"}}.
+{skip, {mnesia_evil_backup, backup, "Uses disk"}}.
+{skip, {mnesia_evil_backup, global_backup_checkpoint, "Uses disk"}}.
+{skip, {mnesia_evil_backup, incremental_backup_checkpoint, "Uses disk"}}.
+{skip, {mnesia_evil_backup, local_backup_checkpoint, "Uses disk"}}.
+{skip, {mnesia_evil_backup, selective_backup_checkpoint, "Uses disk"}}.
+{skip, {mnesia_evil_backup, restore_errors, "Uses disk"}}.
+{skip, {mnesia_evil_backup, restore_clear, "Uses disk"}}.
+{skip, {mnesia_evil_backup, restore_keep, "Uses disk"}}.
+{skip, {mnesia_evil_backup, restore_recreate, "Uses disk"}}.
+{skip, {mnesia_evil_backup, traverse_backup, "Uses disk"}}.
+{skip, {mnesia_evil_backup, install_fallback, "Uses disk"}}.
+{skip, {mnesia_evil_backup, uninstall_fallback, "Uses disk"}}.
+{skip, {mnesia_evil_backup, local_fallback, "Uses disk"}}.
+%{mnesia_evil_coverage_test, table_lifecycle} 'IMPL'
+{skip, {mnesia_evil_coverage_test, replica_management, "Uses disk"}}.
+%{mnesia_evil_coverage_test, change_table_access_mode} 'IMPL'
+%{mnesia_evil_coverage_test, change_table_load_order} 'IMPL'
+{skip, {mnesia_evil_coverage_test, set_master_nodes, "Uses disk"}}.
+{skip, {mnesia_evil_coverage_test, offline_set_master_nodes, "Uses disk"}}.
+{skip, {mnesia_evil_coverage_test, replica_location, "Uses disk"}}.
+%{mnesia_evil_coverage_test, add_table_index_ram} 'IMPL'
+{skip, {mnesia_trans_access_test, add_table_index_disc, "Uses disc"}}.
+{skip, {mnesia_trans_access_test, add_table_index_disc_only, "Uses disc"}}.
+%{mnesia_evil_coverage_test, create_live_table_index_ram} 'IMPL'
+{skip, {mnesia_trans_access_test, create_live_table_index_disc, "Uses disc"}}.
+{skip, {mnesia_trans_access_test, create_live_table_index_disc_only, "Uses disc"}}.
+%{mnesia_evil_coverage_test, del_table_index_ram} 'IMPL'
+{skip, {mnesia_trans_access_test, del_table_index_disc, "Uses disc"}}.
+{skip, {mnesia_trans_access_test, del_table_index_disc_only, "Uses disc"}}.
+{skip, {mnesia_trans_access_test, idx_schema_changes_ram, "Uses disk"}}.
+{skip, {mnesia_trans_access_test, idx_schema_changes_disc, "Uses disc"}}.
+{skip, {mnesia_trans_access_test, idx_schema_changes_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_write_ram} 'IMPL'
+
+{skip, {mnesia_dirty_access_test, dirty_write_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_write_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_read_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_read_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_read_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_update_counter_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_update_counter_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_update_counter_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_delete_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_delete_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_delete_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_delete_object_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_delete_object_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_delete_object_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_match_object_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_match_object_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_match_object_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_index_match_object_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_index_match_object_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_index_match_object_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_index_read_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_index_read_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_index_read_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_index_update_set_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_index_update_set_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_index_update_set_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_index_update_bag_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_index_update_bag_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_index_update_bag_disc_only, "Uses disc"}}.
+%{mnesia_dirty_access_test, dirty_iter_ram} 'IMPL'
+{skip, {mnesia_dirty_access_test, dirty_iter_disc, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, dirty_iter_disc_only, "Uses disc"}}.
+{skip, {mnesia_dirty_access_test, admin_tests, "Uses disk"}}.
+
+%{mnesia_trans_access_test, write} 'IMPL'
+%{mnesia_trans_access_test, read} 'IMPL'
+%{mnesia_trans_access_test, wread} 'IMPL'
+%{mnesia_trans_access_test, delete} 'IMPL'
+%{mnesia_trans_access_test, delete_object} 'IMPL'
+%{mnesia_trans_access_test, match_object} 'IMPL'
+%{mnesia_trans_access_test, all_keys} 'IMPL'
+%{mnesia_trans_access_test, index_match_object} 'IMPL'
+%{mnesia_trans_access_test, index_read} 'IMPL'
+%{mnesia_trans_access_test, index_update_set} 'IMPL'
+%{mnesia_trans_access_test, index_update_bag} 'IMPL'
+{skip, {mnesia_evil_coverage_test, dump_tables, "Uses disk"}}.
+{skip, {mnesia_evil_coverage_test, dump_log, "Uses disk"}}.
+%{mnesia_evil_coverage_test, wait_for_tables} 'IMPL'
+{skip, {mnesia_evil_coverage_test, force_load_table, "Uses disk"}}.
+%{mnesia_evil_coverage_test, user_properties} 'IMPL'
+%{mnesia_evil_coverage_test, record_name_dirty_access_ram} 'IMPL'
+{skip, {mnesia_evil_coverage_test, record_name_dirty_access_disc, "Uses disc"}}.
+{skip, {mnesia_evil_coverage_test, record_name_dirty_access_disc_only, "Uses disc"}}.
+%{mnesia_evil_coverage_test, snmp_open_table} 'IMPL'
+%{mnesia_evil_coverage_test, snmp_close_table} 'IMPL'
+%{mnesia_evil_coverage_test, snmp_get_next_index} 'IMPL'
+%{mnesia_evil_coverage_test, snmp_get_row} 'IMPL'
+%{mnesia_evil_coverage_test, snmp_get_mnesia_key} 'IMPL'
+%{mnesia_evil_coverage_test, snmp_update_counter} 'IMPL'
+%{mnesia_evil_coverage_test, info} 'IMPL'
+%{mnesia_evil_coverage_test, schema_0} 'IMPL'
+%{mnesia_evil_coverage_test, schema_1} 'IMPL'
+%{mnesia_evil_coverage_test, view_0} 'IMPL'
+{skip, {mnesia_evil_coverage_test, view_1, "Uses disk"}}.
+{skip, {mnesia_evil_coverage_test, view_2, "Uses disk"}}.
+%{mnesia_evil_coverage_test, lkill} 'IMPL'
+%{mnesia_evil_coverage_test, kill} 'IMPL'
+
+%{mnesia_config_test, access_module} 'IMPL'
+%{mnesia_config_test, auto_repair} 'IMPL'
+{skip, {mnesia_config_test, backup_module, "Uses disk"}}.
+{skip, {mnesia_config_test, dynamic_connect, "Uses disk"}}.
+%{mnesia_config_test, debug} 'IMPL'
+%{mnesia_config_test, dir} 'IMPL'
+{skip, {mnesia_config_test, dump_log_load_regulation, "Uses disk"}}.
+{skip, {mnesia_config_test, dump_log_time_threshold, "Uses disk"}}.
+{skip, {mnesia_config_test, dump_log_write_threshold, "Uses disk"}}.
+{skip, {mnesia_config_test, dump_log_update_in_place, "Uses disk"}}.
+{skip, {mnesia_config_test, embedded_mnemosyne, "Uses Mnemosyne"}}.
+%{mnesia_config_test, event_module} 'IMPL'
+{skip, {mnesia_config_test, ignore_fallback_at_startup, "Not Yet impl"}}.
+%{mnesia_config_test, inconsistent_database} 'IMPL'
+{skip, {mnesia_config_test, max_wait_for_decision, "Not Yet impl"}}.
+{skip, {mnesia_config_test, start_one_disc_full_then_one_disc_less, "Uses disc"}}.
+{skip, {mnesia_config_test, start_first_one_disc_less_then_one_disc_full, "Uses disc"}}.
+%%{skip, {mnesia_config_test, start_first_one_disc_less_then_two_more_disc_less, "Uses disc"}}.
+{skip, {mnesia_config_test, schema_location_and_extra_db_nodes_combinations, "Uses disk"}}.
+{skip, {mnesia_config_test, table_load_to_disc_less_nodes, "Uses disc"}}.
+{skip, {mnesia_config_test, schema_merge, "Uses Disc"}}.
+%{mnesia_config_test, unknown_config} 'IMPL'
+%{mnesia_registry_test, good_dump} 'IMPL'
+%{mnesia_registry_test, bad_dump} 'IMPL'
+
+%{mnesia_atomicity_test, explicit_abort_in_middle_of_trans} 'IMPL'
+%{mnesia_atomicity_test, runtime_error_in_middle_of_trans} 'IMPL'
+%{mnesia_atomicity_test, kill_self_in_middle_of_trans} 'IMPL'
+%{mnesia_atomicity_test, throw_in_middle_of_trans} 'IMPL'
+%{mnesia_atomicity_test, mnesia_down_during_infinite_trans} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_sw_rt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_sw_wt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wr_r} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_sw_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_sw_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_sw_wr} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wr_wt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wr_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wr_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_r_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_r_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_r_wt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_rt_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_rt_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_rt_wt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_r} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_rt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_wt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_wr} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_wt_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_wr} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_sw} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_r} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_w} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_rt} 'IMPL'
+%{mnesia_atomicity_test, lock_waiter_w_wt} 'IMPL'
+%{mnesia_atomicity_test, restart_r_one} 'IMPL'
+%{mnesia_atomicity_test, restart_w_one} 'IMPL'
+%{mnesia_atomicity_test, restart_rt_one} 'IMPL'
+%{mnesia_atomicity_test, restart_wt_one} 'IMPL'
+%{mnesia_atomicity_test, restart_wr_one} 'IMPL'
+%{mnesia_atomicity_test, restart_sw_one} 'IMPL'
+%{mnesia_atomicity_test, restart_r_two} 'IMPL'
+%{mnesia_atomicity_test, restart_w_two} 'IMPL'
+%{mnesia_atomicity_test, restart_rt_two} 'IMPL'
+%{mnesia_atomicity_test, restart_wt_two} 'IMPL'
+%{mnesia_atomicity_test, restart_wr_two} 'IMPL'
+%{mnesia_atomicity_test, restart_sw_two} 'IMPL'
+
+%{mnesia_isolation_test, no_conflict} 'IMPL'
+%{mnesia_isolation_test, simple_queue_conflict} 'IMPL'
+%{mnesia_isolation_test, advanced_queue_conflict} 'IMPL'
+%{mnesia_isolation_test, simple_deadlock_conflict} 'IMPL'
+%{mnesia_isolation_test, advanced_deadlock_conflict} 'IMPL'
+%{mnesia_isolation_test, lock_burst} 'IMPL'
+%{mnesia_isolation_test, basic_sticky_functionality} 'IMPL'
+%{mnesia_isolation_test, create_table} 'IMPL'
+%{mnesia_isolation_test, delete_table} 'IMPL'
+%{mnesia_isolation_test, move_table_copy} 'IMPL'
+%{mnesia_isolation_test, add_table_index} 'IMPL'
+%{mnesia_isolation_test, del_table_index} 'IMPL'
+%{mnesia_isolation_test, transform_table} 'IMPL'
+%{mnesia_isolation_test, snmp_open_table} 'IMPL'
+%{mnesia_isolation_test, snmp_close_table} 'IMPL'
+{skip, {mnesia_isolation_test, change_table_copy_type, "Uses disk"}}.
+%{mnesia_isolation_test, change_table_access} 'IMPL'
+%{mnesia_isolation_test, add_table_copy} 'IMPL'
+%{mnesia_isolation_test, del_table_copy} 'IMPL'
+{skip, {mnesia_isolation_test, dump_tables, "Uses disk"}}.
+{skip, {mnesia_isolation_test, extra_admin_tests, "Uses disk"}}.
+%{mnesia_isolation_test, del_table_copy_1} 'IMPL'
+%{mnesia_isolation_test, del_table_copy_2} 'IMPL'
+%{mnesia_isolation_test, del_table_copy_3} 'IMPL'
+%{mnesia_isolation_test, add_table_copy_1} 'IMPL'
+%{mnesia_isolation_test, add_table_copy_2} 'IMPL'
+%{mnesia_isolation_test, add_table_copy_3} 'IMPL'
+%{mnesia_isolation_test, add_table_copy_4} 'IMPL'
+%{mnesia_isolation_test, move_table_copy_1} 'IMPL'
+%{mnesia_isolation_test, move_table_copy_2} 'IMPL'
+%{mnesia_isolation_test, move_table_copy_3} 'IMPL'
+%{mnesia_isolation_test, move_table_copy_4} 'IMPL'
+%{mnesia_isolation_test, dirty_updates_visible_direct} 'IMPL'
+%{mnesia_isolation_test, dirty_reads_regardless_of_trans} 'IMPL'
+%{mnesia_isolation_test, trans_update_invisibible_outside_trans} 'IMPL'
+%{mnesia_isolation_test, trans_update_visible_inside_trans} 'IMPL'
+%{mnesia_isolation_test, write_shadows} 'IMPL'
+%{mnesia_isolation_test, delete_shadows} 'IMPL'
+%{mnesia_isolation_test, write_delete_shadows_bag} 'IMPL'
+
+{skip, {mnesia_durability_test, all, "Uses disk "}}.
+%{mnesia_durability_test, load_local_contents_directly} 'IMPL'
+%{mnesia_durability_test, load_directly_when_all_are_ram_copiesA} 'IMPL'
+%{mnesia_durability_test, load_directly_when_all_are_ram_copiesB} 'IMPL'
+%{skip, {mnesia_durability_test, late_load_when_all_are_ram_copies_on_ram_nodes1, "Uses disk schema"}}.
+%{skip, {mnesia_durability_test, late_load_when_all_are_ram_copies_on_ram_nodes2, "Uses disk schema"}}.
+%{skip, {mnesia_durability_test, load_when_last_replica_becomes_available, "Uses disk"}}.
+%{skip, {mnesia_durability_test, load_when_we_have_down_from_all_other_replica_nodes, "Uses disk"}}.
+%{skip, {mnesia_durability_test, late_load_transforms_into_disc_load, "Uses disc"}}.
+%{mnesia_durability_test, late_load_leads_to_hanging} 'IMPL'
+%{mnesia_durability_test, force_load_when_nobody_intents_to_load} 'IMPL'
+%{mnesia_durability_test, force_load_when_someone_has_decided_to_load} 'IMPL'
+%{mnesia_durability_test, force_load_when_someone_else_already_has_loaded} 'IMPL'
+%{mnesia_durability_test, force_load_when_we_has_loaded} 'IMPL'
+%{mnesia_durability_test, force_load_on_a_non_local_table} 'IMPL'
+%{mnesia_durability_test, force_load_when_the_table_does_not_exist} 'IMPL'
+%{mnesia_durability_test, master_nodes} 'IMPL'
+%{mnesia_durability_test, master_on_non_local_tables} 'IMPL'
+%{mnesia_durability_test, remote_force_load_with_local_master_node} 'IMPL'
+%{mnesia_durability_test, dump_ram_copies} 'IMPL'
+%{skip, {mnesia_durability_test, dump_disc_copies, "Uses disc"}}.
+%{skip, {mnesia_durability_test, dump_disc_only, "Uses disc"}}.
+%{skip, {mnesia_durability_test, durability_of_disc_copies, "Uses disc"}}.
+%{skip, {mnesia_durability_test, durability_of_disc_only_copies, "Uses disc"}}.
+
+{skip, {mnesia_recovery_test, mnesia_down, "Uses Disk"}}.
+%{mnesia_recovery_test, no_master_2} 'IMPL'
+%{mnesia_recovery_test, no_master_3} 'IMPL'
+%{mnesia_recovery_test, one_master_2} 'IMPL'
+%{mnesia_recovery_test, one_master_3} 'IMPL'
+%{mnesia_recovery_test, two_master_2} 'IMPL'
+%{mnesia_recovery_test, two_master_3} 'IMPL'
+%{mnesia_recovery_test, all_master_2} 'IMPL'
+%{mnesia_recovery_test, all_master_3} 'IMPL'
+{skip, {mnesia_recovery_test, mnesia_down_during_startup_disk_ram, "Uses disk"}}.
+%{mnesia_recovery_test, mnesia_down_during_startup_init_ram} 'IMPL'
+{skip, {mnesia_recovery_test, mnesia_down_during_startup_init_disc, "Uses disc"}}.
+{skip, {mnesia_recovery_test, mnesia_down_during_startup_init_disc_only, "Uses disc"}}.
+%{mnesia_recovery_test, mnesia_down_during_startup_tm_ram} 'IMPL'
+{skip, {mnesia_recovery_test, mnesia_down_during_startup_tm_disc, "Uses disc"}}.
+{skip, {mnesia_recovery_test, mnesia_down_during_startup_tm_disc_only, "Uses disc"}}.
+%{mnesia_recovery_test, explicit_stop_during_snmp} 'IMPL'
+
+{skip, {mnesia_recovery_test, schema_trans, "Uses Disk, needs disk log"}}.
+{skip, {mnesia_recovery_test, async_dirty, "Uses disc"}}.
+{skip, {mnesia_recovery_test, sync_dirty, "Uses disc"}}.
+{skip, {mnesia_recovery_test, sym_trans, "Uses disc"}}.
+{skip, {mnesia_recovery_test, asym_trans, "Uses disc"}}.
+
+{skip, {mnesia_recovery_test, after_full_disc_partition, "Not Yet impl"}}.
+{skip, {mnesia_recovery_test, after_corrupt_files, "Uses disk"}}.
+
+%{mnesia_evil_coverage_test, subscriptions} 'IMPL'
+%{mnesia_evil_coverage_test, nested_trans_both_ok} 'IMPL'
+%{mnesia_evil_coverage_test, nested_trans_child_dies} 'IMPL'
+%{mnesia_evil_coverage_test, nested_trans_parent_dies} 'IMPL'
+%{mnesia_evil_coverage_test, nested_trans_both_dies} 'IMPL'
+%{mnesia_evil_coverage_test, mix_of_trans_sync_dirty} 'IMPL'
+%{mnesia_evil_coverage_test, mix_of_trans_async_dirty} 'IMPL'
+%{mnesia_evil_coverage_test, mix_of_trans_ets} 'IMPL'
+
+{skip, {mnesia_recovery_test, disc_less, "Uses disc (on the other nodes)"}}.
+{skip, {mnesia_recovery_test, system_upgrade, "Not Yet impl"}}.
+%{mnesia_consistency_test, consistency_after_restart_1_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_restart_1_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restart_1_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_restart_2_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_restart_2_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restart_2_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_dump_tables_1_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, consistency_after_dump_tables_2_ram, "Uses disk"}}.
+%{mnesia_consistency_test, consistency_after_add_replica_2_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_add_replica_2_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_add_replica_2_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_add_replica_3_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_add_replica_3_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_add_replica_3_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_del_replica_2_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_del_replica_2_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_del_replica_2_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_del_replica_3_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_del_replica_3_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_del_replica_3_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_move_replica_2_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_move_replica_2_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_move_replica_2_disc_only, "Uses disc"}}.
+%{mnesia_consistency_test, consistency_after_move_replica_3_ram} 'IMPL'
+{skip, {mnesia_consistency_test, consistency_after_move_replica_3_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_move_replica_3_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_transform_table, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, consistency_after_change_table_copy_type, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_2_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_2_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_2_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_3_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_3_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_fallback_3_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_clear_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_clear_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_clear_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_recreate_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_recreate_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_restore_recreate_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, consistency_after_rename_of_node, "Not yet implemented"}}.
+{skip, {mnesia_consistency_test, updates_during_checkpoint_activation, "Uses disk"}}.
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_activation_2_disc, "Uses disc"}}.
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_activation_2_disc_only, "Uses disc"}}.
+%%{mnesia_consistency_test, updates_during_checkpoint_activation_3_ram} 'IMPL'
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_activation_3_disc, "Uses disc"}}.
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_activation_3_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, updates_during_checkpoint_iteration, "Uses disk"}}.
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_iteration_2_disc, "Uses disc"}}.
+%{skip, {mnesia_consistency_test, updates_during_checkpoint_iteration_2_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, load_table_with_activated_checkpoint_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, load_table_with_activated_checkpoint_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, load_table_with_activated_checkpoint_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, add_table_copy_to_table_with_activated_checkpoint_ram, "Uses disk"}}.
+{skip, {mnesia_consistency_test, add_table_copy_to_table_with_activated_checkpoint_disc, "Uses disc"}}.
+{skip, {mnesia_consistency_test, add_table_copy_to_table_with_activated_checkpoint_disc_only, "Uses disc"}}.
+{skip, {mnesia_consistency_test, inst_fallback_process_dies, "Uses disk"}}.
+{skip, {mnesia_consistency_test, fatal_when_inconsistency, "Uses disk"}}.
+{skip, {mnesia_consistency_test, after_delete, "Uses disk"}}.
+{skip, {mnesia_consistency_test, mnesia_down_during_backup_causes_switch, "Uses disk"}}.
+{skip, {mnesia_consistency_test, mnesia_down_during_backup_causes_abort, "Uses disk"}}.
+%{mnesia_consistency_test, cause_switch_after} 'IMPL'
+%{mnesia_consistency_test, cause_abort_before} 'IMPL'
+%{mnesia_consistency_test, cause_abort_after} 'IMPL'
+%{mnesia_consistency_test, change_schema_before} 'IMPL'
+%{mnesia_consistency_test, change_schema_after} 'IMPL'
+
diff --git a/lib/mnesia/test/mnesia_SUITE.erl b/lib/mnesia/test/mnesia_SUITE.erl
new file mode 100644
index 0000000000..b28deaf330
--- /dev/null
+++ b/lib/mnesia/test/mnesia_SUITE.erl
@@ -0,0 +1,203 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_SUITE).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+    ["Verify that Mnesia really is a distributed real-time DBMS",
+     "This is the test suite of the Mnesia DBMS. The test suite",
+     "covers many aspects of usage and is intended to be developed",
+     "incrementally. The test suite is divided into a hierarchy of test",
+     "suites where the leaves actually implement the test cases.",
+     "The intention of each test case and sub test suite can be",
+     "read in comments where they are implemented or, in the worst case,",
+     "from their long mnemonic names.",
+     "",
+     "The simplest test case of them all is called 'silly'",
+     "and is useful to run now and then, e.g. when some new fatal",
+     "bug has been introduced. It may be run even if Mnesia is in",
+     "such a bad shape that the test machinery cannot be used.",
+     "NB! Invoke the function directly with mnesia_SUITE:silly()",
+     "and do not involve the normal test machinery."];
+all(suite) ->
+ [
+ light,
+ medium,
+ heavy,
+ clean_up_suite
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+silly() ->
+ mnesia_install_test:silly().
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+light(doc) ->
+ ["The 'light' test suite runs a selected set of test suites and is",
+ "intended to be the smallest test suite that is meaningful",
+ "to run. It starts with an installation test (which in essence is the",
+ "'silly' test case) and then it covers all functions in the API in",
+ "various depths. All configuration parameters and examples are also",
+ "covered."];
+light(suite) ->
+ [
+ install,
+ nice,
+ evil,
+ {mnesia_frag_test, light},
+ qlc,
+ registry,
+ config,
+ examples
+ ].
+
+install(suite) ->
+ [{mnesia_install_test, all}].
+
+nice(suite) ->
+ [{mnesia_nice_coverage_test, all}].
+
+evil(suite) ->
+ [{mnesia_evil_coverage_test, all}].
+
+qlc(suite) ->
+ [{mnesia_qlc_test, all}].
+
+registry(suite) ->
+ [{mnesia_registry_test, all}].
+
+config(suite) ->
+ [{mnesia_config_test, all}].
+
+examples(suite) ->
+ [{mnesia_examples_test, all}].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+medium(doc) ->
+    ["The 'medium' test suite verifies the ACID (atomicity, consistency,",
+     "isolation and durability) properties and various recovery scenarios.",
+     "These tests may take quite a while to run."];
+medium(suite) ->
+ [
+ install,
+ atomicity,
+ isolation,
+ durability,
+ recovery,
+ consistency,
+ {mnesia_frag_test, medium}
+ ].
+
+atomicity(suite) ->
+ [{mnesia_atomicity_test, all}].
+
+isolation(suite) ->
+ [{mnesia_isolation_test, all}].
+
+durability(suite) ->
+ [{mnesia_durability_test, all}].
+
+recovery(suite) ->
+ [{mnesia_recovery_test, all}].
+
+consistency(suite) ->
+ [{mnesia_consistency_test, all}].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+heavy(doc) ->
+    ["The 'heavy' test suite runs some resource-consuming tests and",
+     "benchmarks."];
+heavy(suite) ->
+ [measure].
+
+measure(suite) ->
+ [{mnesia_measure_test, all}].
+
+prediction(suite) ->
+ [{mnesia_measure_test, prediction}].
+
+fairness(suite) ->
+ [{mnesia_measure_test, fairness}].
+
+benchmarks(suite) ->
+ [{mnesia_measure_test, benchmarks}].
+
+consumption(suite) ->
+ [{mnesia_measure_test, consumption}].
+
+scalability(suite) ->
+ [{mnesia_measure_test, scalability}].
+
+
+clean_up_suite(doc) -> ["Not a test case, only kills Mnesia and the nodes that were "
+                        "started during the tests"];
+clean_up_suite(suite) ->
+ [];
+clean_up_suite(Config) when is_list(Config)->
+ mnesia:kill(),
+ Slaves = mnesia_test_lib:lookup_config(nodenames, Config),
+ Nodes = lists:delete(node(), Slaves),
+ rpc:multicall(Nodes, erlang, halt, []),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+otp_r4b(doc) ->
+    ["This test suite is an extract of the grand Mnesia suite;",
+     "it contains OTP R4B-specific test cases"];
+otp_r4b(suite) ->
+ [
+ {mnesia_config_test, access_module},
+ {mnesia_config_test, dump_log_load_regulation},
+ {mnesia_config_test, embedded_mnemosyne},
+ {mnesia_config_test, ignore_fallback_at_startup},
+ {mnesia_config_test, max_wait_for_decision},
+ {mnesia_consistency_test, consistency_after_restore},
+ {mnesia_evil_backup, restore},
+ {mnesia_evil_coverage_test, offline_set_master_nodes},
+ {mnesia_evil_coverage_test, record_name},
+ {mnesia_evil_coverage_test, user_properties},
+ {mnesia_registry_test, all},
+ otp_2363
+ ].
+
+otp_2363(doc) ->
+ ["Index on disc only tables"];
+otp_2363(suite) ->
+ [
+ {mnesia_dirty_access_test, dirty_index_match_object_disc_only},
+ {mnesia_dirty_access_test,dirty_index_read_disc_only},
+ {mnesia_dirty_access_test,dirty_index_update_bag_disc_only},
+ {mnesia_dirty_access_test,dirty_index_update_set_disc_only},
+ {mnesia_evil_coverage_test, create_live_table_index_disc_only}
+ ].
+
+
+
diff --git a/lib/mnesia/test/mnesia_atomicity_test.erl b/lib/mnesia/test/mnesia_atomicity_test.erl
new file mode 100644
index 0000000000..645c203a91
--- /dev/null
+++ b/lib/mnesia/test/mnesia_atomicity_test.erl
@@ -0,0 +1,839 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_atomicity_test).
+-author('[email protected]').
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+    ["Verify atomicity of transactions",
+     "Verify that transactions are atomic, i.e. either all operations",
+     "in a transaction will be performed or none of them. It must be",
+     "assured that no partially completed operations leave any",
+     "effects in the database."];
+all(suite) ->
+ [
+ explicit_abort_in_middle_of_trans,
+ runtime_error_in_middle_of_trans,
+ kill_self_in_middle_of_trans,
+ throw_in_middle_of_trans,
+ mnesia_down_in_middle_of_trans
+ ].
+
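+%% A minimal, self-contained sketch (not part of the suite) of the
+%% atomicity property described above: an error raised inside the
+%% transaction fun aborts the whole transaction, so none of its writes
+%% become visible afterwards. The table name is made up for the example
+%% and Mnesia is assumed to be already started with a ram_copies schema.
+atomicity_sketch() ->
+    {atomic, ok} = mnesia:create_table(atomicity_sketch,
+                                       [{ram_copies, [node()]}]),
+    {aborted, _} = mnesia:transaction(fun() ->
+                                              ok = mnesia:write({atomicity_sketch, 1, b}),
+                                              erlang:error(on_purpose)
+                                      end),
+    %% The aborted write is not visible: the read returns no records
+    {atomic, []} = mnesia:transaction(fun() -> mnesia:read({atomicity_sketch, 1}) end),
+    ok.
+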
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+explicit_abort_in_middle_of_trans(suite) -> [];
+explicit_abort_in_middle_of_trans(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = explicit_abort_in_middle_of_trans,
+
+ Rec1A = {Tab, 1, a},
+ Rec1B = {Tab, 1, b},
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ %% Start a transaction on one node
+ {success, [A]} = ?start_activities([Node1]),
+
+    %% store an object in the Tab - first transaction
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1A) % returns ok when successful
+ end,
+ ?match_receive({A, ok}),
+ A ! end_trans,
+ ?match_receive({A, {atomic, end_trans}}),
+
+ %% second transaction: store some new objects and abort before the
+    %% transaction is finished -> the new changes should be invisible
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1B),
+ exit(abort_by_purpose) %does that stop the process A ???
+ end,
+ ?match_receive({A, {aborted, abort_by_purpose}}),
+
+
+ %?match_receive({A, {'EXIT', Pid, normal}}), % A died and sends EXIT
+
+
+    %% Start a second transaction process, after the first failed
+ {success, [B]} = ?start_activities([Node1]),
+
+    %% check that the interrupted transaction had no influence on the db
+ ?start_transactions([B]),
+ B ! fun() ->
+ ?match([Rec1A], mnesia:read({Tab, 1})),
+ ok
+ end,
+ ?match_receive({B, ok}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+runtime_error_in_middle_of_trans(suite) -> [];
+runtime_error_in_middle_of_trans(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = runtime_error_in_middle_of_trans,
+
+ Rec1A = {Tab, 1, a},
+ Rec1B = {Tab, 1, b},
+ Rec1C = {Tab, 1, c},
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ %% Start a transaction on one node
+ {success, [A]} = ?start_activities([Node1]),
+
+    %% store an object in the Tab - first transaction
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1A) % returns ok when successful
+ end,
+ ?match_receive({A, ok}),
+ A ! end_trans,
+ ?match_receive({A, {atomic, end_trans}}),
+
+ %% second transaction: store some new objects and abort before the
+    %% transaction is finished -> the new changes should be invisible
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1B),
+ erlang:error(foo), % that should provoke a runtime error
+ mnesia:write(Rec1C)
+ end,
+ ?match_receive({A, {aborted, _Reason}}),
+
+ %?match_receive({A, {'EXIT', Msg1}), % A died and sends EXIT
+
+
+    %% Start a second transaction process, after the first failed
+ {success, [B]} = ?start_activities([Node1]),
+
+    %% check that the interrupted transaction had no influence on the db
+ ?start_transactions([B]),
+ B ! fun() ->
+ ?match([Rec1A], mnesia:read({Tab, 1})),
+ ok
+ end,
+ ?match_receive({B, ok}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+kill_self_in_middle_of_trans(suite) -> [];
+kill_self_in_middle_of_trans(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = kill_self_in_middle_of_trans,
+
+ Rec1A = {Tab, 1, a},
+ Rec1B = {Tab, 1, b},
+ Rec1C = {Tab, 1, c},
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ %% Start a transaction on one node
+ {success, [A]} = ?start_activities([Node1]),
+
+    %% store an object in the Tab - first transaction
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1A) % returns ok when successful
+ end,
+ ?match_receive({A, ok}),
+ A ! end_trans,
+ ?match_receive({A, {atomic, end_trans}}),
+
+ %% second transaction: store some new objects and abort before the
+    %% transaction is finished -> the new changes should be invisible
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1B),
+ exit(self(), kill), % that should kill the process himself
+ % - poor guy !
+ mnesia:write(Rec1C)
+ end,
+ %%
+ %% exit(.., kill) : the transaction can't trap this error - thus no
+ %% proper result can be send by the test server
+
+ % ?match_receive({A, {aborted, Reason}}),
+
+ ?match_receive({'EXIT', _Pid, killed}), % A is killed and sends EXIT
+
+    %% Start a second transaction process, after the first failed
+ {success, [B]} = ?start_activities([Node1]),
+
+    %% check that the interrupted transaction had no influence on the db
+ ?start_transactions([B]),
+ B ! fun() ->
+ ?match([Rec1A], mnesia:read({Tab, 1})),
+ ok
+ end,
+ ?match_receive({B, ok}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+throw_in_middle_of_trans(suite) -> [];
+throw_in_middle_of_trans(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = throw_in_middle_of_trans,
+
+ Rec1A = {Tab, 1, a},
+ Rec1B = {Tab, 1, b},
+ Rec1C = {Tab, 1, c},
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ %% Start a transaction on one node
+ {success, [A]} = ?start_activities([Node1]),
+
+    %% store an object in the Tab - first transaction
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1A) % returns ok when successful
+ end,
+ ?match_receive({A, ok}),
+ A ! end_trans,
+ ?match_receive({A, {atomic, end_trans}}),
+
+ %% second transaction: store some new objects and abort before the
+    %% transaction is finished -> the new changes should be invisible
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write(Rec1B),
+ throw(exit_transactian_by_a_throw),
+ mnesia:write(Rec1C)
+ end,
+ ?match_receive({A, {aborted, {throw, exit_transactian_by_a_throw}}}),
+ % A ! end_trans, % is A still alive ?
+ % ?match_receive({A, {atomic, end_trans}}), % {'EXIT', Pid, normal}
+
+ %?match_receive({A, {'EXIT', Pid, normal}}), % A died and sends EXIT
+
+    %% Start a second transaction process, after the first failed
+ {success, [B]} = ?start_activities([Node1]),
+
+    %% check that the interrupted transaction had no influence on the db
+ ?start_transactions([B]),
+ B ! fun() ->
+ ?match([Rec1A], mnesia:read({Tab, 1})),
+ ok
+ end,
+ ?match_receive({B, ok}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+mnesia_down_in_middle_of_trans(suite) ->
+ [
+ mnesia_down_during_infinite_trans,
+ lock_waiter,
+ restart_check
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+mnesia_down_during_infinite_trans(suite) -> [];
+mnesia_down_during_infinite_trans(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+ Tab = mnesia_down_during_infinite_trans,
+
+ ?match({atomic, ok},
+ mnesia:create_table([{name, Tab}, {ram_copies, [Node1, Node2]}])),
+ %% Start a transaction on one node
+ {success, [A2, A1]} = ?start_activities([Node2, Node1]),
+    %% Start order of the transactions is important
+    %% We also need to sync the tid counter
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 1, test_ok}) end)),
+ mnesia_test_lib:start_sync_transactions([A2, A1]),
+
+ %% Obtain a write lock and wait forever
+ RecA = {Tab, 1, test_not_ok},
+ A1 ! fun() -> mnesia:write(RecA) end,
+ ?match_receive({A1, ok}),
+
+ A1 ! fun() -> process_flag(trap_exit, true), timer:sleep(infinity) end,
+ ?match_receive(timeout),
+
+ %% Try to get read lock, but gets queued
+ A2 ! fun() -> mnesia:read({Tab, 1}) end,
+ ?match_receive(timeout),
+
+ %% Kill Mnesia on other node
+ mnesia_test_lib:kill_mnesia([Node1]),
+
+ %% Second transaction gets the read lock
+ ?match_receive({A2, [{Tab, 1, test_ok}]}),
+ exit(A1, kill), % Needed since we trap exit
+
+ ?verify_mnesia([Node2], [Node1]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+lock_waiter(doc) ->
+    ["The purpose of this test case is to test the following situation:",
+     "process B locks an object, process A accesses that object as",
+     "well, but A has to wait for the lock to be released. Then",
+     "mnesia of B goes down. Question: will A get the lock?",
+     "Important: the transaction of A is the oldest one! (= a little tricky)",
+     "",
+     "Several different access operations shall be tested:",
+     "rt = read_lock_table, wt = write_lock_table, r = read,",
+     "sw = s_write, w = write, wr = wread"];
+lock_waiter(suite) ->
+ [
+ lock_waiter_sw_r,
+ lock_waiter_sw_rt,
+ lock_waiter_sw_wt,
+ lock_waiter_wr_r,
+ lock_waiter_srw_r,
+ lock_waiter_sw_sw,
+ lock_waiter_sw_w,
+ lock_waiter_sw_wr,
+ lock_waiter_sw_srw,
+ lock_waiter_wr_wt,
+ lock_waiter_srw_wt,
+ lock_waiter_wr_sw,
+ lock_waiter_srw_sw,
+ lock_waiter_wr_w,
+ lock_waiter_srw_w,
+ lock_waiter_r_sw,
+ lock_waiter_r_w,
+ lock_waiter_r_wt,
+ lock_waiter_rt_sw,
+ lock_waiter_rt_w,
+ lock_waiter_rt_wt,
+ lock_waiter_wr_wr,
+ lock_waiter_srw_srw,
+ lock_waiter_wt_r,
+ lock_waiter_wt_w,
+ lock_waiter_wt_rt,
+ lock_waiter_wt_wt,
+ lock_waiter_wt_wr,
+ lock_waiter_wt_srw,
+ lock_waiter_wt_sw,
+ lock_waiter_w_wr,
+ lock_waiter_w_srw,
+ lock_waiter_w_sw,
+ lock_waiter_w_r,
+ lock_waiter_w_w,
+ lock_waiter_w_rt,
+ lock_waiter_w_wt
+ ].
+
+lock_waiter_sw_r(suite) -> [];
+lock_waiter_sw_r(Config) when is_list(Config) ->
+ start_lock_waiter(sw, r, Config).
+
+lock_waiter_sw_rt(suite) -> [];
+lock_waiter_sw_rt(Config) when is_list(Config) ->
+ start_lock_waiter(sw, rt, Config).
+
+lock_waiter_sw_wt(suite) -> [];
+lock_waiter_sw_wt(Config) when is_list(Config) ->
+ start_lock_waiter(sw, wt,Config).
+
+lock_waiter_wr_r(suite) -> [];
+lock_waiter_wr_r(Config) when is_list(Config) ->
+ start_lock_waiter(wr, r, Config).
+
+lock_waiter_srw_r(suite) -> [];
+lock_waiter_srw_r(Config) when is_list(Config) ->
+ start_lock_waiter(srw, r, Config).
+
+lock_waiter_sw_sw(suite) -> [];
+lock_waiter_sw_sw(Config) when is_list(Config) ->
+ start_lock_waiter(sw, sw,Config).
+
+lock_waiter_srw_srw(suite) -> [];
+lock_waiter_srw_srw(Config) when is_list(Config) ->
+ start_lock_waiter(srw, srw,Config).
+
+lock_waiter_wr_wr(suite) -> [];
+lock_waiter_wr_wr(Config) when is_list(Config) ->
+ start_lock_waiter(wr, wr,Config).
+
+lock_waiter_sw_w(suite) -> [];
+lock_waiter_sw_w(Config) when is_list(Config) ->
+ start_lock_waiter(sw, w,Config).
+
+lock_waiter_sw_wr(suite) -> [];
+lock_waiter_sw_wr(Config) when is_list(Config) ->
+ start_lock_waiter(sw, wr,Config).
+
+lock_waiter_sw_srw(suite) -> [];
+lock_waiter_sw_srw(Config) when is_list(Config) ->
+ start_lock_waiter(sw, srw,Config).
+
+lock_waiter_wr_wt(suite) -> [];
+lock_waiter_wr_wt(Config) when is_list(Config) ->
+ start_lock_waiter(wr, wt,Config).
+
+lock_waiter_srw_wt(suite) -> [];
+lock_waiter_srw_wt(Config) when is_list(Config) ->
+ start_lock_waiter(srw, wt,Config).
+
+lock_waiter_wr_sw(suite) -> [];
+lock_waiter_wr_sw(Config) when is_list(Config) ->
+ start_lock_waiter(wr, sw,Config).
+
+lock_waiter_srw_sw(suite) -> [];
+lock_waiter_srw_sw(Config) when is_list(Config) ->
+ start_lock_waiter(srw, sw,Config).
+
+lock_waiter_wr_w(suite) -> [];
+lock_waiter_wr_w(Config) when is_list(Config) ->
+ start_lock_waiter(wr, w,Config).
+
+lock_waiter_srw_w(suite) -> [];
+lock_waiter_srw_w(Config) when is_list(Config) ->
+ start_lock_waiter(srw, w,Config).
+
+lock_waiter_r_sw(suite) -> [];
+lock_waiter_r_sw(Config) when is_list(Config) ->
+ start_lock_waiter(r, sw,Config).
+
+lock_waiter_r_w(suite) -> [];
+lock_waiter_r_w(Config) when is_list(Config) ->
+ start_lock_waiter(r, w,Config).
+
+lock_waiter_r_wt(suite) -> [];
+lock_waiter_r_wt(Config) when is_list(Config) ->
+ start_lock_waiter(r, wt,Config).
+
+lock_waiter_rt_sw(suite) -> [];
+lock_waiter_rt_sw(Config) when is_list(Config) ->
+ start_lock_waiter(rt, sw,Config).
+
+lock_waiter_rt_w(suite) -> [];
+lock_waiter_rt_w(Config) when is_list(Config) ->
+ start_lock_waiter(rt, w,Config).
+
+lock_waiter_rt_wt(suite) -> [];
+lock_waiter_rt_wt(Config) when is_list(Config) ->
+ start_lock_waiter(rt, wt,Config).
+
+lock_waiter_wt_r(suite) -> [];
+lock_waiter_wt_r(Config) when is_list(Config) ->
+ start_lock_waiter(wt, r,Config).
+
+lock_waiter_wt_w(suite) -> [];
+lock_waiter_wt_w(Config) when is_list(Config) ->
+ start_lock_waiter(wt, w,Config).
+
+lock_waiter_wt_rt(suite) -> [];
+lock_waiter_wt_rt(Config) when is_list(Config) ->
+ start_lock_waiter(wt, rt,Config).
+
+lock_waiter_wt_wt(suite) -> [];
+lock_waiter_wt_wt(Config) when is_list(Config) ->
+ start_lock_waiter(wt, wt,Config).
+
+lock_waiter_wt_wr(suite) -> [];
+lock_waiter_wt_wr(Config) when is_list(Config) ->
+ start_lock_waiter(wt, wr,Config).
+
+lock_waiter_wt_srw(suite) -> [];
+lock_waiter_wt_srw(Config) when is_list(Config) ->
+ start_lock_waiter(wt, srw,Config).
+
+lock_waiter_wt_sw(suite) -> [];
+lock_waiter_wt_sw(Config) when is_list(Config) ->
+ start_lock_waiter(wt, sw,Config).
+
+lock_waiter_w_wr(suite) -> [];
+lock_waiter_w_wr(Config) when is_list(Config) ->
+ start_lock_waiter(w, wr, Config).
+
+lock_waiter_w_srw(suite) -> [];
+lock_waiter_w_srw(Config) when is_list(Config) ->
+ start_lock_waiter(w, srw, Config).
+
+lock_waiter_w_sw(suite) -> [];
+lock_waiter_w_sw(Config) when is_list(Config) ->
+ start_lock_waiter(w, sw, Config).
+
+lock_waiter_w_r(suite) -> [];
+lock_waiter_w_r(Config) when is_list(Config) ->
+ start_lock_waiter(w, r, Config).
+
+lock_waiter_w_w(suite) -> [];
+lock_waiter_w_w(Config) when is_list(Config) ->
+ start_lock_waiter(w, w, Config).
+
+lock_waiter_w_rt(suite) -> [];
+lock_waiter_w_rt(Config) when is_list(Config) ->
+ start_lock_waiter(w, rt, Config).
+
+lock_waiter_w_wt(suite) -> [];
+lock_waiter_w_wt(Config) when is_list(Config) ->
+ start_lock_waiter(w, wt, Config).
+
+start_lock_waiter(BlockOpA, BlockOpB, Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+
+ TabName = mk_tab_name(lock_waiter_),
+ ?match({atomic, ok}, mnesia:create_table(TabName,
+ [{ram_copies, [N1, N2]}])),
+
+ %% initialize the table with object {1, c} - when there
+ %% is a read transaction, the read will find that value
+ ?match({atomic, ok}, mnesia:sync_transaction(fun() -> mnesia:write({TabName, 1, c}) end)),
+ rpc:call(N2, ?MODULE, sync_tid_release, []),
+
+ Tester = self(),
+ Fun_A =fun() ->
+ NewCounter = incr_restart_counter(),
+ if
+ NewCounter == 1 ->
+ Tester ! go_ahead_test,
+ receive go_ahead -> ok end;
+ true -> ok
+ end,
+ lock_waiter_fun(BlockOpA, TabName, a),
+ NewCounter
+ end,
+
+ %% it's not possible to just spawn the transaction, because
+ %% the result shall be evaluated
+ A = spawn_link(N1, ?MODULE, perform_restarted_transaction, [Fun_A]),
+
+ ?match(ok, receive go_ahead_test -> ok after 10000 -> timeout end),
+
+ mnesia_test_lib:sync_trans_tid_serial([N1, N2]),
+
+ Fun_B = fun() ->
+ lock_waiter_fun(BlockOpB, TabName, b),
+ A ! go_ahead,
+ wait(infinity)
+ end,
+
+ B = spawn_link(N2, mnesia, transaction, [Fun_B, 100]),
+
+ io:format("waiting for A (~p on ~p) to be in the queue ~n", [A, [N1, N2]]),
+ wait_for_a(A, [N1, N2]),
+
+ io:format("Queus ~p~n",
+ [[{N,rpc:call(N, mnesia, system_info, [lock_queue])} || N <- Nodes]]),
+
+ KillNode = node(B),
+ io:format("A was in the queue, time to kill Mnesia on B's node (~p on ~p)~n",
+ [B, KillNode]),
+
+ mnesia_test_lib:kill_mnesia([KillNode]), % kill mnesia of fun B
+
+    %% Read Ops do not need to be restarted
+ ExpectedCounter =
+ if
+ BlockOpA == sw, BlockOpB == w -> 1;
+ BlockOpA == sw, BlockOpB == wt -> 1;
+ BlockOpA == sw, BlockOpB == wr -> 1;
+ BlockOpA == srw, BlockOpB == w -> 1;
+ BlockOpA == srw, BlockOpB == wt -> 1;
+ BlockOpA == srw, BlockOpB == wr -> 1;
+ BlockOpA == r, BlockOpB /= sw -> 1;
+ BlockOpA == rt, BlockOpB /= sw -> 1;
+ true -> 2
+ end,
+ ?match_multi_receive([{'EXIT', A, {atomic, ExpectedCounter}},
+ {'EXIT', B, killed}]),
+
+ %% the expected result depends on the transaction of
+ %% fun A - when that doesn't change the object in the
+ %% table (e.g. it is a read) then the predefined
+ %% value {Tabname, 1, c} is expected to be the result here
+ ExpectedResult =
+ case BlockOpA of
+ w -> {TabName, 1, a};
+ sw ->{TabName, 1, a};
+ _all_other -> {TabName, 1, c}
+ end,
+
+ ?match({atomic, [ExpectedResult]},
+ mnesia:transaction(fun() -> mnesia:read({TabName, 1}) end, 100)),
+ ?verify_mnesia([N1], [N2]).
+
+mk_tab_name(Prefix) ->
+ {Mega, Sec, Micro} = erlang:now(),
+ list_to_atom(lists:concat([Prefix , Mega, '_', Sec, '_', Micro])).
+
+lock_waiter_fun(Op, TabName, Val) ->
+ case Op of
+ rt -> mnesia:read_lock_table(TabName);
+ wt -> mnesia:write_lock_table(TabName);
+ r -> mnesia:read({TabName, 1});
+ w -> mnesia:write({TabName, 1, Val});
+ wr -> mnesia:wread({TabName, 1});
+ srw -> mnesia:read(TabName, 1, sticky_write);
+ sw -> mnesia:s_write({TabName, 1, Val})
+ end.
+
+wait_for_a(Pid, Nodes) ->
+ wait_for_a(Pid, Nodes, 5).
+
+wait_for_a(_P, _N, 0) ->
+ ?error("Timeout while waiting for lock on a~n", []);
+
+wait_for_a(Pid, Nodes, Count) ->
+ %% io:format("WAIT_FOR_A ~p ON ~w ~n", [Pid, Nodes]),
+ List = [rpc:call(N, mnesia, system_info, [lock_queue]) || N <- Nodes],
+ Q = lists:append(List),
+ check_q(Pid, Q, Nodes, Count).
+
+check_q(Pid, [{{_Oid,_Tid}, _Op, Pid, _WFT} | _Tail], _N, _Count) ->
+ ok;
+check_q(Pid, [{_Oid, _Op, Pid, _Tid, _WFT} | _Tail], _N, _Count) ->
+ ok;
+check_q(Pid, [_ | Tail], N, Count) ->
+ check_q(Pid, Tail, N, Count);
+check_q(Pid, [], N, Count) ->
+ timer:sleep(500),
+ wait_for_a(Pid, N, Count - 1).
+
+perform_restarted_transaction(Fun_Trans) ->
+    %% the result of the transaction shall be:
+    %% - undefined (if the transaction was never executed)
+    %% - Times (the number of times that the transaction has been executed)
+
+ Result = mnesia:transaction(Fun_Trans, 100),
+ exit(Result).
+
+%% Returns new val
+incr_restart_counter() ->
+ NewCount =
+ case get(count_restart_of_transaction) of
+ undefined -> 1;
+ OldCount -> OldCount + 1
+ end,
+ put(count_restart_of_transaction, NewCount),
+ NewCount.
+
+wait(Mseconds) ->
+ receive
+ after Mseconds -> ok
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+restart_check(doc) ->
+    [
+     "test case: 'A' performs a transaction on a table which",
+     "is only replicated on node B. During that transaction",
+     "mnesia on node B is killed. The transaction of A should",
+     "be stopped, since there is no further replica.",
+     "rt = read_lock_table, wt = write_lock_table, r = read,",
+     "sw = s_write, w = write, wr = wread"];
+restart_check(suite) ->
+ [
+ restart_r_one,
+ restart_w_one,
+ restart_rt_one,
+ restart_wt_one,
+ restart_wr_one,
+ restart_sw_one,
+ restart_r_two,
+ restart_w_two,
+ restart_rt_two,
+ restart_wt_two,
+ restart_wr_two,
+ restart_sw_two
+ ].
+
+restart_r_one(suite) -> [];
+restart_r_one(Config) when is_list(Config) ->
+ start_restart_check(r, one, Config).
+
+restart_w_one(suite) -> [];
+restart_w_one(Config) when is_list(Config) ->
+ start_restart_check(w, one, Config).
+
+restart_rt_one(suite) -> [];
+restart_rt_one(Config) when is_list(Config) ->
+ start_restart_check(rt, one, Config).
+
+restart_wt_one(suite) -> [];
+restart_wt_one(Config) when is_list(Config) ->
+ start_restart_check(wt, one, Config).
+
+restart_wr_one(suite) -> [];
+restart_wr_one(Config) when is_list(Config) ->
+ start_restart_check(wr, one, Config).
+
+restart_sw_one(suite) -> [];
+restart_sw_one(Config) when is_list(Config) ->
+ start_restart_check(sw, one, Config).
+
+restart_r_two(suite) -> [];
+restart_r_two(Config) when is_list(Config) ->
+ start_restart_check(r, two, Config).
+
+restart_w_two(suite) -> [];
+restart_w_two(Config) when is_list(Config) ->
+ start_restart_check(w, two, Config).
+
+restart_rt_two(suite) -> [];
+restart_rt_two(Config) when is_list(Config) ->
+ start_restart_check(rt, two, Config).
+
+restart_wt_two(suite) -> [];
+restart_wt_two(Config) when is_list(Config) ->
+ start_restart_check(wt, two, Config).
+
+restart_wr_two(suite) -> [];
+restart_wr_two(Config) when is_list(Config) ->
+ start_restart_check(wr, two, Config).
+
+restart_sw_two(suite) -> [];
+restart_sw_two(Config) when is_list(Config) ->
+ start_restart_check(sw, two, Config).
+
+start_restart_check(RestartOp, ReplicaNeed, Config) ->
+ [N1, N2, N3] = Nodes = ?acquire_nodes(3, Config),
+
+ {TabName, _TabNodes} = create_restart_table(ReplicaNeed, Nodes),
+
+ %% initialize the table with object {1, c} - when there
+ %% is a read transaction, the read will find that value
+ ?match({atomic, ok}, mnesia:sync_transaction(fun() -> mnesia:write({TabName, 1, c}) end)),
+
+ %% Really sync tid_release
+ rpc:multicall([N2,N3], ?MODULE, sync_tid_release, []),
+ Coord = self(),
+
+ Fun_A = fun() ->
+ NewCounter = incr_restart_counter(),
+ case NewCounter of
+ 1 ->
+ mnesia:write({TabName, 1, d}),
+ %% send a message to the test proc
+ Coord ! {self(),fun_a_is_blocked},
+ receive go_ahead -> ok end;
+ _ ->
+ %% the fun will NOT be blocked here
+ restart_fun_A(RestartOp, TabName)
+ end,
+ NewCounter
+ end,
+
+ A = spawn_link(N1, ?MODULE, perform_restarted_transaction, [Fun_A]),
+ ?match_receive({A,fun_a_is_blocked}),
+
+    %% mnesia shall be killed on the node that A is reading
+    %% the information from
+ kill_where_to_read(TabName, N1, [N2, N3]),
+
+    %% wait some time to let mnesia go down and spread the news around
+ %% fun A shall be able to finish its job before being restarted
+ wait(500),
+ A ! go_ahead,
+
+    %% the sticky write doesn't work on remote nodes!
+ ExpectedMsg =
+ case RestartOp of
+ sw when ReplicaNeed == two ->
+ {'EXIT',A,{aborted, {not_local, TabName}}};
+ _all_other ->
+ case ReplicaNeed of
+ one ->
+ {'EXIT',A,{aborted, {no_exists, TabName}}};
+ two ->
+ {'EXIT',A,{atomic, 2}}
+ end
+ end,
+
+ ?match_receive(ExpectedMsg),
+
+ %% now mnesia has to be started again on the node KillNode
+ %% because the next test suite will need it
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [TabName])),
+
+
+ %% the expected result depends on the transaction of
+    %% fun A - when that doesn't change the object in the
+ %% table (e.g. it is a read) then the predefined
+ %% value {Tabname, 1, c} is expected to be the result here
+
+ ExpectedResult =
+ case ReplicaNeed of
+ one ->
+ [];
+ two ->
+ case RestartOp of
+ w -> [{TabName, 1, a}];
+ _ ->[ {TabName, 1, c}]
+ end
+ end,
+
+ ?match({atomic, ExpectedResult},
+ mnesia:transaction(fun() -> mnesia:read({TabName, 1}) end,100)),
+ ?verify_mnesia(Nodes, []).
+
+create_restart_table(ReplicaNeed, [_N1, N2, N3]) ->
+ TabNodes =
+ case ReplicaNeed of
+ one -> [N2];
+ two -> [N2, N3]
+ end,
+ TabName = mk_tab_name(restart_check_),
+ ?match({atomic, ok}, mnesia:create_table(TabName, [{ram_copies, TabNodes}])),
+ {TabName, TabNodes}.
+
+restart_fun_A(Op, TabName) ->
+ case Op of
+ rt -> mnesia:read_lock_table(TabName);
+ wt -> mnesia:write_lock_table(TabName);
+ r -> mnesia:read( {TabName, 1});
+ w -> mnesia:write({TabName, 1, a});
+ wr -> mnesia:wread({TabName, 1});
+ sw -> mnesia:s_write({TabName, 1, a})
+ end.
+
+kill_where_to_read(TabName, N1, Nodes) ->
+ Read = rpc:call(N1,mnesia,table_info, [TabName, where_to_read]),
+ case lists:member(Read, Nodes) of
+ true ->
+ mnesia_test_lib:kill_mnesia([Read]);
+ false ->
+ ?error("Fault while killing Mnesia: ~p~n", [Read]),
+ mnesia_test_lib:kill_mnesia(Nodes)
+ end.
+
+sync_tid_release() ->
+ sys:get_status(whereis(mnesia_tm)),
+ sys:get_status(whereis(mnesia_locker)),
+ ok.
+
diff --git a/lib/mnesia/test/mnesia_config_backup.erl b/lib/mnesia/test/mnesia_config_backup.erl
new file mode 100644
index 0000000000..a33ec6ac5c
--- /dev/null
+++ b/lib/mnesia/test/mnesia_config_backup.erl
@@ -0,0 +1,105 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_config_backup).
+-author('[email protected]').
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% This module is used for testing the backup module config parameter.
+%%
+%% This module is an impostor for the mnesia_backup module.
+%%
+%%
+%% Original doc below:
+%%
+%% This module contains one implementation of callback functions
+%% used by Mnesia at backup and restore. The user may, however,
+%% write their own module with the same interface as mnesia_backup and
+%% configure Mnesia so that the alternate module performs the actual
+%% accesses to the backup media. This means that the user may put
+%% the backup on media that Mnesia does not know about, possibly
+%% on hosts where Erlang is not running.
+%%
+%% The OpaqueData argument is never interpreted by other parts of
+%% Mnesia. It is the property of this module. Alternate implementations
+%% of this module may have different interpretations of OpaqueData.
+%% The OpaqueData argument given to open_write/1 and open_read/1
+%% are forwarded directly from the user.
+%%
+%% All functions must return {ok, NewOpaqueData} or {error, Reason}.
+%%
+%% The NewOpaqueData arguments returned by backup callback functions will
+%% be given as input when the next backup callback function is invoked.
+%% If any return value does not match {ok, _} the backup will be aborted.
+%%
+%% The NewOpaqueData arguments returned by restore callback functions will
+%% be given as input when the next restore callback function is invoked.
+%% If any return value does not match {ok, _} the restore will be aborted.
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
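+%% A minimal usage sketch (not part of the test itself): this module can
+%% either be installed as Mnesia's default backup callback through the
+%% backup_module configuration parameter, or be passed explicitly to the
+%% backup functions. The file name is arbitrary and the exact call sites
+%% below are an assumption based on the description above, e.g.
+%%
+%%   mnesia:start([{backup_module, mnesia_config_backup}]),
+%%   mnesia:backup("demo.BUP"),
+%%
+%% or, leaving the default configuration untouched,
+%%
+%%   mnesia:backup("demo.BUP", mnesia_config_backup).
+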
+-export([
+ open_write/1, write/2, commit_write/1, abort_write/1,
+ open_read/1, read/1, close_read/1
+ ]).
+
+-record(backup, {name, mode, items}).
+
+open_write(Name) ->
+ file:delete(Name),
+ {ok, #backup{name = Name, mode = write, items = []}}.
+
+write(Opaque, Item) when Opaque#backup.mode == write ->
+ %% Build the list in reverse order
+ {ok, Opaque#backup{items = [Item | Opaque#backup.items]}}.
+
+commit_write(Opaque) when Opaque#backup.mode == write ->
+ Bin = term_to_binary(Opaque#backup.items),
+ case file:write_file(Opaque#backup.name, Bin) of
+ ok ->
+ {ok, Opaque#backup{mode = closed, items = []}};
+ {error, Reason} ->
+ {error, {commit_write, Reason}}
+ end.
+
+abort_write(Opaque) ->
+ {ok, Opaque#backup{mode = closed, items = []}}.
+
+open_read(Name) ->
+ case file:read_file(Name) of
+ {ok, Bin} ->
+ ReverseList = binary_to_term(Bin),
+ List = lists:reverse(ReverseList),
+ {ok, #backup{name = Name, mode = read, items = List}};
+ {error, Reason} ->
+ {error, {open_read, Reason}}
+ end.
+
+read(Opaque) when Opaque#backup.mode == read ->
+ case Opaque#backup.items of
+ [Head | Tail] ->
+ {ok, Opaque#backup{items = Tail}, Head};
+ [] ->
+ {ok, Opaque#backup{mode = closed}, []}
+ end.
+
+close_read(Opaque) ->
+ {ok, Opaque#backup{mode = closed, items = []}}.
diff --git a/lib/mnesia/test/mnesia_config_event.erl b/lib/mnesia/test/mnesia_config_event.erl
new file mode 100644
index 0000000000..6c1dea7ed5
--- /dev/null
+++ b/lib/mnesia/test/mnesia_config_event.erl
@@ -0,0 +1,74 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_config_event).
+-author('[email protected]').
+
+-behaviour(gen_event).
+
+%%
+%% This module was stolen from Mnesia
+%%
+
+
+%% gen_event callback interface
+-export([init/1, handle_event/2, handle_call/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+
+init(_Args) ->
+ {ok, []}.
+
+handle_event(Msg, State) ->
+ handle_any_event(Msg, State).
+
+handle_info(Msg, State) ->
+ handle_any_event(Msg, State).
+
+
+handle_call(Msg, State) ->
+ handle_any_event(Msg, State).
+
+
+%% The main...
+
+handle_any_event({get_log, Pid}, State) ->
+ Pid ! {log, State},
+ {ok, State};
+handle_any_event(Msg, State) ->
+ io:format("Got event: ~p~n", [Msg]),
+ {ok, [Msg | State]}.
+
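+%% A minimal sketch (not part of the test) of how this handler can be
+%% exercised on its own with plain gen_event, without involving Mnesia:
+%% install it on a throw-away manager, feed it an arbitrary event, and
+%% fetch the accumulated log via the {get_log, Pid} message handled above.
+%%
+%%   {ok, Mgr} = gen_event:start_link(),
+%%   ok = gen_event:add_handler(Mgr, mnesia_config_event, []),
+%%   gen_event:notify(Mgr, {mnesia_system_event, example_event}),
+%%   gen_event:notify(Mgr, {get_log, self()}),
+%%   receive {log, Log} -> Log end.
+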
+%%-----------------------------------------------------------------
+%% terminate(Reason, State) ->
+%% AnyVal
+%%-----------------------------------------------------------------
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%----------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Upgrade process when its code is to be changed
+%% Returns: {ok, NewState}
+%%----------------------------------------------------------------------
+code_change(_OldVsn, _State, _Extra) ->
+ exit(not_supported).
+
diff --git a/lib/mnesia/test/mnesia_config_test.erl b/lib/mnesia/test/mnesia_config_test.erl
new file mode 100644
index 0000000000..7b62c63a62
--- /dev/null
+++ b/lib/mnesia/test/mnesia_config_test.erl
@@ -0,0 +1,1466 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_config_test).
+-author('[email protected]').
+
+-include("mnesia_test_lib.hrl").
+
+-record(test_table,{i,a1,a2,a3}).
+-record(test_table2,{i, b}).
+
+-export([
+ all/1,
+ access_module/1,
+ auto_repair/1,
+ backup_module/1,
+ debug/1,
+ dir/1,
+ dump_log_load_regulation/1,
+ dump_log_thresholds/1,
+ dump_log_update_in_place/1,
+ embedded_mnemosyne/1,
+ event_module/1,
+ ignore_fallback_at_startup/1,
+ inconsistent_database/1,
+ max_wait_for_decision/1,
+ send_compressed/1,
+
+ app_test/1,
+ schema_config/1,
+ schema_merge/1,
+ unknown_config/1,
+
+ dump_log_time_threshold/1,
+ dump_log_write_threshold/1,
+
+ start_one_disc_full_then_one_disc_less/1,
+ start_first_one_disc_less_then_one_disc_full/1,
+ start_first_one_disc_less_then_two_more_disc_less/1,
+ schema_location_and_extra_db_nodes_combinations/1,
+ table_load_to_disc_less_nodes/1,
+ dynamic_connect/1,
+ dynamic_basic/1,
+ dynamic_ext/1,
+ dynamic_bad/1,
+
+ init_per_testcase/2,
+ fin_per_testcase/2,
+ c_nodes/0
+ ]).
+
+-export([check_logs/1]).
+
+-define(init(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ {reload_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
+-define(acquire(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ {reload_appls, [mnesia]},
+ create_schema,
+ {start_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
+-define(acquire_schema(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ {reload_appls, [mnesia]},
+ create_schema],
+ N, Config, ?FILE, ?LINE)).
+-define(cleanup(N, Config),
+ mnesia_test_lib:prepare_test_case([{reload_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
+-define(trans(Fun),
+ ?match({atomic, ok}, mnesia:transaction(Fun))).
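+
+%% Helper summary: ?init prepares the requested nodes, deletes any old schema
+%% and reloads the mnesia application; ?acquire_schema additionally creates a
+%% fresh schema; ?acquire also starts Mnesia; ?cleanup reloads the application
+%% again; ?trans runs a fun in a transaction and asserts {atomic, ok}.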
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+all(doc) ->
+ [
+ "Test all configuration parameters",
+ "Perform an exhaustive test of all the various parameters that",
+ "may be used to configure the Mnesia application.",
+ "",
+ "Hint: Check out the unofficial function mnesia:start/1.",
+ " But be careful to cleanup all configuration parameters",
+ " afterwards since the rest of the test suite may rely on",
+ " these default configurations. Perhaps it is best to run",
+ " these tests in a separate node which is dropped afterwards.",
+     "Are all configuration parameters really covered?"];
+
+all(suite) ->
+ [
+ access_module,
+ auto_repair,
+ backup_module,
+ debug,
+ dir,
+ dump_log_load_regulation,
+ dump_log_thresholds,
+ dump_log_update_in_place,
+ embedded_mnemosyne,
+ event_module,
+ ignore_fallback_at_startup,
+ inconsistent_database,
+ max_wait_for_decision,
+ send_compressed,
+
+ app_test,
+ schema_config,
+ unknown_config
+ ].
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+access_module(doc) ->
+ ["Replace the activity access module with another module and ",
+ "use it to read and write to some alternate table storage"];
+access_module(suite) -> [];
+access_module(Config) when is_list(Config) ->
+ Nodes = ?acquire_schema(1, Config),
+ ?match(ok, mnesia:start([{access_module, mnesia_frag}])),
+
+ ?match(mnesia_frag, mnesia:system_info(access_module)),
+
+ access_tab(ram_copies, Nodes),
+ case mnesia_test_lib:diskless(Config) of
+ true -> skip;
+ false ->
+ access_tab(disc_copies, Nodes)
+ , access_tab(disc_only_copies, Nodes)
+ end,
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config).
+
+access_tab(Storage, Nodes) ->
+ Tab = list_to_atom(lists:concat([access_tab_, Storage])),
+ RecName = some_access,
+ Attr = val,
+ TabDef = [{Storage, Nodes},
+ {type, bag},
+ {index, [Attr]},
+ {record_name, RecName}],
+ ?match({atomic,ok}, mnesia:create_table(Tab, TabDef)),
+
+ Activity = fun(Kind) ->
+ A = [Kind, Tab, RecName, Attr, Nodes],
+ io:format("kind: ~w, storage: ~w~n", [Kind, Storage]),
+ mnesia:activity(Kind, fun do_access/5, A)
+ end,
+ ModActivity = fun(Kind, M) ->
+ io:format("kind: ~w, storage: ~w. module: ~w~n",
+ [Kind, Storage, M]),
+ A = [Kind, Tab, RecName, Attr, Nodes],
+ mnesia:activity(Kind, fun do_access/5, A, M)
+ end,
+ ?match(ok, Activity(transaction)),
+ ?match(ok, Activity({transaction, 47})),
+ ?match(ok, ModActivity(transaction, mnesia)),
+ ?match(ok, ModActivity(transaction, mnesia_frag)),
+
+ ?match(ok, Activity(async_dirty)),
+ ?match(ok, Activity(sync_dirty)),
+ case Storage of
+ ram_copies ->
+ ?match(ok, Activity(ets));
+ _ ->
+ ignore
+ end.
+
+do_access(Kind, Tab, RecName, Attr, Nodes) ->
+ Tens = lists:sort([{RecName, 1, 10}, {RecName, 3, 10}]),
+ {OptNodes, OptTens} =
+ case Kind of
+ transaction -> {Nodes, Tens};
+ {transaction, _} -> {Nodes, Tens};
+ async_dirty -> {[], Tens};
+ sync_dirty -> {[], Tens};
+ ets -> {[], []}
+ end,
+ ?match(RecName, mnesia:table_info(Tab, record_name)),
+
+ ?match(ok, mnesia:write(Tab, {RecName, 1, 10}, write)),
+ ?match(ok, mnesia:write(Tab, {RecName, 2, 20}, sticky_write)),
+ ?match(ok, mnesia:write(Tab, {RecName, 2, 21}, sticky_write)),
+ ?match(ok, mnesia:write(Tab, {RecName, 2, 22}, write)),
+ ?match(ok, mnesia:write(Tab, {RecName, 3, 10}, write)),
+
+ Twos = [{RecName, 2, 20}, {RecName, 2, 21}, {RecName, 2, 22}],
+ ?match(Twos, lists:sort(mnesia:read(Tab, 2, read))),
+
+ ?match(ok, mnesia:delete_object(Tab, {RecName, 2, 21}, sticky_write)),
+
+ TenPat = {RecName, '_', 10},
+ ?match(Tens, lists:sort(mnesia:match_object(Tab, TenPat, read))),
+ ?match(OptTens, lists:sort(mnesia:index_match_object(Tab, TenPat, Attr, read) )),
+ ?match(OptTens, lists:sort(mnesia:index_read(Tab, 10, Attr))),
+ Keys = [1, 2, 3],
+ ?match(Keys, lists:sort(mnesia:all_keys(Tab))),
+
+ First = mnesia:first(Tab),
+ Mid = mnesia:next(Tab, First),
+ Last = mnesia:next(Tab, Mid),
+ ?match('$end_of_table', mnesia:next(Tab, Last)),
+ ?match(Keys, lists:sort([First,Mid,Last])),
+
+ %% For set and bag these last, prev works as first and next
+ First2 = mnesia:last(Tab),
+ Mid2 = mnesia:prev(Tab, First2),
+ Last2 = mnesia:prev(Tab, Mid2),
+ ?match('$end_of_table', mnesia:prev(Tab, Last2)),
+ ?match(Keys, lists:sort([First2,Mid2,Last2])),
+
+ ?match([ok, ok, ok], [mnesia:delete(Tab, K, write) || K <- Keys]),
+ W = wild_pattern,
+ ?match([], mnesia:match_object(Tab, mnesia:table_info(Tab, W), read)),
+ ?log("Safe fixed ~p~n", [catch ets:info(Tab, safe_fixed)]),
+ ?log("Fixed ~p ~n", [catch ets:info(Tab, fixed)]),
+
+ ?match(OptNodes, mnesia:lock({global, some_lock_item, Nodes}, write)),
+ ?match(OptNodes, mnesia:lock({global, some_lock_item, Nodes}, read)),
+ ?match(OptNodes, mnesia:lock({table, Tab}, read)),
+ ?match(OptNodes, mnesia:lock({table, Tab}, write)),
+
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+auto_repair(doc) ->
+ ["Try the auto_repair mechanism on the various disk_logs and dets files.",
+ "",
+ "The case tests both normal values of the parameter, and also",
+ "one crazy value.",
+     "The real auto_repair functionality is tested in the",
+     "dets suite."
+ ];
+auto_repair(suite) -> [];
+auto_repair(Config) when is_list(Config) ->
+ ?init(1, Config),
+ ?match(ok, mnesia:start()), % Check default true
+ ?match(true, mnesia:system_info(auto_repair)),
+ ?match(stopped, mnesia:stop()),
+ ?match(ok, mnesia:start([{auto_repair, true}])),
+ ?match(true, mnesia:system_info(auto_repair)),
+ ?match(stopped, mnesia:stop()),
+ ?match(ok, mnesia:start([{auto_repair, false}])),
+ ?match(false, mnesia:system_info(auto_repair)),
+ ?match(stopped, mnesia:stop()),
+ ?match({error, {bad_type, auto_repair, your_mama}},
+ mnesia:start([{auto_repair, your_mama}])),
+ ?match(stopped, mnesia:stop()),
+ ?cleanup(1, Config),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+backup_module(doc) ->
+ ["Replace the backup module with another module and use it to",
+     "read and write to an alternate backup medium, e.g. stored in",
+ "the internal state of a simple process."];
+backup_module(suite) -> [];
+backup_module(Config) when is_list(Config) ->
+ Nodes = ?acquire_schema(1, Config),
+ ?match(ok, mnesia:start([{backup_module, mnesia_config_backup}])),
+ ?match({atomic,ok},
+ mnesia:create_table(test_table,
+ [{disc_copies, Nodes},
+ {attributes,
+ record_info(fields,test_table)}])),
+
+ ?match({atomic,ok},
+ mnesia:create_table(test_table2,
+ [{disc_copies, Nodes},
+ {attributes,
+ record_info(fields,test_table2)}])),
+ %% Write in test table
+ ?trans(fun() -> mnesia:write(#test_table{i=1}) end),
+ ?trans(fun() -> mnesia:write(#test_table{i=2}) end),
+
+ %% Write in test table 2
+ ?trans(fun() -> mnesia:write(#test_table2{i=3}) end),
+ ?trans(fun() -> mnesia:write(#test_table2{i=4}) end),
+ mnesia_test_lib:sync_tables(Nodes, [test_table, test_table2]),
+
+ File = whow,
+ %% Now make a backup
+ ?match(ok, mnesia:backup(File)),
+
+ ?match(ok, mnesia:install_fallback(File)),
+
+ %% Now add things
+ ?trans(fun() -> mnesia:write(#test_table{i=2.5}) end),
+ ?trans(fun() -> mnesia:write(#test_table2{i=3.5}) end),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [test_table, test_table2])),
+
+ %% Now check newly started tables
+ ?match({atomic, [1,2]},
+ mnesia:transaction(fun() -> lists:sort(mnesia:all_keys(test_table)) end)),
+ ?match({atomic, [3,4]},
+ mnesia:transaction(fun() -> lists:sort(mnesia:all_keys(test_table2)) end)),
+
+ file:delete(File),
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+debug(doc) ->
+ ["Try out the four debug levels and ensure that the",
+ "expected events are generated."];
+debug(suite) -> [];
+debug(Config) when is_list(Config) ->
+ Nodes = ?init(1, Config),
+ case application:get_env(mnesia,debug) of
+ undefined ->
+ ?match(none, mnesia:system_info(debug));
+ {ok, false} ->
+ ?match(none, mnesia:system_info(debug));
+ {ok, true} ->
+ ?match(debug, mnesia:system_info(debug));
+ {ok, Env} ->
+ ?match(Env, mnesia:system_info(debug))
+ end,
+
+ ?match(ok, mnesia:start([{debug, verbose}])),
+ ?match(verbose, mnesia:system_info(debug)),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+
+ ?match(ok, mnesia:start([{debug, debug}])),
+ ?match(debug, mnesia:system_info(debug)),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+
+ ?match(ok, mnesia:start([{debug, trace}])),
+ ?match(trace, mnesia:system_info(debug)),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+
+ ?match(ok, mnesia:start([{debug, true}])),
+ ?match(debug, mnesia:system_info(debug)),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+
+ ?match(ok, mnesia:start([{debug, false}])),
+ ?match(none, mnesia:system_info(debug)),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dir(doc) ->
+ ["Try to use alternate Mnesia directories"];
+dir(suite) -> [];
+dir(Config) when is_list(Config) ->
+ Nodes = ?init(1, Config),
+
+ ?match(ok, mnesia:start([{dir, tuff}])),
+ Dir = filename:join([element(2, file:get_cwd()), "tuff"]),
+ ?match(Dir, mnesia:system_info(directory)),
+ mnesia_test_lib:kill_mnesia(Nodes),
+
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dump_log_update_in_place(doc) ->
+ ["Change the update in place policy for the transaction log dumper."];
+dump_log_update_in_place(suite) -> [];
+dump_log_update_in_place(Config) when is_list(Config) ->
+ Nodes = ?acquire(1, Config),
+ ?match(true, mnesia:system_info(dump_log_update_in_place)),
+ ?match({atomic,ok},
+ mnesia:create_table(test_table,
+ [{disc_copies, Nodes},
+ {attributes,
+ record_info(fields,test_table)}])),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+
+ ?match(ok, mnesia:start([{dump_log_update_in_place, false}])),
+ ?match(false, mnesia:system_info(dump_log_update_in_place)),
+
+ mnesia_test_lib:sync_tables(Nodes, [schema, test_table]),
+
+ %% Now provoke some log dumps
+
+ L = lists:map(
+ fun(Num) ->
+ %% Write something on one end ...
+ mnesia:transaction(
+ fun() ->
+ mnesia:write(#test_table{i=Num}) end
+ ) end,
+ lists:seq(1, 110)),
+
+ L2 = lists:duplicate(110, {atomic, ok}),
+
+ %% If this fails then some of the 110 writes above failed
+ ?match(true, L==L2),
+ if L==L2 -> ok;
+ true ->
+ ?verbose("***** List1 len: ~p, List2 len: ~p~n",
+ [length(L), length(L2)]),
+ ?verbose("L: ~p~nL2:~p~n", [L, L2])
+ end,
+
+ %% If we still can write, then Mnesia is probably alive
+ ?trans(fun() -> mnesia:write(#test_table{i=115}) end),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dump_log_thresholds(doc) ->
+ ["Elaborate with various values of the dump log thresholds and how",
+     "they affect each other. Both the dump_log_time_threshold and the",
+ "dump_log_write_threshold must be covered. Do also check that both",
+ "kinds of overload events are generated as expected.",
+ "",
+ "Logs are checked by first doing whatever has to be done to trigger ",
+     "a dump, then stopping Mnesia, and then looking in the ",
+     "data files to see that the correct number of transactions ",
+ "have been done."];
+dump_log_thresholds(suite) ->
+ [
+ dump_log_time_threshold,
+ dump_log_write_threshold
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dump_log_write_threshold(doc)->
+ ["This test case must be rewritten.",
+ "Dump logs are tested by doing transactions, then killing Mnesia and ",
+     "then examining the table data files to see if they are correct.",
+     "The test_table is used as a counter; it is stepped once ",
+ "for each transaction."];
+dump_log_write_threshold(suite)->[];
+dump_log_write_threshold(Config) when is_list(Config) ->
+ [N1] = ?acquire_schema(1, Config),
+
+ Threshold = 3,
+ ?match(ok,mnesia:start([{dump_log_write_threshold, Threshold}])),
+
+ ?match({atomic,ok},
+ mnesia:create_table(test_table,
+ [{disc_copies, [N1]},
+ {attributes,
+ record_info(fields,test_table)}])),
+ ?match(dumped, mnesia:dump_log()),
+
+ ?match(ok, do_trans(2)), % Shall not have dumped
+ check_logs(0),
+
+ ?match(ok, do_trans(Threshold - 2)), % Trigger a dump
+ receive after 1000 -> ok end,
+ check_logs(Threshold),
+
+
+ ?match(ok, do_trans(Threshold - 1)),
+ ?match(dumped, mnesia:dump_log()), %% This should trigger ets2dcd dump
+ check_logs(0), %% and leave no dcl file
+
+ ?match(stopped, mnesia:stop()),
+
+ %% Check bad threshold value
+ ?match({error,{bad_type,dump_log_write_threshold,0}},
+ mnesia:start([{dump_log_write_threshold,0}])),
+
+ ?verify_mnesia([], [N1]),
+ ?cleanup(1, Config),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dump_log_time_threshold(doc)->
+    ["See the documentation of dump_log_write_threshold above."];
+dump_log_time_threshold(suite)->[];
+dump_log_time_threshold(Config) when is_list(Config) ->
+ Nodes = ?acquire_schema(1, Config),
+ Time = 4000,
+
+ %% Check bad threshold value
+ ?match({error,{bad_type,dump_log_time_threshold,0}},
+ mnesia:start([{dump_log_time_threshold,0}])),
+
+
+ ?match(ok,mnesia:start([{dump_log_write_threshold,100},
+ {dump_log_time_threshold, Time}])),
+
+ ?match({atomic,ok},mnesia:create_table(test_table,
+ [{disc_copies, Nodes},
+ {attributes,
+ record_info(fields,
+ test_table)}])),
+
+ %% Check that nothing is dumped when within time threshold
+ ?match(ok, do_trans(1)),
+ check_logs(0),
+
+ ?match(Time, mnesia:system_info(dump_log_time_threshold)),
+
+ %% Check that things get dumped when time threshold exceeded
+ ?match(ok, do_trans(5)),
+ receive after Time+2000 -> ok end,
+ check_logs(6),
+
+ ?verify_mnesia([node()], []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%
+%%
+%% Help functions for dump log
+
+%% Do a transaction N times
+do_trans(0) -> ok;
+do_trans(N) ->
+ Fun = fun() ->
+ XX=incr(),
+ mnesia:write(#test_table{i=XX})
+ end,
+ {atomic, ok} = mnesia:transaction(Fun),
+ do_trans(N-1).
+
+%% An increasing number
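+%% Note: put/2 returns the previous value, so successive calls yield 1, 2, 3, ...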
+incr() ->
+ case get(bloody_counter) of
+ undefined -> put(bloody_counter, 2), 1;
+ Num -> put(bloody_counter, Num+1)
+ end.
+
+%%
+%% Check that the correct number of transactions have been recorded.
+%%-record(test_table,{i,a1,a2,a3}).
+check_logs(N) ->
+ File = mnesia_lib:tab2dcl(test_table),
+ Args = [{file, File}, {name, testing}, {repair, true}, {mode, read_only}],
+
+ if N == 0 ->
+ ?match(false, mnesia_lib:exists(File));
+ true ->
+ ?match(true, mnesia_lib:exists(File)),
+ ?match({ok, _Log}, disk_log:open(Args)),
+
+ {Cont, Terms} = disk_log:chunk(testing, start),
+ ?match(eof, disk_log:chunk(testing, Cont)),
+ %%?verbose("N: ~p, L: ~p~n", [N, L]),
+ disk_log:close(testing),
+
+ %% Correct number of records in file
+ ?match({N, N}, {N, length(Terms) -1 }) %% Ignore Header
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+dump_log_load_regulation(doc) ->
+ ["Test the load regulation of the dumper"];
+dump_log_load_regulation(suite) ->
+ [];
+dump_log_load_regulation(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ Param = dump_log_load_regulation,
+
+ %% Normal
+ NoReg = false,
+ ?match(NoReg, mnesia:system_info(Param)),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+
+ %% Bad
+ Bad = arne_anka,
+ ?match({error, {bad_type, Param, Bad}},
+ mnesia:start([{Param, Bad}])),
+
+ %% Regulation activated
+ Reg = true,
+ ?match(ok,mnesia:start([{Param, Reg}])),
+ ?match(Reg, mnesia:system_info(Param)),
+
+ Args =
+ [{db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 5},
+ {n_branches, length(Nodes) * 10},
+ {n_accounts_per_branch, 5},
+ {replica_type, disc_copies},
+ {stop_after, timer:seconds(30)},
+ {report_interval, timer:seconds(10)},
+ {use_running_mnesia, true},
+ {reuse_history_id, true}],
+
+ ?match({ok, _}, mnesia_tpcb:start(Args)),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+embedded_mnemosyne(doc) ->
+ ["Start Mnemosyne as an embedded part of Mnesia",
+ "on some of the nodes"];
+embedded_mnemosyne(suite) ->
+ [];
+embedded_mnemosyne(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ Param = embedded_mnemosyne,
+
+ %% Normal
+ NoMnem = false,
+ ?match(NoMnem, mnesia:system_info(Param)),
+ ?match(undefined, whereis(mnemosyne_catalog)),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+
+ %% Bad
+ Bad = arne_anka,
+ ?match({error, {bad_type, Param, Bad}},
+ mnesia:start([{Param, Bad}])),
+
+ case code:priv_dir(mnemosyne) of
+ {error, _} -> %% No mnemosyne on later systems
+ ok;
+ _ ->
+ %% Mnemosyne as embedded application
+ Mnem = true,
+ ?match(undefined, whereis(mnemosyne_catalog)),
+ ?match(ok,mnesia:start([{Param, Mnem}])),
+ ?match(Mnem, mnesia:system_info(Param)),
+ ?match(Pid when is_pid(Pid), whereis(mnemosyne_catalog)),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ ?match(undefined, whereis(mnemosyne_catalog))
+ end,
+ ?verify_mnesia([], Nodes),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ignore_fallback_at_startup(doc) ->
+ ["Start Mnesia without rollback of the database to the fallback. ",
+ "Once Mnesia has been (re)started the installed fallback should",
+ "be handled as a normal active fallback.",
+ "Install a customized event module which disables the termination",
+     "of Mnesia when mnesia_down occurs with an active fallback."].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+max_wait_for_decision(doc) ->
+    ["Provoke Mnesia to make a forced decision of the outcome",
+ "of a heavy weight transaction."].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+send_compressed(doc) -> [];
+send_compressed(suite) -> [];
+send_compressed(Config) ->
+ [N1,N2] = Nodes = ?acquire_nodes(2, Config),
+ ?match({atomic,ok}, mnesia:create_table(t0, [{ram_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t1, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t2, [{disc_only_copies,[N1,N2]}])),
+
+ Max = 1000,
+ Create = fun(Tab) -> [mnesia:write({Tab, N, {N, "FILLER-123490878345asdasd"}})
+ || N <- lists:seq(1, Max)],
+ ok
+ end,
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ ?match(ok, mnesia:start([{send_compressed, 9}])),
+ ?match(ok, mnesia:wait_for_tables([t0,t1,t2], 5000)),
+
+ ?match({atomic, ok}, mnesia:transaction(Create, [t0])),
+ ?match({atomic, ok}, mnesia:transaction(Create, [t1])),
+ ?match({atomic, ok}, mnesia:transaction(Create, [t2])),
+
+ ?match([], mnesia_test_lib:start_mnesia([N2], [t0,t1,t2])),
+
+ Verify = fun(Tab) ->
+ [ [{Tab,N,{N,_}}] = mnesia:read(Tab, N) || N <- lists:seq(1, Max)],
+ ok
+ end,
+ ?match({atomic, ok}, rpc:call(N1, mnesia, transaction, [Verify, [t0]])),
+ ?match({atomic, ok}, rpc:call(N1, mnesia, transaction, [Verify, [t1]])),
+ ?match({atomic, ok}, rpc:call(N1, mnesia, transaction, [Verify, [t2]])),
+
+ ?match({atomic, ok}, rpc:call(N2, mnesia, transaction, [Verify, [t0]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, transaction, [Verify, [t1]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, transaction, [Verify, [t2]])),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+app_test(doc) -> [];
+app_test(suite) -> [];
+app_test(_Config) ->
+ ?match(ok,test_server:app_test(mnesia)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+event_module(doc) ->
+ ["Replace the event module with another module and use it as",
+ "receiver of the various system and table events. Provoke",
+ "coverage of all kinds of events."];
+event_module(suite) -> [];
+event_module(Config) when is_list(Config) ->
+ Filter = fun({mnesia_system_event,{mnesia_info, _, _}}) -> false;
+ (_) -> true
+ end,
+
+ [_N1, N2]=Nodes=?acquire_schema(2, Config),
+
+ Def = case mnesia_test_lib:diskless(Config) of
+ true -> [{event_module, mnesia_config_event},
+ {extra_db_nodes, Nodes}];
+ false ->
+ [{event_module, mnesia_config_event}]
+ end,
+
+ ?match({[ok, ok], []}, rpc:multicall(Nodes, mnesia, start, [Def])),
+ receive after 1000 -> ok end,
+ mnesia_event ! {get_log, self()},
+ DebugLog1 = receive
+ {log, L1} -> L1
+ after 10000 -> [timeout]
+ end,
+ ?match([{mnesia_system_event,{mnesia_up,N2}}],
+ lists:filter(Filter, DebugLog1)),
+ mnesia_test_lib:kill_mnesia([N2]),
+ receive after 2000 -> ok end,
+
+ ?match({[ok], []}, rpc:multicall([N2], mnesia, start, [])),
+
+ receive after 1000 -> ok end,
+ mnesia_event ! {get_log, self()},
+ DebugLog = receive
+ {log, L} -> L
+ after 10000 -> [timeout]
+ end,
+ ?match([{mnesia_system_event,{mnesia_up,N2}},
+ {mnesia_system_event,{mnesia_down,N2}},
+ {mnesia_system_event,{mnesia_up, N2}}],
+ lists:filter(Filter, DebugLog)),
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+schema_config(doc) ->
+    ["Try many configurations with various schema_location settings, with and",
+     "without explicit extra_db_nodes. Do also provoke various schema merge",
+     "situations. Most of the other test suites focus on tests where the",
+     "schema resides on disc. Now it is time to perform an exhaustive",
+ "elaboration with various disc less configurations."];
+schema_config(suite) ->
+ [
+ start_one_disc_full_then_one_disc_less,
+ start_first_one_disc_less_then_one_disc_full,
+ start_first_one_disc_less_then_two_more_disc_less,
+ schema_location_and_extra_db_nodes_combinations,
+ table_load_to_disc_less_nodes,
+ schema_merge,
+ dynamic_connect
+ ].
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+start_one_disc_full_then_one_disc_less(doc)->
+    ["Start a disc node and then a disc less one. Distribute some",
+ "tables between them."];
+start_one_disc_full_then_one_disc_less(suite) -> [];
+start_one_disc_full_then_one_disc_less(Config) when is_list(Config) ->
+ [N1, N2] = ?init(2, Config),
+ ?match(ok, mnesia:create_schema([N1])),
+ ?match([], mnesia_test_lib:start_mnesia([N1])),
+
+ ?match({atomic, ok}, mnesia:add_table_copy(schema, N2, ram_copies)),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, [N1]}]])),
+ mnesia_test_lib:sync_tables([N1, N2], [schema]),
+
+ %% Now create some tables
+ ?match({atomic,ok},
+ mnesia:create_table(test_table,
+ [{ram_copies, [N1, N2]},
+ {attributes,
+ record_info(fields,test_table)}])),
+
+ ?match({atomic,ok},
+ rpc:call(
+ N2, mnesia,create_table, [test_table2,
+ [{ram_copies, [N1, N2]},
+ {attributes,
+ record_info(fields,test_table2)}]])),
+
+ %% Write something on one end ...
+ Rec = #test_table{i=55},
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec) end)),
+
+ %% ... and read it in the other
+ ?match({atomic, [Rec]},
+ rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 55}) end])),
+
+
+ %% Then do the same but start at the other end
+ Rec2 = #test_table2{i=155},
+ ?match({atomic, ok},
+ rpc:call(N2, mnesia, transaction,
+ [fun() ->
+ mnesia:write(Rec2) end
+ ])),
+
+ ?match({atomic, [Rec2]},
+ mnesia:transaction(fun() -> mnesia:read({test_table2, 155}) end)),
+
+ ?verify_mnesia([N1, N2], []),
+ ?cleanup(2, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+start_first_one_disc_less_then_one_disc_full(doc)->
+ ["no_doc"];
+start_first_one_disc_less_then_one_disc_full(suite) -> [];
+start_first_one_disc_less_then_one_disc_full(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?init(2, Config),
+ ?match(ok, mnesia:create_schema([N1])),
+ ?match([], mnesia_test_lib:start_mnesia([N1])),
+
+ ?match({atomic, ok}, mnesia:add_table_copy(schema, N2, ram_copies)),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, Nodes}]])),
+
+ mnesia_test_lib:sync_tables([N1, N2], [schema]),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ receive after 2000 -> ok end,
+ ?match([], mnesia_test_lib:start_mnesia(Nodes)),
+
+ mnesia_test_lib:sync_tables([N1, N2], [schema]),
+
+ %% Now create some tables
+ ?match({atomic,ok},
+ rpc:call(
+ N1, mnesia,create_table, [test_table,
+ [%%{disc_copies, [node()]},
+ {ram_copies, [N1, N2]},
+ {attributes,
+ record_info(fields,test_table)}]])),
+ mnesia_test_lib:sync_tables([N1, N2], [test_table]),
+
+ ?match({atomic,ok},
+ rpc:call(
+ N2, mnesia,create_table, [test_table2,
+ [%%{disc_copies, [node()]},
+ {ram_copies, [N1, N2]},
+ {attributes,
+ record_info(fields,test_table2)}]])),
+
+ mnesia_test_lib:sync_tables([N1, N2], [test_table, test_table2]),
+
+ %% Assure tables loaded
+ ?match({[ok, ok], []},
+ rpc:multicall([N1, N2], mnesia, wait_for_tables,
+ [[schema, test_table, test_table2], 10000])),
+
+ %% Write something on one end ...
+ Rec = #test_table{i=55},
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write(Rec) end])),
+
+ %% ... and read it in the other
+ ?match({atomic, [Rec]},
+ rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 55}) end])),
+
+ %% Then do the same but start at the other end
+ Rec2 = #test_table2{i=155},
+ ?match({atomic, ok},
+ rpc:call(N2, mnesia, transaction,
+ [fun() ->
+ mnesia:write(Rec2) end
+ ])),
+
+ ?match({atomic, [Rec2]},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:read({test_table2, 155}) end])),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+start_first_one_disc_less_then_two_more_disc_less(doc)->
+ ["no doc"];
+start_first_one_disc_less_then_two_more_disc_less(suite) -> [];
+start_first_one_disc_less_then_two_more_disc_less(Config) when is_list(Config) ->
+ Nodes = [N1, N2, N3] = ?init(3, Config),
+
+ ?match(ok, rpc:call(N1, mnesia, start, [[{schema_location, ram}]])),
+
+ %% Really should use test_lib:mnesia_start for these ones but ...
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia,add_table_copy, [schema, N2, ram_copies])),
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia,add_table_copy, [schema, N3, ram_copies])),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, [N1]}]])),
+ ?match(ok, rpc:call(N3, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, [N1, N2]}]])),
+
+ %% Now create some tables
+ ?match({atomic,ok},
+ rpc:call(
+ N1, mnesia,create_table, [test_table,
+ [%%{disc_copies, [node()]},
+ {ram_copies, [N1, N2, N3]},
+ {attributes,
+ record_info(fields,test_table)}]])),
+
+ %% Assure tables loaded
+ ?match({[ok, ok, ok], []},
+ rpc:multicall([N1, N2, N3], mnesia, wait_for_tables,
+ [[test_table], 1000])),
+
+ %% Write something on one end ...
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write(#test_table{i=44}) end])),
+
+ %% Force synchronicity
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write_lock_table(test_table) end])),
+
+ %% ... and read it in the others
+ ?match({[{atomic, [{test_table, 44, _, _, _}]},
+ {atomic, [{test_table, 44, _, _, _}]}], []},
+ rpc:multicall([N2, N3], mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 44}) end])),
+
+ %% Then do the other way around
+ ?match({atomic, ok},
+ rpc:call(N3, mnesia, transaction,
+ [fun() -> mnesia:write(#test_table{i=33}) end])),
+ %% Force synchronicity
+ ?match({atomic, ok},
+ rpc:call(N3, mnesia, transaction,
+ [fun() -> mnesia:write_lock_table(test_table) end])),
+
+ ?match({[{atomic, [{test_table, 44, _, _, _}]},
+ {atomic, [{test_table, 44, _, _, _}]}], []},
+ rpc:multicall([N1, N2], mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 44}) end])),
+
+ mnesia_test_lib:reload_appls([mnesia], Nodes),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+schema_location_and_extra_db_nodes_combinations(doc)->
+    ["Test schema location and extra_db_nodes combinations."];
+schema_location_and_extra_db_nodes_combinations(suite) -> [];
+schema_location_and_extra_db_nodes_combinations(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?init(2, Config),
+ ?match(ok, mnesia:create_schema([N1])),
+ ?match([], mnesia_test_lib:start_mnesia([N1])),
+
+ %% Really should use test_lib:mnesia_start for these ones but ...
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia,add_table_copy, [schema, N2, ram_copies])),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, [N1]}]])),
+
+ %% Assure tables loaded
+ ?match({[ok, ok], []},
+ rpc:multicall([N1, N2], mnesia, wait_for_tables,
+ [[schema], 10000])),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(2, Config),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+table_load_to_disc_less_nodes(doc)->
+ ["Load tables to disc less nodes"];
+table_load_to_disc_less_nodes(suite) -> [];
+table_load_to_disc_less_nodes(Config) when is_list(Config) ->
+ [N1, N2] = ?init(2, Config),
+
+ ?match(ok, rpc:call(N1, mnesia, start, [[{schema_location, ram}]])),
+
+ %% Really should use test_lib:mnesia_start for these ones but ...
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia,add_table_copy, [schema, N2, ram_copies])),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{schema_location, ram},
+ {extra_db_nodes, [N1]}]])),
+
+ %% Now create some tables
+ ?match({atomic,ok},
+ rpc:call(
+ N1, mnesia,create_table, [test_table,
+ [%%{disc_copies, [node()]},
+ {ram_copies, [N1, N2]},
+ {attributes,
+ record_info(fields,test_table)}]])),
+
+ %% Assure tables loaded
+ ?match({[ok, ok], []},
+ rpc:multicall([N1, N2], mnesia, wait_for_tables,
+ [[test_table], 1000])),
+
+ %% Write something on one end ...
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write(#test_table{i=44}) end])),
+
+ %% Force synchronicity
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write_lock_table(test_table) end])),
+
+ %% ... and read it in the others
+ ?match({atomic, [{test_table, 44, _, _, _}]},
+ rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 44}) end])),
+
+ ?cleanup(2, Config),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+schema_merge(doc) ->
+ ["Provoke various schema merge situations.",
+ "Perform various schema updates while some nodes are down,",
+ "stop the started nodes, start the stopped nodes and perform",
+     "schema updates. Now we have a situation where some of the table",
+ "definitions have been changed on two or more nodes independently",
+ "of each other and when Mnesia on the nodes tries to connect",
+ "to each other at restart the schema will be merged.",
+     "Do also try to provoke schema merge situations where the",
+ "schema cannot be merged."];
+
+schema_merge(suite) -> [];
+
+schema_merge(Config) when is_list(Config) ->
+ [N1, N2]=Nodes=?acquire(2,Config),
+
+ mnesia_test_lib:kill_mnesia([N2]),
+ receive after 1000 -> ok end,
+
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ ?match({atomic,ok},
+ rpc:call(
+ N1, mnesia,create_table,
+ [test_table,
+ [{Storage, [N1]},
+ {attributes,
+ record_info(fields,test_table)}]])),
+
+ ?match({atomic, ok},
+ rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:write(#test_table{i=44}) end])),
+
+ mnesia_test_lib:kill_mnesia([N1]),
+ receive after 2000 -> ok end,
+ %% Can't use std start because it waits for schema
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+
+ ?match({atomic,ok},
+ rpc:call(
+ N2, mnesia,create_table,
+ [test_table2,
+ [{Storage, [N2]},
+ {attributes,
+ record_info(fields,test_table2)}]])),
+
+ receive after 5000 -> ok end,
+
+ ?match({atomic, ok},
+ rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:write(#test_table2{i=33}) end])),
+
+ %% Can't use std start because it waits for schema
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+
+ %% Assure tables loaded
+ ?match({[ok, ok], []},
+ rpc:multicall([N1, N2], mnesia, wait_for_tables,
+ [[schema, test_table, test_table2], 10000])),
+
+ %% ... and read it in the others
+ ?match({[{atomic, [{test_table, 44, _, _, _}]},
+ {atomic, [{test_table, 44, _, _, _}]}], []},
+ rpc:multicall([N1, N2], mnesia, transaction,
+ [fun() -> mnesia:read({test_table, 44}) end])),
+
+ ?match({[{atomic, [{test_table2, 33, _}]},
+ {atomic, [{test_table2, 33, _}]}], []},
+ rpc:multicall([N1, N2], mnesia, transaction,
+ [fun() -> mnesia:read({test_table2, 33}) end])),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(2, Config),
+ ok.
+
+
+-define(connect(Nodes), mnesia:change_config(extra_db_nodes, Nodes)).
+-define(rpc_connect(From, Nodes),
+ rpc:call(From, mnesia, change_config, [extra_db_nodes, Nodes])).
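+
+%% Both macros wrap mnesia:change_config/2. As a rough sketch (N1 is just a
+%% placeholder for a remote node name), a direct call looks like:
+%%
+%%   {ok, ConnectedNodes} = mnesia:change_config(extra_db_nodes, [N1]).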
+
+
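+%% Normalise results for comparison: sorts plain node lists and the node list
+%% inside {ok, Nodes}; other tuples (e.g. error results) are left as they are.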
+sort({ok, NS}) ->
+ {ok, lists:sort(NS)};
+sort(Ns) when is_tuple(Ns) ->
+ Ns;
+sort(NS) when is_list(NS) ->
+ lists:sort(NS).
+
+
+dynamic_connect(doc) ->
+ ["Test the new functionality where we start mnesia first and then "
+ "connect to the other mnesia nodes"];
+dynamic_connect(suite) ->
+ [
+ dynamic_basic,
+ dynamic_ext,
+ dynamic_bad
+ ].
+
+
+dynamic_basic(suite) -> [];
+dynamic_basic(Config) when is_list(Config) ->
+ Nodes = [N1, N2, N3] = ?acquire_nodes(3, Config),
+ SNs = lists:sort(Nodes),
+
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, Nodes--[N1]}, {disc_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{disc_copies, Nodes}])),
+
+ ?match({ok, SNs}, sort(?rpc_connect(N1, Nodes))), %% What shall happen?
+ ?match({ok, []}, sort(?rpc_connect(N1, [nonode@nothosted]))), %% What shall happen?
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match(ok, mnesia:delete_schema([N2])),
+
+ ?match(ok, mnesia:dirty_write({tab1, 1, 1})),
+ ?match(ok, mnesia:dirty_write({tab2, 1, 1})),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{extra_db_nodes, [N1]}]])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab1,tab2],5000])),
+ io:format("Here ~p ~n",[?LINE]),
+ check_storage(N2, N1, [N3]),
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+ ?match(ok, mnesia:delete_schema([N3])),
+
+ io:format("T1 ~p ~n",[rpc:call(N3,?MODULE,c_nodes,[])]),
+ ?match(ok, rpc:call(N3, mnesia, start, [])),
+ io:format("T2 ~p ~n",[rpc:call(N3,?MODULE,c_nodes,[])]),
+ timer:sleep(2000),
+ io:format("T3 ~p ~n",[rpc:call(N3,?MODULE,c_nodes,[])]),
+ ?match({ok, [N1]}, sort(?rpc_connect(N3, [N1]))),
+ io:format("T4 ~p ~n",[rpc:call(N3,?MODULE,c_nodes,[])]),
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [[tab1,tab2],5000])),
+ io:format("Here ~p ~n",[?LINE]),
+ check_storage(N3, N1, [N2]),
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+ ?match(ok, mnesia:delete_schema([N3])),
+
+ ?match(ok, rpc:call(N3, mnesia, start, [])),
+ ?match({ok, [N3]}, sort(?rpc_connect(N1, [N3]))),
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [[tab1,tab2],5000])),
+ io:format("Here ~p ~n",[?LINE]),
+ check_storage(N3, N1, [N2]),
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match(ok, mnesia:delete_schema([N2])),
+ ?match({atomic, ok}, mnesia:del_table_copy(schema, N2)),
+
+    % Ok, we have now removed references to node N2 from the other nodes,
+    % so Mnesia should come up now.
+ ?match({atomic, ok}, mnesia:add_table_copy(tab1, N2, ram_copies)),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match({ok, _}, sort(?rpc_connect(N2, [N3]))),
+
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N3, mnesia, system_info, [running_db_nodes]))),
+
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab1], 1000])),
+ ?match([{tab1, 1, 1}], rpc:call(N2, mnesia, dirty_read, [tab1, 1])),
+
+ mnesia_test_lib:kill_mnesia([N2]),
+
+ %%% SYNC!!!
+ timer:sleep(1000),
+
+ ?match([N3,N1], sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match([N3,N1], sort(rpc:call(N3, mnesia, system_info, [running_db_nodes]))),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match({ok, _}, sort(?rpc_connect(N3, [N2]))),
+
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N3, mnesia, system_info, [running_db_nodes]))),
+
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab1], 1000])),
+ ?match([{tab1, 1, 1}], rpc:call(N2, mnesia, dirty_read, [tab1, 1])),
+
+ ?verify_mnesia(Nodes, []),
+%% ?cleanup(3, Config).
+ ok.
+
+c_nodes() ->
+ {mnesia_lib:val({current, db_nodes}),mnesia_lib:val(recover_nodes)}.
+
+
+dynamic_ext(suite) -> [];
+dynamic_ext(Config) when is_list(Config) ->
+ Ns = [N1,N2] = ?acquire_nodes(2, Config),
+ SNs = lists:sort([N1,N2]),
+
+ ?match({atomic, ok}, mnesia:create_table(tab0, [{disc_copies, [N1,N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{disc_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab3, [{disc_only_copies, [N2]}])),
+
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match(ok, mnesia:delete_schema([N2])),
+ ?match(ok, rpc:call(N2, mnesia, start, [[{extra_db_nodes, [N1]}]])),
+
+ ?match(SNs, sort(rpc:call(N1, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab0,tab1,tab2,tab3], 2000])),
+
+ Check = fun({Tab,Storage}) ->
+ ?match(Storage, rpc:call(N2, mnesia, table_info, [Tab, storage_type])),
+ ?match([{N2,Storage}],
+ lists:sort(rpc:call(N2, mnesia, table_info, [Tab, where_to_commit])))
+ end,
+ [Check(Test) || Test <- [{tab1, ram_copies},{tab2, disc_copies},{tab3, disc_only_copies}]],
+
+ T = now(),
+ ?match(ok, mnesia:dirty_write({tab0, 42, T})),
+ ?match(ok, mnesia:dirty_write({tab1, 42, T})),
+ ?match(ok, mnesia:dirty_write({tab2, 42, T})),
+ ?match(ok, mnesia:dirty_write({tab3, 42, T})),
+
+ ?match(stopped, rpc:call(N2, mnesia, stop, [])),
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+ ?match(ok, mnesia:wait_for_tables([tab0,tab1,tab2,tab3], 10000)),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab1,tab2,tab3], 100])),
+ ?match([], mnesia:dirty_read({tab1, 41})),
+ ?match([{tab2,42,T}], mnesia:dirty_read({tab2, 42})),
+ ?match([{tab3,42,T}], mnesia:dirty_read({tab3, 42})),
+
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match(ok, mnesia:delete_schema([N2])),
+
+ ?match(stopped, rpc:call(N1, mnesia, stop, [])),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{extra_db_nodes,[N1,N2]}]])),
+ ?match({timeout,[tab0]}, rpc:call(N2, mnesia, wait_for_tables, [[tab0], 500])),
+
+ ?match(ok, rpc:call(N1, mnesia, start, [[{extra_db_nodes, [N1,N2]}]])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[tab0], 1500])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab0], 1500])),
+ ?match([{tab0,42,T}], mnesia:dirty_read({tab0, 42})),
+ ?match([{tab0,42,T}], rpc:call(N2, mnesia,dirty_read,[{tab0,42}])),
+
+ ?match(stopped, rpc:call(N1, mnesia, stop, [])),
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match(ok, mnesia:delete_schema([N2])),
+ ?match(ok, rpc:call(N1, mnesia, start, [[{extra_db_nodes, [N1,N2]}]])),
+ ?match({timeout,[tab0]}, rpc:call(N1, mnesia, wait_for_tables, [[tab0], 500])),
+
+ ?match(ok, rpc:call(N2, mnesia, start, [[{extra_db_nodes,[N1,N2]}]])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[tab0], 1500])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab0], 1500])),
+ ?match([{tab0,42,T}], mnesia:dirty_read({tab0, 42})),
+ ?match([{tab0,42,T}], rpc:call(N2,mnesia,dirty_read,[{tab0,42}])),
+
+ ?verify_mnesia(Ns, []),
+ ok.
+
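+%% Sanity checks for a node (Me) that has just been dynamically connected to
+%% Orig: both must hold disc copies of the schema and of tab2, and a write
+%% made on Me while the other nodes are down must survive restarting Me.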
+check_storage(Me, Orig, Other) ->
+ io:format("Nodes ~p ~p ~p~n",[Me,Orig,Other]),
+ rpc:multicall(Other, sys, status, [mnesia_locker]),
+ rpc:call(Me, sys, status, [mnesia_locker]),
+ rpc:call(Orig, sys, status, [mnesia_locker]),
+ rpc:multicall(Other, sys, status, [mnesia_controller]),
+ rpc:call(Me, sys, status, [mnesia_controller]),
+ rpc:call(Orig, sys, status, [mnesia_controller]),
+ %% Verify disc_copies
+ W2C = lists:sort([{Node,disc_copies} || Node <- [Me,Orig|Other]]),
+ W2W = lists:sort([Me,Orig|Other]),
+ ?match(disc_copies, rpc:call(Orig, mnesia, table_info, [schema, storage_type])),
+ ?match(disc_copies, rpc:call(Me, mnesia, table_info, [schema, storage_type])),
+ ?match(W2C, lists:sort(rpc:call(Orig, mnesia, table_info, [schema, where_to_commit]))),
+ ?match(W2C, lists:sort(rpc:call(Me, mnesia, table_info, [schema, where_to_commit]))),
+
+ ?match(disc_copies, rpc:call(Orig, mnesia, table_info, [tab2, storage_type])),
+ ?match(disc_copies, rpc:call(Me, mnesia, table_info, [tab2, storage_type])),
+ ?match(W2W, lists:sort(rpc:call(Me, mnesia, table_info, [tab2, where_to_write]))),
+ ?match(Me, rpc:call(Me, mnesia, table_info, [tab2, where_to_read])),
+
+ ?match(W2C, lists:sort(rpc:call(Orig, mnesia, table_info, [tab2, where_to_commit]))),
+ ?match(W2C, lists:sort(rpc:call(Me, mnesia, table_info, [tab2, where_to_commit]))),
+
+ ?match([{tab1,1,1}], mnesia:dirty_read(tab1,1)),
+ ?match([{tab2,1,1}], mnesia:dirty_read(tab2,1)),
+ ?match([{tab1,1,1}], rpc:call(Me, mnesia, dirty_read, [tab1,1])),
+ ?match([{tab2,1,1}], rpc:call(Me, mnesia, dirty_read, [tab2,1])),
+
+ ?match(true, rpc:call(Me, mnesia_monitor, use_dir, [])),
+ ?match(disc_copies, rpc:call(Me, mnesia_lib, val, [{schema, storage_type}])),
+
+ mnesia_test_lib:kill_mnesia([Orig]),
+ mnesia_test_lib:kill_mnesia(Other),
+ T = now(),
+ ?match(ok, rpc:call(Me, mnesia, dirty_write, [{tab2, 42, T}])),
+ ?match(stopped, rpc:call(Me, mnesia, stop, [])),
+ ?match(ok, rpc:call(Me, mnesia, start, [])),
+ ?match([], mnesia_test_lib:start_mnesia([Orig|Other], [tab1,tab2])),
+ ?match([{tab2,42,T}], rpc:call(Me, mnesia, dirty_read, [{tab2, 42}])),
+ ?match([{tab2,42,T}], rpc:call(Orig, mnesia, dirty_read, [{tab2, 42}])),
+
+ ?match([{tab1,1,1}], mnesia:dirty_read(tab1,1)),
+ ?match([{tab2,1,1}], mnesia:dirty_read(tab2,1)),
+ ?match([{tab1,1,1}], rpc:call(Me, mnesia, dirty_read, [tab1,1])),
+ ?match([{tab2,1,1}], rpc:call(Me, mnesia, dirty_read, [tab2,1])),
+ ok.
+
+
+dynamic_bad(suite) -> [];
+dynamic_bad(Config) when is_list(Config) ->
+ Ns = [N1, N2, N3] = ?acquire_nodes(3, Config),
+ SNs = lists:sort([N2,N3]),
+
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, N2, ram_copies)),
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, N3, ram_copies)),
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, Ns -- [N1]},
+ {disc_copies, [N1]}])),
+ ?match(ok, mnesia:dirty_write({tab1, 1, 1})),
+
+ mnesia_test_lib:kill_mnesia(Ns),
+ ?match({[ok, ok], []}, rpc:multicall(Ns -- [N1], mnesia, start, [])),
+ ?match({ok, [N2]}, ?rpc_connect(N3, [N2])),
+ ?match(SNs, sort(rpc:call(N2, mnesia, system_info, [running_db_nodes]))),
+ ?match(SNs, sort(rpc:call(N3, mnesia, system_info, [running_db_nodes]))),
+ ?match({badrpc, {'EXIT', {aborted, {no_exists, _, _}}}},
+ rpc:call(N2, mnesia, table_info, [tab1, where_to_read])),
+
+ ?match(ok, mnesia:start()),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[tab1], 1000])),
+ ?match(N2, rpc:call(N2, mnesia, table_info, [tab1, where_to_read])),
+ ?match([{tab1, 1, 1}], rpc:call(N2, mnesia, dirty_read, [tab1, 1])),
+
+ mnesia_test_lib:kill_mnesia(Ns),
+ ?match({[ok, ok], []}, rpc:multicall(Ns -- [N1], mnesia, start, [])),
+ ?match({ok, [N2]}, ?rpc_connect(N3, [N2])),
+ % Make a merge conflict
+ ?match({atomic, ok}, rpc:call(N3, mnesia, create_table, [tab1, []])),
+
+ io:format("We expect a mnesia crash here~n", []),
+ ?match({error,{_, _}}, mnesia:start()),
+
+ ?verify_mnesia(Ns -- [N1], []),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+unknown_config(doc) ->
+    ["Try some unknown configuration parameters and see that the expected",
+     "things happen."];
+unknown_config(suite)-> [];
+unknown_config(Config) when is_list(Config) ->
+ ?init(1, Config),
+ %% NOTE: case 1 & 2 below do not respond the same
+ ?match({error, Res} when element(1, Res) == bad_type,
+ mnesia:start([{undefined_config,[]}])),
+ %% Below does not work, but the "correct" behaviour would be to have
+    %% case 1 above behave like the one below.
+
+ %% in mnesia-1.3 {error,{bad_type,{[],undefined_config}}}
+ ?match({error, Res} when element(1, Res) == bad_type,
+ mnesia:start([{[],undefined_config}])),
+ ?cleanup(1, Config),
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+inconsistent_database(doc) ->
+ ["Replace the event module with another module and use it as",
+ "receiver of the various system and table events. Provoke",
+ "coverage of all kinds of events."];
+inconsistent_database(suite) -> [];
+inconsistent_database(Config) when is_list(Config) ->
+ Nodes = mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]}],
+ 2, Config, ?FILE, ?LINE),
+ KillAfter = length(Nodes) * timer:minutes(5),
+ ?acquire_schema(2, Config ++ [{tc_timeout, KillAfter}]),
+
+ Ok = [ok || _N <- Nodes],
+ StartArgs = [{event_module, mnesia_inconsistent_database_test}],
+ ?match({Ok, []}, rpc:multicall(Nodes, mnesia, start, [StartArgs])),
+ ?match([], mnesia_test_lib:kill_mnesia(Nodes)),
+
+ ?match(ok, mnesia_meter:go(ram_copies, Nodes)),
+
+ mnesia_test_lib:reload_appls([mnesia], Nodes),
+ ok.
+
diff --git a/lib/mnesia/test/mnesia_consistency_test.erl b/lib/mnesia/test/mnesia_consistency_test.erl
new file mode 100644
index 0000000000..ffe8ab7ac3
--- /dev/null
+++ b/lib/mnesia/test/mnesia_consistency_test.erl
@@ -0,0 +1,1612 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_consistency_test).
+-author('[email protected]').
+-compile([export_all]).
+
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Verify transaction consistency",
+ "Consistency is the property of the application that requires any",
+ "execution of the transaction to take the database from one",
+ "consistent state to another. Verify that the database is",
+ "consistent at any point in time.",
+ "Verify for various configurations.",
+ " Verify for both set and bag"];
+all(suite) ->
+ [
+ consistency_after_restart,
+ consistency_after_dump_tables,
+ consistency_after_add_replica,
+ consistency_after_del_replica,
+ consistency_after_move_replica,
+ consistency_after_transform_table,
+ consistency_after_change_table_copy_type,
+ consistency_after_fallback,
+ consistency_after_restore,
+ consistency_after_rename_of_node,
+ checkpoint_retainer_consistency,
+ backup_consistency
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% stolen from mnesia_tpcb.erl:
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Account record, total size must be at least 100 bytes
+
+-define(ACCOUNT_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234}).
+
+-record(account,
+ {
+ id = 0, %% Unique account id
+ branch_id = 0, %% Branch where the account is held
+ balance = 0, %% Account balance
+ filler = ?ACCOUNT_FILLER %% Gap filler to ensure size >= 100 bytes
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Branch record, total size must be at least 100 bytes
+
+-define(BRANCH_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890}).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Teller record, total size must be at least 100 bytes
+
+-define(TELLER_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 1234567890123456789012345678901234567890123456789012345678}).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% History record, total size must be at least 50 bytes
+
+-define(HISTORY_FILLER, 1234567890).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-record(tab_config,
+ {
+ db_nodes = [node()],
+ replica_nodes = [node()],
+ replica_type = ram_copies,
+ use_running_mnesia = false,
+ n_branches = 1,
+ n_tellers_per_branch = 10, %% Must be 10
+ n_accounts_per_branch = 100000, %% Must be 100000
+ branch_filler = ?BRANCH_FILLER,
+ account_filler = ?ACCOUNT_FILLER,
+ teller_filler = ?TELLER_FILLER
+ }).
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% stolen from mnesia_tpcb.erl:
+
+list2rec(List, Fields, DefaultTuple) ->
+ [Name|Defaults] = tuple_to_list(DefaultTuple),
+ List2 = list2rec(List, Fields, Defaults, []),
+ list_to_tuple([Name] ++ List2).
+
+list2rec(_List, [], [], Acc) ->
+ Acc;
+list2rec(List, [F|Fields], [D|Defaults], Acc) ->
+ {Val, List2} =
+ case lists:keysearch(F, 1, List) of
+ false ->
+ {D, List};
+ {value, {F, NewVal}} ->
+ {NewVal, lists:keydelete(F, 1, List)}
+ end,
+ list2rec(List2, Fields, Defaults, Acc ++ [Val]).
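+
+%% Illustration (a hedged sketch): list2rec/3 merges a property list into a
+%% record over its defaults, e.g.
+%%
+%%   Rec = list2rec([{replica_type, disc_copies}],
+%%                  record_info(fields, tab_config),
+%%                  #tab_config{}),
+%%   disc_copies = Rec#tab_config.replica_type.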
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
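+%% Options for mnesia_tpcb, the TPC-B style load generator used to put load
+%% on Mnesia during the consistency tests; use_running_mnesia makes it reuse
+%% the nodes already started by the test harness.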
+tpcb_config(ReplicaType, _NodeConfig, Nodes, NoDriverNodes) ->
+ [{n_branches, 10},
+ {n_drivers_per_node, 10},
+ {replica_nodes, Nodes},
+ {driver_nodes, Nodes -- NoDriverNodes},
+ {use_running_mnesia, true},
+ {report_interval, infinity},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {reuse_history_id, true}].
+
+%% Stolen from mnesia_tpcb:dist
+tpcb_config_dist(ReplicaType, _NodeConfig, Nodes, _Config) ->
+ [{db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 10},
+ {n_branches, 1},
+ {use_running_mnesia, true},
+ {n_accounts_per_branch, 10},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(15)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% stolen from mnesia_recovery_test.erl:
+
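+%% Collect the given messages (tagged with the sender pid) in any order;
+%% unexpected messages are logged and ignored.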
+receive_messages([]) -> [];
+receive_messages(ListOfMsgs) ->
+ receive
+ {Pid, Msg} ->
+ case lists:member(Msg, ListOfMsgs) of
+ false ->
+ ?warning("I (~p) have received unexpected msg~n ~p ~n",
+ [self(),{Pid, Msg}]),
+ receive_messages(ListOfMsgs);
+ true ->
+ ?verbose("I (~p) got msg ~p from ~p ~n", [self(),Msg, Pid]),
+ [{Pid, Msg} | receive_messages(ListOfMsgs -- [Msg])]
+ end;
+	Else -> ?warning("Received unexpected Msg~n ~p ~n", [Else])
+ after timer:minutes(3) ->
+ ?error("Timeout in receive msgs while waiting for ~p~n",
+ [ListOfMsgs])
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_restart(suite) ->
+ [
+ consistency_after_restart_1_ram,
+ consistency_after_restart_1_disc,
+ consistency_after_restart_1_disc_only,
+ consistency_after_restart_2_ram,
+ consistency_after_restart_2_disc,
+ consistency_after_restart_2_disc_only
+ ].
+
+consistency_after_restart_1_ram(suite) -> [];
+consistency_after_restart_1_ram(Config) when is_list(Config) ->
+ consistency_after_restart(ram_copies, 2, Config).
+
+consistency_after_restart_1_disc(suite) -> [];
+consistency_after_restart_1_disc(Config) when is_list(Config) ->
+ consistency_after_restart(disc_copies, 2, Config).
+
+consistency_after_restart_1_disc_only(suite) -> [];
+consistency_after_restart_1_disc_only(Config) when is_list(Config) ->
+ consistency_after_restart(disc_only_copies, 2, Config).
+
+consistency_after_restart_2_ram(suite) -> [];
+consistency_after_restart_2_ram(Config) when is_list(Config) ->
+ consistency_after_restart(ram_copies, 3, Config).
+
+consistency_after_restart_2_disc(suite) -> [];
+consistency_after_restart_2_disc(Config) when is_list(Config) ->
+ consistency_after_restart(disc_copies, 3, Config).
+
+consistency_after_restart_2_disc_only(suite) -> [];
+consistency_after_restart_2_disc_only(Config) when is_list(Config) ->
+ consistency_after_restart(disc_only_copies, 3, Config).
+
+consistency_after_restart(ReplicaType, NodeConfig, Config) ->
+ [Node1 | _] = Nodes = ?acquire_nodes(NodeConfig, Config),
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_restart with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes, [Node1]),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(10)),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ %% Start and wait for tables to be loaded on all nodes
+ timer:sleep(timer:seconds(3)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes,[account,branch,teller, history])),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_dump_tables(suite) ->
+ [
+ consistency_after_dump_tables_1_ram,
+ consistency_after_dump_tables_2_ram
+ ].
+
+consistency_after_dump_tables_1_ram(suite) -> [];
+consistency_after_dump_tables_1_ram(Config) when is_list(Config) ->
+ consistency_after_dump_tables(ram_copies, 1, Config).
+
+consistency_after_dump_tables_2_ram(suite) -> [];
+consistency_after_dump_tables_2_ram(Config) when is_list(Config) ->
+ consistency_after_dump_tables(ram_copies, 2, Config).
+
+consistency_after_dump_tables(ReplicaType, NodeConfig, Config) ->
+ [Node1 | _] = Nodes = ?acquire_nodes(NodeConfig, Config),
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_dump_tables with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes, []),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun() -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(10)),
+ ?match({atomic, ok}, rpc:call(Node1, mnesia, dump_tables,
+ [[branch, teller, account, history]])),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ timer:sleep(timer:seconds(1)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes,[account, branch,
+ teller, history])),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_add_replica(suite) ->
+ [
+ consistency_after_add_replica_2_ram,
+ consistency_after_add_replica_2_disc,
+ consistency_after_add_replica_2_disc_only,
+ consistency_after_add_replica_3_ram,
+ consistency_after_add_replica_3_disc,
+ consistency_after_add_replica_3_disc_only
+ ].
+
+consistency_after_add_replica_2_ram(suite) -> [];
+consistency_after_add_replica_2_ram(Config) when is_list(Config) ->
+ consistency_after_add_replica(ram_copies, 2, Config).
+
+consistency_after_add_replica_2_disc(suite) -> [];
+consistency_after_add_replica_2_disc(Config) when is_list(Config) ->
+ consistency_after_add_replica(disc_copies, 2, Config).
+
+consistency_after_add_replica_2_disc_only(suite) -> [];
+consistency_after_add_replica_2_disc_only(Config) when is_list(Config) ->
+ consistency_after_add_replica(disc_only_copies, 2, Config).
+
+consistency_after_add_replica_3_ram(suite) -> [];
+consistency_after_add_replica_3_ram(Config) when is_list(Config) ->
+ consistency_after_add_replica(ram_copies, 3, Config).
+
+consistency_after_add_replica_3_disc(suite) -> [];
+consistency_after_add_replica_3_disc(Config) when is_list(Config) ->
+ consistency_after_add_replica(disc_copies, 3, Config).
+
+consistency_after_add_replica_3_disc_only(suite) -> [];
+consistency_after_add_replica_3_disc_only(Config) when is_list(Config) ->
+ consistency_after_add_replica(disc_only_copies, 3, Config).
+
+consistency_after_add_replica(ReplicaType, NodeConfig, Config) ->
+ Nodes0 = ?acquire_nodes(NodeConfig, Config),
+ AddNode = lists:last(Nodes0),
+ Nodes = Nodes0 -- [AddNode],
+ Node1 = hd(Nodes),
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_add_replica with ~p on ~p~n",
+ [ReplicaType, Nodes0]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes, []),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(10)),
+ ?match({atomic, ok}, mnesia:add_table_copy(account, AddNode, ReplicaType)),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes0, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_del_replica(suite) ->
+ [
+ consistency_after_del_replica_2_ram,
+ consistency_after_del_replica_2_disc,
+ consistency_after_del_replica_2_disc_only,
+ consistency_after_del_replica_3_ram,
+ consistency_after_del_replica_3_disc,
+ consistency_after_del_replica_3_disc_only
+ ].
+
+consistency_after_del_replica_2_ram(suite) -> [];
+consistency_after_del_replica_2_ram(Config) when is_list(Config) ->
+ consistency_after_del_replica(ram_copies, 2, Config).
+
+consistency_after_del_replica_2_disc(suite) -> [];
+consistency_after_del_replica_2_disc(Config) when is_list(Config) ->
+ consistency_after_del_replica(disc_copies, 2, Config).
+
+consistency_after_del_replica_2_disc_only(suite) -> [];
+consistency_after_del_replica_2_disc_only(Config) when is_list(Config) ->
+ consistency_after_del_replica(disc_only_copies, 2, Config).
+
+consistency_after_del_replica_3_ram(suite) -> [];
+consistency_after_del_replica_3_ram(Config) when is_list(Config) ->
+ consistency_after_del_replica(ram_copies, 3, Config).
+
+consistency_after_del_replica_3_disc(suite) -> [];
+consistency_after_del_replica_3_disc(Config) when is_list(Config) ->
+ consistency_after_del_replica(disc_copies, 3, Config).
+
+consistency_after_del_replica_3_disc_only(suite) -> [];
+consistency_after_del_replica_3_disc_only(Config) when is_list(Config) ->
+ consistency_after_del_replica(disc_only_copies, 3, Config).
+
+consistency_after_del_replica(ReplicaType, NodeConfig, Config) ->
+ Nodes = ?acquire_nodes(NodeConfig, Config),
+ Node1 = hd(Nodes),
+ Node2 = lists:last(Nodes),
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_del_replica with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes, []),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(10)),
+ ?match({atomic, ok}, mnesia:del_table_copy(account, Node2)),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_move_replica(suite) ->
+ [
+ consistency_after_move_replica_2_ram,
+ consistency_after_move_replica_2_disc,
+ consistency_after_move_replica_2_disc_only,
+ consistency_after_move_replica_3_ram,
+ consistency_after_move_replica_3_disc,
+ consistency_after_move_replica_3_disc_only
+ ].
+
+consistency_after_move_replica_2_ram(suite) -> [];
+consistency_after_move_replica_2_ram(Config) when is_list(Config) ->
+ consistency_after_move_replica(ram_copies, 2, Config).
+
+consistency_after_move_replica_2_disc(suite) -> [];
+consistency_after_move_replica_2_disc(Config) when is_list(Config) ->
+ consistency_after_move_replica(disc_copies, 2, Config).
+
+consistency_after_move_replica_2_disc_only(suite) -> [];
+consistency_after_move_replica_2_disc_only(Config) when is_list(Config) ->
+ consistency_after_move_replica(disc_only_copies, 2, Config).
+
+consistency_after_move_replica_3_ram(suite) -> [];
+consistency_after_move_replica_3_ram(Config) when is_list(Config) ->
+ consistency_after_move_replica(ram_copies, 3, Config).
+
+consistency_after_move_replica_3_disc(suite) -> [];
+consistency_after_move_replica_3_disc(Config) when is_list(Config) ->
+ consistency_after_move_replica(disc_copies, 3, Config).
+
+consistency_after_move_replica_3_disc_only(suite) -> [];
+consistency_after_move_replica_3_disc_only(Config) when is_list(Config) ->
+ consistency_after_move_replica(disc_only_copies, 3, Config).
+
+consistency_after_move_replica(ReplicaType, NodeConfig, Config) ->
+ Nodes = ?acquire_nodes(NodeConfig, Config ++ [{tc_timeout, timer:minutes(10)}]),
+ Node1 = hd(Nodes),
+ Node2 = lists:last(Nodes),
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_move_replica with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes -- [Node2], []),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(10)),
+ ?match({atomic, ok}, mnesia:move_table_copy(account, Node1, Node2)),
+ ?log("First move completed from node ~p to ~p ~n", [Node1, Node2]),
+ ?match({atomic, ok}, mnesia:move_table_copy(account, Node2, Node1)),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_transform_table(doc) ->
+ ["Check that the database is consistent after transform_table.",
+ " While applications are updating the involved tables. "];
+
+consistency_after_transform_table(suite) ->
+ [
+ consistency_after_transform_table_ram,
+ consistency_after_transform_table_disc,
+ consistency_after_transform_table_disc_only
+ ].
+
+
+consistency_after_transform_table_ram(suite) -> [];
+consistency_after_transform_table_ram(Config) when is_list(Config) ->
+ consistency_after_transform_table(ram_copies, Config).
+
+consistency_after_transform_table_disc(suite) -> [];
+consistency_after_transform_table_disc(Config) when is_list(Config) ->
+ consistency_after_transform_table(disc_copies, Config).
+
+consistency_after_transform_table_disc_only(suite) -> [];
+consistency_after_transform_table_disc_only(Config) when is_list(Config) ->
+ consistency_after_transform_table(disc_only_copies, Config).
+
+consistency_after_transform_table(Type, Config) ->
+ Nodes = [N1, N2,_N3] = ?acquire_nodes(3, Config),
+
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{index, [3]}, {Type, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{index, [3]}, {Type, [N1,N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab3, [{index, [3]}, {Type, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(empty, [{index, [3]},{Type, Nodes}])),
+
+ Tabs = lists:sort([tab1, tab2, tab3, empty]),
+
+ [[mnesia:dirty_write({Tab, N, N}) || N <- lists:seq(1,10)] ||
+ Tab <- Tabs -- [empty, tab4]],
+ mnesia:dump_log(),
+
+ Ok = lists:duplicate(4, {atomic, ok}),
+ ?match(Ok, [mnesia:transform_table(Tab, fun({T, N, N}) -> {T, N, N, ok} end,
+ [k,a,n]) || Tab <- Tabs]),
+ [?match([k,a,n], mnesia:table_info(Tab, attributes)) || Tab <- Tabs],
+
+ Filter = fun(Tab) -> mnesia:foldl(fun(A, Acc) when size(A) == 3 -> [A|Acc];
+ (A, Acc) when size(A) == 4 -> Acc
+ end, [], Tab)
+ end,
+
+ ?match([[],[],[],[]], [element(2,mnesia:transaction(Filter, [Tab])) || Tab <- Tabs]),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ mnesia_test_lib:start_mnesia(Nodes, Tabs),
+
+ ?match([Tabs, Tabs, Tabs],
+ [lists:sort(rpc:call(Node, mnesia,system_info, [tables]) -- [schema]) || Node <- Nodes]),
+
+ ?match([[],[],[],[]], [element(2,mnesia:transaction(Filter, [Tab])) || Tab <- Tabs]),
+ [?match([k,a,n], mnesia:table_info(Tab, attributes)) || Tab <- Tabs],
+
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_change_table_copy_type(doc) ->
+ ["Check that the database is consistent after change of copy type.",
+ " While applications are updating the involved tables. "].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_fallback(doc) ->
+ ["Check that installed fallbacks are consistent. Check this by starting ",
+ "some nodes, run tpcb on them, take a backup at any time, install it ",
+ "as a fallback, kill all nodes, start mnesia again and check for ",
+ "any inconsistencies"];
+consistency_after_fallback(suite) ->
+ [
+ consistency_after_fallback_2_ram,
+ consistency_after_fallback_2_disc,
+ consistency_after_fallback_2_disc_only,
+ consistency_after_fallback_3_ram,
+ consistency_after_fallback_3_disc
+ , consistency_after_fallback_3_disc_only
+ ].
+
+consistency_after_fallback_2_ram(suite) -> [];
+consistency_after_fallback_2_ram(Config) when is_list(Config) ->
+ consistency_after_fallback(ram_copies, 2, Config).
+
+consistency_after_fallback_2_disc(suite) -> [];
+consistency_after_fallback_2_disc(Config) when is_list(Config) ->
+ consistency_after_fallback(disc_copies, 2, Config).
+
+consistency_after_fallback_2_disc_only(suite) -> [];
+consistency_after_fallback_2_disc_only(Config) when is_list(Config) ->
+ consistency_after_fallback(disc_only_copies, 2, Config).
+
+consistency_after_fallback_3_ram(suite) -> [];
+consistency_after_fallback_3_ram(Config) when is_list(Config) ->
+ consistency_after_fallback(ram_copies, 3, Config).
+
+consistency_after_fallback_3_disc(suite) -> [];
+consistency_after_fallback_3_disc(Config) when is_list(Config) ->
+ consistency_after_fallback(disc_copies, 3, Config).
+
+consistency_after_fallback_3_disc_only(suite) -> [];
+consistency_after_fallback_3_disc_only(Config) when is_list(Config) ->
+ consistency_after_fallback(disc_only_copies, 3, Config).
+
+consistency_after_fallback(ReplicaType, NodeConfig, Config) ->
+ %%?verbose("Starting consistency_after_fallback2 at ~p~n", [self()]),
+ Delay = 5,
+ Nodes = ?acquire_nodes(NodeConfig, [{tc_timeout, timer:minutes(10)} | Config]),
+ Node1 = hd(Nodes),
+ %%?verbose("Mnesia info: ~p~n", [mnesia:info()]),
+
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_fallback with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes, []),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(Delay)),
+
+ %% Make a backup
+ ?verbose("Doing backup~n", []),
+ ?match(ok, mnesia:backup(consistency_after_fallback2)),
+
+ %% Install the backup as a fallback
+ ?verbose("Doing fallback~n", []),
+ ?match(ok, mnesia:install_fallback(consistency_after_fallback2)),
+ timer:sleep(timer:seconds(Delay)),
+
+ %% Stop tpcb
+ ?verbose("Stopping TPC-B~n", []),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+
+ %% Stop and then start mnesia and check table consistency
+ %%?verbose("Restarting Mnesia~n", []),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ mnesia_test_lib:start_mnesia(Nodes,[account,branch,teller,history]),
+
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ if
+ ReplicaType == ram_copies ->
+ %% Test that change_table_copy_type works, i.e. no account.dcd file exists.
+ ?match({atomic, ok}, mnesia:change_table_copy_type(account, node(), disc_copies));
+ true ->
+ ignore
+ end,
+ file:delete(consistency_after_fallback2),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_restore(doc) ->
+ ["Verify consistency after restore operations."];
+
+consistency_after_restore(suite) ->
+ [
+ consistency_after_restore_clear_ram,
+ consistency_after_restore_clear_disc,
+ consistency_after_restore_clear_disc_only,
+ consistency_after_restore_recreate_ram,
+ consistency_after_restore_recreate_disc,
+ consistency_after_restore_recreate_disc_only
+ ].
+
+consistency_after_restore_clear_ram(suite) -> [];
+consistency_after_restore_clear_ram(Config) when is_list(Config) ->
+ consistency_after_restore(ram_copies, clear_tables, Config).
+
+consistency_after_restore_clear_disc(suite) -> [];
+consistency_after_restore_clear_disc(Config) when is_list(Config) ->
+ consistency_after_restore(disc_copies, clear_tables, Config).
+
+consistency_after_restore_clear_disc_only(suite) -> [];
+consistency_after_restore_clear_disc_only(Config) when is_list(Config) ->
+ consistency_after_restore(disc_only_copies, clear_tables, Config).
+
+consistency_after_restore_recreate_ram(suite) -> [];
+consistency_after_restore_recreate_ram(Config) when is_list(Config) ->
+ consistency_after_restore(ram_copies, recreate_tables, Config).
+
+consistency_after_restore_recreate_disc(suite) -> [];
+consistency_after_restore_recreate_disc(Config) when is_list(Config) ->
+ consistency_after_restore(disc_copies, recreate_tables, Config).
+
+consistency_after_restore_recreate_disc_only(suite) -> [];
+consistency_after_restore_recreate_disc_only(Config) when is_list(Config) ->
+ consistency_after_restore(disc_only_copies, recreate_tables, Config).
+
+consistency_after_restore(ReplicaType, Op, Config) ->
+ Delay = 1,
+ Nodes = ?acquire_nodes(3, [{tc_timeout, timer:minutes(10)} | Config]),
+ [Node1, Node2, _Node3] = Nodes,
+ File = "cons_backup_restore",
+
+ ?log("consistency_after_restore with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ Tabs = [carA, carB, carC, carD],
+
+ ?match({atomic, ok}, mnesia:create_table(carA, [{ReplicaType, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(carB, [{ReplicaType, Nodes -- [Node1]}])),
+ ?match({atomic, ok}, mnesia:create_table(carC, [{ReplicaType, Nodes -- [Node2]}])),
+ ?match({atomic, ok}, mnesia:create_table(carD, [{ReplicaType, [Node2]}])),
+
+ NList = lists:seq(0, 20),
+ [lists:foreach(fun(E) -> ok = mnesia:dirty_write({Tab, E, 1}) end, NList) ||
+ Tab <- Tabs],
+
+ {ok, Name, _} = mnesia:activate_checkpoint([{max, [schema | Tabs]},
+ {ram_overrides_dump, true}]),
+ ?verbose("Doing backup~n", []),
+ ?match(ok, mnesia:backup_checkpoint(Name, File)),
+ ?match(ok, mnesia:deactivate_checkpoint(Name)),
+
+ [lists:foreach(fun(E) -> ok = mnesia:dirty_write({Tab, E, 2}) end, NList) ||
+ Tab <- Tabs],
+
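+ %% Spawn workers (see change_tab/3 below) that keep updating the tables while
+ %% the restore runs; each tuple is the 'EXIT' message expected on normal exit.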
+ Pids1 = [{'EXIT', spawn_link(?MODULE, change_tab, [self(), carA, Op]), ok} || _ <- lists:seq(1, 5)],
+ Pids2 = [{'EXIT', spawn_link(?MODULE, change_tab, [self(), carB, Op]), ok} || _ <- lists:seq(1, 5)],
+ Pids3 = [{'EXIT', spawn_link(?MODULE, change_tab, [self(), carC, Op]), ok} || _ <- lists:seq(1, 5)],
+ Pids4 = [{'EXIT', spawn_link(?MODULE, change_tab, [self(), carD, Op]), ok} || _ <- lists:seq(1, 5)],
+
+ AllPids = Pids1 ++ Pids2 ++ Pids3 ++ Pids4,
+
+ Restore = fun(F, Args) ->
+ case mnesia:restore(F, Args) of
+ {atomic, List} -> lists:sort(List);
+ Else -> Else
+ end
+ end,
+
+ timer:sleep(timer:seconds(Delay)), %% Let changers grab locks
+ ?verbose("Doing restore~n", []),
+ ?match(Tabs, Restore(File, [{default_op, Op}])),
+
+ timer:sleep(timer:seconds(Delay)), %% Let em die
+
+ ?match_multi_receive(AllPids),
+
+ case ?match(ok, restore_verify_tabs(Tabs)) of
+ {success, ok} ->
+ file:delete(File);
+ _ ->
+ {T, M, S} = time(),
+ File2 = ?flat_format("consistency_error~w~w~w.BUP", [T, M, S]),
+ file:rename(File, File2)
+ end,
+ ?verify_mnesia(Nodes, []).
+
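+%% Worker used by consistency_after_restore/3: rewrite a random key until the
+%% restore has brought back the original value 1, then exit normally with 'ok'.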
+change_tab(Father, Tab, Test) ->
+ Key = random:uniform(20),
+ Update = fun() ->
+ case mnesia:read({Tab, Key}) of
+ [{Tab, Key, 1}] ->
+ quit;
+ [{Tab, Key, _N}] ->
+ mnesia:write({Tab, Key, 3})
+ end
+ end,
+ case mnesia:transaction(Update) of
+ {atomic, quit} ->
+ exit(ok);
+ {aborted, {no_exists, Tab}} when Test == recreate_tables ->%% I'll allow this
+ change_tab(Father, Tab, Test);
+ {atomic, ok} ->
+ change_tab(Father, Tab, Test)
+ end.
+
+restore_verify_tabs([Tab | R]) ->
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:foldl(fun({_, _, 1}, ok) ->
+ ok;
+ (Else, Acc) ->
+ [Else|Acc]
+ end, ok, Tab)
+ end)),
+ restore_verify_tabs(R);
+restore_verify_tabs([]) ->
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consistency_after_rename_of_node(doc) ->
+ ["Skipped because it is an unimportant case."].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+checkpoint_retainer_consistency(doc) ->
+ ["Verify that the contents of a checkpoint retainer has the expected",
+ "contents in various situations."];
+checkpoint_retainer_consistency(suite) ->
+ [
+ updates_during_checkpoint_activation,
+ updates_during_checkpoint_iteration,
+ load_table_with_activated_checkpoint,
+ add_table_copy_to_table_with_activated_checkpoint
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+updates_during_checkpoint_activation(doc) ->
+ ["Perform updates while the checkpoint getting activated",
+ "and verify that all checkpoint retainers associated with",
+ "different replicas of the same table really has the same",
+ "contents."];
+updates_during_checkpoint_activation(suite) ->
+ [
+ updates_during_checkpoint_activation_2_ram,
+ updates_during_checkpoint_activation_2_disc,
+ updates_during_checkpoint_activation_2_disc_only,
+ updates_during_checkpoint_activation_3_ram,
+ updates_during_checkpoint_activation_3_disc
+ , updates_during_checkpoint_activation_3_disc_only
+ ].
+
+updates_during_checkpoint_activation_2_ram(suite) -> [];
+updates_during_checkpoint_activation_2_ram(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(ram_copies, 2, Config).
+
+updates_during_checkpoint_activation_2_disc(suite) -> [];
+updates_during_checkpoint_activation_2_disc(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(disc_copies, 2, Config).
+
+updates_during_checkpoint_activation_2_disc_only(suite) -> [];
+updates_during_checkpoint_activation_2_disc_only(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(disc_only_copies, 2, Config).
+
+updates_during_checkpoint_activation_3_ram(suite) -> [];
+updates_during_checkpoint_activation_3_ram(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(ram_copies, 3, Config).
+
+updates_during_checkpoint_activation_3_disc(suite) -> [];
+updates_during_checkpoint_activation_3_disc(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(disc_copies, 3, Config).
+
+updates_during_checkpoint_activation_3_disc_only(suite) -> [];
+updates_during_checkpoint_activation_3_disc_only(Config) when is_list(Config) ->
+ updates_during_checkpoint_activation(disc_only_copies, 3, Config).
+
+updates_during_checkpoint_activation(ReplicaType,NodeConfig,Config) ->
+ %%?verbose("updates_during_checkpoint_activation2 at ~p~n", [self()]),
+ Delay = 5,
+ Nodes = ?acquire_nodes(NodeConfig, Config),
+ Node1 = hd(Nodes),
+ %%?verbose("Mnesia info: ~p~n", [mnesia:info()]),
+
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("consistency_after_fallback with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config_dist(ReplicaType, NodeConfig, Nodes, Config),
+ %%TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ timer:sleep(timer:seconds(Delay)),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)}]),
+ timer:sleep(timer:seconds(Delay)),
+
+ %% Stop tpcb
+ ?verbose("Stopping TPC-B~n", []),
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+
+ ?match(ok, mnesia:backup_checkpoint(CPName,
+ updates_during_checkpoint_activation2)),
+ timer:sleep(timer:seconds(Delay)),
+
+ ?match(ok, mnesia:install_fallback(updates_during_checkpoint_activation2)),
+
+ %% Stop and then start mnesia and check table consistency
+ %%?verbose("Restarting Mnesia~n", []),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ file:delete(updates_during_checkpoint_activation2),
+ mnesia_test_lib:start_mnesia(Nodes,[account,branch,teller, history]),
+
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+updates_during_checkpoint_iteration(doc) ->
+ ["Perform updates while someone is iterating over a checkpoint",
+ "and verify that the iterator really finds the expected data",
+ "regardless of ongoing upates."];
+
+updates_during_checkpoint_iteration(suite) ->
+ [
+ updates_during_checkpoint_iteration_2_ram,
+ updates_during_checkpoint_iteration_2_disc
+ , updates_during_checkpoint_iteration_2_disc_only
+ ].
+
+updates_during_checkpoint_iteration_2_ram(suite) -> [];
+updates_during_checkpoint_iteration_2_ram(Config) when is_list(Config) ->
+ updates_during_checkpoint_iteration(ram_copies, 2, Config).
+
+updates_during_checkpoint_iteration_2_disc(suite) -> [];
+updates_during_checkpoint_iteration_2_disc(Config) when is_list(Config) ->
+ updates_during_checkpoint_iteration(disc_copies, 2, Config).
+
+updates_during_checkpoint_iteration_2_disc_only(suite) -> [];
+updates_during_checkpoint_iteration_2_disc_only(Config) when is_list(Config) ->
+ updates_during_checkpoint_iteration(disc_only_copies, 2, Config).
+
+updates_during_checkpoint_iteration(ReplicaType,NodeConfig,Config) ->
+ %?verbose("updates_during_checkpoint_iteration2 at ~p~n", [self()]),
+ Delay = 5,
+ Nodes = ?acquire_nodes(NodeConfig, Config),
+ Node1 = hd(Nodes),
+ %?verbose("Mnesia info: ~p~n", [mnesia:info()]),
+ File = updates_during_checkpoint_iteration2,
+ {success, [A]} = ?start_activities([Node1]),
+ ?log("updates_during_checkpoint_iteration with ~p on ~p~n",
+ [ReplicaType, Nodes]),
+ TpcbConfig = tpcb_config_dist(ReplicaType, NodeConfig, Nodes, Config),
+ %%TpcbConfig = tpcb_config(ReplicaType, NodeConfig, Nodes),
+ TpcbConfigRec = list2rec(TpcbConfig,
+ record_info(fields,tab_config),
+ #tab_config{}),
+ mnesia_tpcb:init(TpcbConfig),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+ A ! fun () -> mnesia:backup_checkpoint(CPName, File) end,
+
+ do_changes_during_backup(TpcbConfigRec),
+
+ ?match_receive({A,ok}),
+
+ timer:sleep(timer:seconds(Delay)),
+ ?match(ok, mnesia:install_fallback(File)),
+ timer:sleep(timer:seconds(Delay)),
+
+ ?match({error,{"Bad balance",_,_}}, mnesia_tpcb:verify_tabs()),
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ mnesia_test_lib:start_mnesia(Nodes,[account,branch,teller, history]),
+
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+
+ ?match(ok, file:delete(File)),
+ ?verify_mnesia(Nodes, []).
+
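+%% Dirty-write a fixed balance to every account while the checkpoint backup is
+%% in progress; the backup is expected to reflect the checkpoint, not these writes.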
+do_changes_during_backup(TpcbConfig) ->
+ loop_branches(TpcbConfig#tab_config.n_branches,
+ TpcbConfig#tab_config.n_accounts_per_branch).
+
+loop_branches(N_br,N_acc) when N_br >= 1 ->
+ loop_accounts(N_br,N_acc),
+ loop_branches(N_br-1,N_acc);
+loop_branches(_,_) -> done.
+
+loop_accounts(N_br, N_acc) when N_acc >= 1 ->
+ A = #account{id=N_acc, branch_id=N_br, balance = 4711},
+ ok = mnesia:dirty_write(A),
+ loop_accounts(N_br, N_acc-1);
+
+loop_accounts(_,_) -> done.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+load_table_with_activated_checkpoint(doc) ->
+ ["Load a table with a checkpoint attached to it and verify that the",
+ "newly loaded replica also gets a checkpoint retainer attached to it",
+ "and that it is consistent with the original retainer."];
+
+load_table_with_activated_checkpoint(suite) ->
+ [
+ load_table_with_activated_checkpoint_ram,
+ load_table_with_activated_checkpoint_disc,
+ load_table_with_activated_checkpoint_disc_only
+ ].
+
+load_table_with_activated_checkpoint_ram(suite) -> [];
+load_table_with_activated_checkpoint_ram(Config) when is_list(Config) ->
+ load_table_with_activated_checkpoint(ram_copies, Config).
+
+load_table_with_activated_checkpoint_disc(suite) -> [];
+load_table_with_activated_checkpoint_disc(Config) when is_list(Config) ->
+ load_table_with_activated_checkpoint(disc_copies, Config).
+
+load_table_with_activated_checkpoint_disc_only(suite) -> [];
+load_table_with_activated_checkpoint_disc_only(Config) when is_list(Config) ->
+ load_table_with_activated_checkpoint(disc_only_copies, Config).
+
+load_table_with_activated_checkpoint(Type, Config) ->
+ Nodes = ?acquire_nodes(2, Config),
+ Node1 = hd(Nodes),
+ Tab = load_test,
+ Def = [{attributes, [key, value]},
+ {Type, Nodes}], %% ??? important that RAM ???
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ timer:sleep(timer:seconds(1)),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ mnesia_test_lib:stop_mnesia([Node1]),
+ mnesia_test_lib:start_mnesia([Node1],[Tab]),
+ %%--- check whether the checkpoint is attached to both replicas
+ {success, [A,B]} = ?start_activities(Nodes),
+
+ A ! fun () ->
+ mnesia:table_info(Tab,checkpoints)
+ end,
+ ?match_receive({A,[CPName]}),
+
+ B ! fun () ->
+ mnesia:table_info(Tab,checkpoints)
+ end,
+ ?match_receive({B,[CPName]}),
+
+ %%--- check whether both retainers are consistent
+ ?match(ok, mnesia:dirty_write({Tab, 1, 815})),
+ A ! fun () ->
+ mnesia:backup_checkpoint(CPName, load_table_a)
+ end,
+ ?match_receive({A,ok}),
+ B ! fun () ->
+ mnesia:backup_checkpoint(CPName, load_table_b)
+ end,
+ ?match_receive({B,ok}),
+
+ Mod = mnesia_backup, %% Assume local files
+ List_a = view(load_table_a, Mod),
+ List_b = view(load_table_b, Mod),
+
+ ?match(List_a, List_b),
+
+ ?match(ok,file:delete(load_table_a)),
+ ?match(ok,file:delete(load_table_b)),
+ ?verify_mnesia(Nodes, []).
+
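+%% Traverse a backup file and return its sorted contents, with the schema
+%% cookie normalised so that backups taken from different replicas compare equal.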
+view(Source, Mod) ->
+ View = fun(Item, Acc) ->
+ ?verbose("tab - item : ~p ~n",[Item]),
+ case Item of
+ {schema, Tab, Cs} -> %% Remove cookie information
+ NewCs = lists:keyreplace(cookie, 1, Cs,
+ {cookie, skip_cookie}),
+ Item2 = {schema, Tab, NewCs},
+ {[Item], [Item2|Acc]};
+ _ ->
+ {[Item], [Item|Acc]}
+ end
+ end,
+ {ok,TabList} =
+ mnesia:traverse_backup(Source, Mod, dummy, read_only, View, []),
+ lists:sort(TabList).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+add_table_copy_to_table_with_activated_checkpoint(doc) ->
+ ["Add a replica to a table with a checkpoint attached to it",
+ "and verify that the new replica also gets a checkpoint",
+ "retainer attached to it and that it is consistent with the",
+ "original retainer."];
+
+add_table_copy_to_table_with_activated_checkpoint(suite) ->
+ [
+ add_table_copy_to_table_with_activated_checkpoint_ram,
+ add_table_copy_to_table_with_activated_checkpoint_disc,
+ add_table_copy_to_table_with_activated_checkpoint_disc_only
+ ].
+
+add_table_copy_to_table_with_activated_checkpoint_ram(suite) -> [];
+add_table_copy_to_table_with_activated_checkpoint_ram(Config) when is_list(Config) ->
+ add_table_copy_to_table_with_activated_checkpoint(ram_copies, Config).
+
+add_table_copy_to_table_with_activated_checkpoint_disc(suite) -> [];
+add_table_copy_to_table_with_activated_checkpoint_disc(Config) when is_list(Config) ->
+ add_table_copy_to_table_with_activated_checkpoint(disc_copies, Config).
+
+add_table_copy_to_table_with_activated_checkpoint_disc_only(suite) -> [];
+add_table_copy_to_table_with_activated_checkpoint_disc_only(Config) when is_list(Config) ->
+ add_table_copy_to_table_with_activated_checkpoint(disc_only_copies, Config).
+
+add_table_copy_to_table_with_activated_checkpoint(Type,Config) ->
+ Nodes = ?acquire_nodes(2, Config),
+ %?verbose("NODES = ~p ~n",[Nodes]),
+ [Node1,Node2] = Nodes,
+
+ Tab = add_test,
+ Def = [{attributes, [key, value]},
+ {Type, [Node1]}], %% ??? important that RAM ???
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ ?match({atomic,ok},mnesia:add_table_copy(Tab,Node2,ram_copies)),
+
+ %%--- check whether the checkpoint is attached to both replicas
+ {success, [A,B]} = ?start_activities(Nodes),
+
+ A ! fun () ->
+ mnesia:table_info(Tab,checkpoints)
+ end,
+ ?match_receive({A,[CPName]}),
+
+ B ! fun () ->
+ mnesia:table_info(Tab,checkpoints)
+ end,
+ ?match_receive({B,[CPName]}),
+
+ %%--- check whether both retainers are consistent
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 815})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 815})),
+
+ A ! fun () ->
+ mnesia:backup_checkpoint(CPName, add_table_a)
+ end,
+ ?match_receive({A,ok}),
+ B ! fun () ->
+ mnesia:backup_checkpoint(CPName, add_table_b)
+ end,
+ ?match_receive({B,ok}),
+
+ Mod = mnesia_backup, %% Assume local files
+
+ List_a = view(add_table_a, Mod),
+ List_b = view(add_table_b, Mod),
+
+ ?match(List_a, List_b),
+
+ ?match(ok,file:delete(add_table_a)),
+ ?match(ok, file:delete(add_table_b)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+backup_consistency(suite) ->
+ [
+ interupted_install_fallback,
+ interupted_uninstall_fallback,
+ mnesia_down_during_backup_causes_switch,
+ mnesia_down_during_backup_causes_abort,
+ schema_transactions_during_backup
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+interupted_install_fallback(doc) ->
+ ["Verify that a interrupted install_fallback really",
+ "is performed on all nodes or none"];
+
+interupted_install_fallback(suite) ->
+ [
+ inst_fallback_process_dies,
+ fatal_when_inconsistency
+ ].
+
+inst_fallback_process_dies(suite) ->
+ [];
+inst_fallback_process_dies(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ {success, [A,_B,_C]} = ?start_activities(Nodes),
+
+ TestPid = self(),
+ DebugId = {mnesia_bup, fallback_receiver_loop, pre_swap},
+ DebugFun =
+ fun(PrevContext, _EvalContext) ->
+ ?verbose("fallback_receiver_loop - pre_swap pid ~p #~p~n",
+ [self(),PrevContext]),
+ TestPid ! {self(),fallback_preswap},
+ case receive_messages([fallback_continue]) of
+ [{TestPid,fallback_continue}] ->
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end
+ end,
+ ?activate_debug_fun(DebugId, DebugFun, 1),
+
+ Tab = install_table,
+ Def = [{attributes, [key, value]}, {disc_copies, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ ?match(ok, mnesia:backup_checkpoint(CPName, install_backup)),
+
+ A ! fun() -> mnesia:install_fallback(install_backup) end,
+ [{AnsPid,fallback_preswap}] = receive_messages([fallback_preswap]),
+ exit(A, kill),
+ AnsPid ! {self(), fallback_continue},
+ ?match_receive({'EXIT', A, killed}),
+ timer:sleep(2000), %% Wait till fallback is installed everywhere
+
+ mnesia_test_lib:kill_mnesia(Nodes),
+ ?verbose("~n---->Mnesia is stopped everywhere<-----~n", []),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes,[Tab])),
+
+ check_data(Nodes, Tab),
+ ?match(ok, file:delete(install_backup)),
+ ?verify_mnesia(Nodes, []).
+
+check_data([N1 | R], Tab) ->
+ ?match([{Tab, 1, 4711}], rpc:call(N1, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 2, 42}], rpc:call(N1, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{Tab, 3, 256}], rpc:call(N1, mnesia, dirty_read, [{Tab, 3}])),
+ check_data(R, Tab);
+check_data([], _Tab) ->
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+fatal_when_inconsistency(suite) ->
+ [];
+fatal_when_inconsistency(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+
+ [Node1, Node2, Node3] = Nodes =
+ ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ {success, [A,_B,_C]} = ?start_activities(Nodes),
+
+ TestPid = self(),
+ DebugId = {mnesia_bup, fallback_receiver_loop, pre_swap},
+ DebugFun =
+ fun(PrevContext, _EvalContext) ->
+ ?verbose("fallback_receiver_loop - pre_swap pid ~p #~p~n",
+ [self(),PrevContext]),
+ TestPid ! {self(),fallback_preswap},
+ case receive_messages([fallback_continue]) of
+ [{TestPid,fallback_continue}] ->
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end
+ end,
+ ?activate_debug_fun(DebugId, DebugFun, 1),
+
+ Tab = install_table,
+ Def = [{attributes, [key, value]}, {disc_copies, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ ?match(ok, mnesia:backup_checkpoint(CPName, install_backup)),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42424242})),
+
+ A ! fun() ->
+ mnesia:install_fallback(install_backup)
+ end,
+
+ [{AnsPid,fallback_preswap}] = receive_messages([fallback_preswap]),
+ exit(AnsPid, kill), %% Killing install-fallback on the local node will make the installation fail
+ AnsPid ! {self(), fallback_continue},
+ ?deactivate_debug_fun(DebugId),
+
+ ?match_receive({A,{error,{"Cannot install fallback",
+ {'EXIT',AnsPid,killed}}}}),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ ?verbose("EXPECTING FATAL from 2 nodes WITH CORE DUMP~n", []),
+
+ ?match([], mnesia_test_lib:start_mnesia([Node1],[])),
+ is_running(Node1, yes),
+ ?match([{Node2, mnesia, _}], mnesia_test_lib:start_mnesia([Node2],[])),
+ is_running(Node2, no),
+ ?match([{Node3, mnesia, _}], mnesia_test_lib:start_mnesia([Node3],[])),
+ is_running(Node3, no),
+ mnesia_test_lib:kill_mnesia(Nodes),
+
+ ?match(ok, mnesia:install_fallback(install_backup)),
+ mnesia_test_lib:start_mnesia(Nodes,[Tab]),
+
+ check_data(Nodes, Tab),
+
+ ?match(ok,file:delete(install_backup)),
+ ?verify_mnesia(Nodes, []).
+
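+%% Poll the node until mnesia:system_info(is_running) returns the expected value.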
+is_running(Node, Shouldbe) ->
+ timer:sleep(1000),
+ Running = rpc:call(Node, mnesia, system_info, [is_running]),
+ case Running of
+ Shouldbe -> ok;
+ _ -> is_running(Node, Shouldbe)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+interupted_uninstall_fallback(doc) ->
+ ["Verify that a interrupted uninstall_fallback really",
+ "is performed on all nodes or none"];
+interupted_uninstall_fallback(suite) ->
+ [
+ after_delete
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+after_delete(doc) ->
+ ["interrupt the uninstall after deletion of ",
+ "fallback files - there shall be no fallback"];
+after_delete(suite) -> [];
+after_delete(Config) when is_list(Config) ->
+ do_uninstall(Config, post_delete).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+
+do_uninstall(Config,DebugPoint) ->
+ ?is_debug_compiled,
+
+ Nodes = ?acquire_nodes(3, Config),
+ %%?verbose("NODES = ~p ~n",[Nodes]),
+
+ {success, [P1,P2,P3]} = ?start_activities(Nodes),
+
+ NP1 = node(P1),
+ NP2 = node(P2),
+
+ {A,B,C} = case node() of
+ NP1 ->
+ %%?verbose("first case ~n"),
+ {P3,P2,P1};
+ NP2 ->
+ %%?verbose("second case ~n"),
+ {P3, P1, P2};
+ _ ->
+ { P1, P2, P3}
+ end,
+
+ Node1 = node(A),
+ Node2 = node(B),
+ Node3 = node(C),
+
+ ?verbose(" A pid:~p node:~p ~n",[A,Node1]),
+ ?verbose(" B pid:~p node:~p ~n",[B,Node2]),
+ ?verbose(" C pid:~p node:~p ~n",[C,Node3]),
+
+
+ TestPid = self(),
+ %%?verbose("TestPid : ~p~n",[TestPid]),
+ DebugId = {mnesia_bup, uninstall_fallback2, DebugPoint},
+ DebugFun = fun(PrevContext, _EvalContext) ->
+ ?verbose("uninstall_fallback pid ~p #~p~n"
+ ,[self(),PrevContext]),
+ TestPid ! {self(),uninstall_predelete},
+ case receive_messages([uninstall_continue]) of
+ [{TestPid,uninstall_continue}] ->
+ ?deactivate_debug_fun(DebugId),
+ %%?verbose("uninstall_fallback continues~n"),
+ PrevContext+1
+ end
+ end,
+ ?remote_activate_debug_fun(Node1,DebugId, DebugFun, 1),
+
+ Tab = install_table,
+ Def = [{attributes, [key, value]},
+ {ram_copies, Nodes}], %% necessary to test different types ???
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ ?match(ok, mnesia:backup_checkpoint(CPName,install_backup)),
+ timer:sleep(timer:seconds(1)),
+
+ A ! fun () ->
+ mnesia:install_fallback(install_backup)
+ end,
+ ?match_receive({A,ok}),
+
+ A ! fun () ->
+ mnesia:uninstall_fallback()
+ end,
+ %%
+ %% catch the debug entry in mnesia and kill one Mnesia node
+ %%
+
+
+ [{AnsPid,uninstall_predelete}] = receive_messages([uninstall_predelete]),
+
+ ?verbose("AnsPid : ~p~n",[AnsPid]),
+
+ mnesia_test_lib:kill_mnesia([Node2]),
+ timer:sleep(timer:seconds(1)),
+
+ AnsPid ! {self(),uninstall_continue},
+
+ ?match_receive({A,ok}),
+
+ mnesia_test_lib:kill_mnesia(Nodes) ,
+ mnesia_test_lib:start_mnesia(Nodes,[Tab]),
+
+ A ! fun () ->
+ R1 = mnesia:dirty_read({Tab,1}),
+ R2 = mnesia:dirty_read({Tab,2}),
+ R3 = mnesia:dirty_read({Tab,3}),
+ {R1,R2,R3}
+ end,
+ ?match_receive({ A, {[],[],[]} }),
+
+ B ! fun () ->
+ R1 = mnesia:dirty_read({Tab,1}),
+ R2 = mnesia:dirty_read({Tab,2}),
+ R3 = mnesia:dirty_read({Tab,3}),
+ {R1,R2,R3}
+ end,
+ ?match_receive({ B, {[],[],[]} }),
+
+ C ! fun () ->
+ R1 = mnesia:dirty_read({Tab,1}),
+ R2 = mnesia:dirty_read({Tab,2}),
+ R3 = mnesia:dirty_read({Tab,3}),
+ {R1,R2,R3}
+ end,
+ ?match_receive({ C, {[],[],[]} }),
+
+ ?match(ok,file:delete(install_backup)),
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+mnesia_down_during_backup_causes_switch(doc) ->
+ ["Verify that an ongoing backup is not disturbed",
+ "even if the node hosting the replica that currently",
+ "is being backup'ed is stopped. The backup utility",
+ "is expected to switch over to another replica and",
+ "fulfill the backup."];
+mnesia_down_during_backup_causes_switch(suite) ->
+ [
+ cause_switch_before,
+ cause_switch_after
+ ].
+
+%%%%%%%%%%%%%%%
+
+cause_switch_before(doc) ->
+ ["interrupt the backup before iterating the retainer"];
+cause_switch_before(suite) -> [];
+cause_switch_before(Config) when is_list(Config) ->
+ do_something_during_backup(cause_switch,pre,Config).
+
+%%%%%%%%%%%%%%%
+
+cause_switch_after(doc) ->
+ ["interrupt the backup after iterating the retainer"];
+cause_switch_after(suite) -> [];
+cause_switch_after(Config) when is_list(Config) ->
+ do_something_during_backup(cause_switch,post,Config).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+mnesia_down_during_backup_causes_abort(doc) ->
+ ["Verify that an ongoing backup is aborted nicely",
+ "without leaving any backup file if the last replica",
+ "of a table becomes unavailable due to a node down",
+ "or some crash."];
+mnesia_down_during_backup_causes_abort(suite) ->
+ [
+ cause_abort_before,
+ cause_abort_after
+ ].
+
+%%%%%%%%%%%%%%%%%%
+
+cause_abort_before(doc) ->
+ ["interrupt the backup before iterating the retainer"];
+
+cause_abort_before(suite) -> [];
+cause_abort_before(Config) when is_list(Config) ->
+ do_something_during_backup(cause_abort,pre,Config).
+
+%%%%%%%%%%%%%%%%%%
+
+cause_abort_after(doc) ->
+ ["interrupt the backup after iterating the retainer"];
+
+cause_abort_after(suite) -> [];
+cause_abort_after(Config) when is_list(Config) ->
+ do_something_during_backup(cause_abort,post,Config).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+schema_transactions_during_backup(doc) ->
+ ["Verify that an schema transactions does not",
+ "affect an ongoing backup."];
+schema_transactions_during_backup(suite) ->
+ [
+ change_schema_before,
+ change_schema_after
+ ].
+
+%%%%%%%%%%%%%
+
+change_schema_before(doc) ->
+ ["interrupt the backup before iterating the retainer"];
+change_schema_before(suite) -> [];
+change_schema_before(Config) when is_list(Config) ->
+ do_something_during_backup(change_schema,pre,Config).
+
+%%%%%%%%%%%%%%%%
+
+change_schema_after(doc) ->
+ ["interrupt the backup after iterating the retainer"];
+change_schema_after(suite) -> [];
+change_schema_after(Config) when is_list(Config) ->
+ do_something_during_backup(change_schema,post,Config).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+do_something_during_backup(Action,DebugPoint,Config) ->
+ ?is_debug_compiled,
+
+ Nodes = ?acquire_nodes(3, Config),
+
+ {success, [A,B,C]} = ?start_activities(Nodes),
+
+ Node1 = node(A),
+ Node2 = node(B),
+ Node3 = node(C),
+
+ TestPid = self(),
+ %%?verbose("TestPid : ~p~n",[TestPid]),
+
+ Tab = interrupt_table,
+ Bak = interrupt_backup,
+ Def = [{attributes, [key, value]},
+ {ram_copies, [Node2,Node3]}],
+ %% necessary to test different types ???
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+
+
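+ %% Hook into the backup (tab_copier) at the requested point: the backup
+ %% process announces itself and then blocks on a global lock held by this
+ %% test process, so the test can act before releasing the lock below.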
+ DebugId = {mnesia_log, tab_copier, DebugPoint},
+ DebugFun = fun(PrevContext, EvalContext) ->
+ ?verbose("interrupt backup pid ~p #~p ~n context ~p ~n"
+ ,[self(),PrevContext,EvalContext]),
+ TestPid ! {self(),interrupt_backup_pre},
+ global:set_lock({{lock_for_backup, Tab}, self()},
+ Nodes,
+ infinity),
+
+ %%?verbose("interrupt backup - continues ~n"),
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end,
+ ?remote_activate_debug_fun(Node1,DebugId, DebugFun, 1),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ {ok, CPName, _NodeList} =
+ mnesia:activate_checkpoint([{max, mnesia:system_info(tables)},
+ {ram_overrides_dump,true}]),
+
+ A ! fun () ->
+ %%?verbose("node: ~p pid: ~p ~n",[node(),self()]),
+ mnesia:table_info(Tab,where_to_read)
+ end,
+
+ ReadNode_a = receive { A, ReadNode_a_tmp } -> ReadNode_a_tmp end,
+ ?verbose("ReadNode ~p ~n",[ReadNode_a]),
+
+ global:set_lock({{lock_for_backup, Tab}, self()}, Nodes, infinity),
+
+ A ! fun () -> %% A shall perform the backup, so the test proc is
+ %% able to do further actions in between
+ mnesia:backup_checkpoint(CPName, Bak)
+ end,
+
+ %% catch the debug function of mnesia, stop the backup process
+ %% kill the node ReadNode_a and continue the backup process
+ %% As there is a second replica of the table, the backup shall continue
+
+ case receive_messages([interrupt_backup_pre]) of
+ [{_AnsPid,interrupt_backup_pre}] -> ok
+ end,
+
+ case Action of
+ cause_switch ->
+ mnesia_test_lib:kill_mnesia([ReadNode_a]),
+ timer:sleep(timer:seconds(1));
+ cause_abort ->
+ mnesia_test_lib:kill_mnesia([Node2,Node3]),
+ timer:sleep(timer:seconds(1));
+ change_schema ->
+ Tab2 = second_interrupt_table,
+ Def2 = [{attributes, [key, value]},
+ {ram_copies, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2))
+ end,
+
+ %% AnsPid ! {self(),interrupt_backup_continue},
+ global:del_lock({{lock_for_backup, Tab}, self()}, Nodes),
+
+ case Action of
+ cause_abort ->
+
+ %% answer of A when finishing the backup
+ ?match_receive({A,{error, _}}),
+
+ ?match({error,{"Cannot install fallback",_}},
+ mnesia:install_fallback(Bak));
+ _ -> %% cause_switch, change_schema
+
+ ?match_receive({A,ok}), %% answer of A when finishing the backup
+
+ %% send a fun to that node where mnesia is still running
+ WritePid = case ReadNode_a of
+ Node2 -> C; %% node(C) == Node3
+ Node3 -> B
+ end,
+ WritePid ! fun () ->
+ ?match(ok, mnesia:dirty_write({Tab, 1, 815})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 816})),
+ ok
+ end,
+ ?match_receive({ WritePid, ok }),
+ ?match(ok, mnesia:install_fallback(Bak))
+ end,
+
+ %% Stop and then start mnesia and check table consistency
+ %%?verbose("Restarting Mnesia~n", []),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ mnesia_test_lib:start_mnesia(Nodes,[Tab]),
+
+ case Action of
+ cause_switch ->
+ %% the backup should exist
+ cross_check_tables([A,B,C],Tab,{[{Tab,1,4711}],
+ [{Tab,2,42}],
+ [{Tab,3,256}] }),
+ ?match(ok,file:delete(Bak));
+ cause_abort ->
+ %% the backup should NOT exist
+ cross_check_tables([A,B,C],Tab,{[],[],[]}),
+ %% file does not exist
+ ?match({error, _},file:delete(Bak));
+ change_schema ->
+ %% the backup should exist
+ cross_check_tables([A,B,C],Tab,{[{Tab,1,4711}],
+ [{Tab,2,42}],
+ [{Tab,3,256}] }),
+ ?match(ok,file:delete(Bak))
+ end,
+ ?verify_mnesia(Nodes, []).
+
+%% check the contents of the table
+cross_check_tables([],_tab,_elements) -> ok;
+cross_check_tables([Pid|Rest],Tab,{Val1,Val2,Val3}) ->
+ Pid ! fun () ->
+ R1 = mnesia:dirty_read({Tab,1}),
+ R2 = mnesia:dirty_read({Tab,2}),
+ R3 = mnesia:dirty_read({Tab,3}),
+ {R1,R2,R3}
+ end,
+ ?match_receive({ Pid, {Val1, Val2, Val3 } }),
+ cross_check_tables(Rest,Tab,{Val1,Val2,Val3} ).
diff --git a/lib/mnesia/test/mnesia_cost.erl b/lib/mnesia/test/mnesia_cost.erl
new file mode 100644
index 0000000000..54cb2b3064
--- /dev/null
+++ b/lib/mnesia/test/mnesia_cost.erl
@@ -0,0 +1,222 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_cost).
+-compile(export_all).
+
+%% This code exercises the mnesia system and produces a bunch
+%% of measurements on what various things cost
+
+-define(TIMES, 1000). %% set to at least 1000 when running for real !!
+
+%% This is the record we perform all ops on in this test
+
+-record(item, {a = 1234,
+ b = foobar,
+ c = "1.2.3.4",
+ d = {'Lennart', 'Hyland'},
+ e = true
+ }).
+
+go() ->
+ go([node() | nodes()]).
+
+go(Nodes) when hd(Nodes) == node() ->
+ {ok, Out} = file:open("MNESIA_COST", write),
+ put(out, Out),
+
+ rpc:multicall(Nodes, mnesia, lkill, []),
+ ok = mnesia:delete_schema(Nodes),
+ ok = mnesia:create_schema(Nodes),
+ rpc:multicall(Nodes, mnesia, start, []),
+ TabDef = [{attributes, record_info(fields, item)}],
+ {atomic, ok} = mnesia:create_table(item, TabDef),
+
+ round("single ram copy", "no index"),
+ {atomic, ok} = mnesia:add_table_index(item, #item.e),
+ round("single ram copy", "One index"),
+
+ {atomic, ok} = mnesia:add_table_index(item, #item.c),
+ round("single ram copy", "Two indexes"),
+
+ {atomic, ok} = mnesia:del_table_index(item, #item.e),
+ {atomic, ok} = mnesia:del_table_index(item, #item.c),
+
+ {atomic, ok} = mnesia:change_table_copy_type(item, node(), disc_copies),
+ round("single disc copy", "no index"),
+
+ {atomic, ok} = mnesia:change_table_copy_type(item, node(), ram_copies),
+
+ case length(Nodes) of
+ Len when Len < 2 ->
+ format("<WARNING> replication skipped. Too few nodes.", []);
+ _Len ->
+ N2 = lists:nth(2, Nodes),
+ {atomic, ok} = mnesia:add_table_copy(item, N2, ram_copies),
+ round("2 replicated ram copy", "no index")
+ end,
+ file:close(Out),
+ erase(out),
+ ok.
+
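+%% Measure a fixed set of transaction profiles for the given replication
+%% and index setup.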
+round(Replication, Index) ->
+ run(Replication, Index, [write],
+ fun() -> mnesia:write(#item{}) end),
+
+
+ run(Replication, Index, [read],
+ fun() -> mnesia:read({item, 1234}) end),
+
+ run(Replication, Index, [read, write],
+ fun() -> mnesia:read({item, 1234}),
+ mnesia:write(#item{}) end),
+
+ run(Replication, Index, [wread, write],
+ fun() -> mnesia:wread({item, 1234}),
+ mnesia:write(#item{}) end),
+
+
+ run(Replication, Index, [match, write, write, write],
+ fun() -> mnesia:match_object({item, 1, '_', '_', '_', true}),
+ mnesia:write(#item{a =1}),
+ mnesia:write(#item{a =2}),
+ mnesia:write(#item{a =3}) end).
+
+
+format(F, As) ->
+ io:format(get(out), F, As).
+
+run(What, OtherInfo, Ops, F) ->
+ run(t, What, OtherInfo, Ops, F).
+
+run(How, What, OtherInfo, Ops, F) ->
+ T1 = erlang:now(),
+ statistics(runtime),
+ do_times(How, ?TIMES, F),
+ {_, RunTime} = statistics(runtime),
+ T2 = erlang:now(),
+ RealTime = subtr(T1, T2),
+ report(How, What, OtherInfo, Ops, RunTime, RealTime).
+
+report(t, What, OtherInfo, Ops, RunTime, RealTime) ->
+ format("~s, ~s, transaction call ", [What, OtherInfo]),
+ format("Ops is ", []),
+ lists:foreach(fun(Op) -> format("~w-", [Op]) end, Ops),
+
+ format("~n ~w/~w Millisecs/Trans ~w/~w MilliSecs/Operation ~n~n",
+ [RunTime/?TIMES,
+ RealTime/?TIMES,
+ RunTime/(?TIMES*length(Ops)),
+ RealTime/(?TIMES*length(Ops))]);
+
+report(dirty, What, OtherInfo, Ops, RunTime, RealTime) ->
+ format("~s, ~s, dirty calls ", [What, OtherInfo]),
+ format("Ops is ", []),
+ lists:foreach(fun(Op) -> format("~w-", [Op]) end, Ops),
+
+ format("~n ~w/~w Millisecs/Bunch ~w/~w MilliSecs/Operation ~n~n",
+ [RunTime/?TIMES,
+ RealTime/?TIMES,
+ RunTime/(?TIMES*length(Ops)),
+ RealTime/(?TIMES*length(Ops))]).
+
+
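+%% Difference between two erlang:now() timestamps, in milliseconds.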
+subtr(Before, After) ->
+ E =(element(1,After)*1000000000000
+ +element(2,After)*1000000+element(3,After)) -
+ (element(1,Before)*1000000000000
+ +element(2,Before)*1000000+element(3,Before)),
+ E div 1000.
+
+do_times(t, I, F) ->
+ do_trans_times(I, F);
+do_times(dirty, I, F) ->
+ do_dirty(I, F).
+
+do_trans_times(I, F) when I /= 0 ->
+ {atomic, _} = mnesia:transaction(F),
+ do_trans_times(I-1, F);
+do_trans_times(_,_) -> ok.
+
+do_dirty(I, F) when I /= 0 ->
+ F(),
+ do_dirty(I-1, F);
+do_dirty(_,_) -> ok.
+
+
+
+table_load([N1,N2| _ ] = Ns) ->
+ Nodes = [N1,N2],
+ rpc:multicall(Ns, mnesia, lkill, []),
+ ok = mnesia:delete_schema(Ns),
+ ok = mnesia:create_schema(Nodes),
+ rpc:multicall(Nodes, mnesia, start, []),
+ TabDef = [{disc_copies,[N1]},{ram_copies,[N2]},
+ {attributes,record_info(fields,item)},{record_name,item}],
+ Tabs = [list_to_atom("tab" ++ integer_to_list(I)) || I <- lists:seq(1,400)],
+
+ [mnesia:create_table(Tab,TabDef) || Tab <- Tabs],
+
+%% InitTab = fun(Tab) ->
+%% mnesia:write_lock_table(Tab),
+%% InitRec = fun(Key) -> mnesia:write(Tab,#item{a=Key},write) end,
+%% lists:foreach(InitRec, lists:seq(1,100))
+%% end,
+%%
+%% {Time,{atomic,ok}} = timer:tc(mnesia,transaction, [fun() ->lists:foreach(InitTab, Tabs) end]),
+ mnesia:dump_log(),
+%% io:format("Init took ~p msec ~n", [Time/1000]),
+ rpc:call(N2, mnesia, stop, []), timer:sleep(1000),
+ mnesia:stop(), timer:sleep(500),
+ %% Warmup
+ ok = mnesia:start([{no_table_loaders, 1}]),
+ timer:tc(mnesia, wait_for_tables, [Tabs, infinity]),
+ mnesia:dump_log(),
+ rpc:call(N2, mnesia, dump_log, []),
+ io:format("Initialized ~n",[]),
+
+ mnesia:stop(), timer:sleep(1000),
+ ok = mnesia:start([{no_table_loaders, 1}]),
+ {T1, ok} = timer:tc(mnesia, wait_for_tables, [Tabs, infinity]),
+ io:format("Loading from disc with 1 loader ~p msec~n",[T1/1000]),
+ mnesia:stop(), timer:sleep(1000),
+ ok = mnesia:start([{no_table_loaders, 4}]),
+ {T2, ok} = timer:tc(mnesia, wait_for_tables, [Tabs, infinity]),
+ io:format("Loading from disc with 4 loader ~p msec~n",[T2/1000]),
+
+ %% Warmup
+ rpc:call(N2, ?MODULE, remote_load, [Tabs,4]),
+ io:format("Initialized ~n",[]),
+
+
+ T3 = rpc:call(N2, ?MODULE, remote_load, [Tabs,1]),
+ io:format("Loading from net with 1 loader ~p msec~n",[T3/1000]),
+
+ T4 = rpc:call(N2, ?MODULE, remote_load, [Tabs,4]),
+ io:format("Loading from net with 4 loader ~p msec~n",[T4/1000]),
+
+ ok.
+
+remote_load(Tabs,Loaders) ->
+ ok = mnesia:start([{no_table_loaders, Loaders}]),
+%% io:format("~p ~n", [mnesia_controller:get_info(500)]),
+ {Time, ok} = timer:tc(mnesia, wait_for_tables, [Tabs, infinity]),
+ timer:sleep(1000), mnesia:stop(), timer:sleep(1000),
+ Time.
diff --git a/lib/mnesia/test/mnesia_dbn_meters.erl b/lib/mnesia/test/mnesia_dbn_meters.erl
new file mode 100644
index 0000000000..feaf90ee75
--- /dev/null
+++ b/lib/mnesia/test/mnesia_dbn_meters.erl
@@ -0,0 +1,242 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(mnesia_dbn_meters).
+-export([
+ start/0,
+ local_start/0,
+ distr_start/1,
+ start/3
+ ]).
+
+-record(simple,{key,val=0}).
+-define(key,1).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Configuration and start
+
+start() ->
+ local_start(),
+ distr_start(nodes()).
+
+local_start() ->
+ start(one_ram_only,[node()],some_meters()),
+ start(one_disc_only,[node()],some_meters()).
+
+distr_start([]) ->
+ local_only;
+distr_start(OtherNodes) when is_list(OtherNodes) ->
+ start(ram_and_ram,[node()|OtherNodes],some_meters()),
+ start(disc_and_disc,[node()|OtherNodes],some_meters()).
+
+start(Config,Nodes,Meters) ->
+ Attrs = record_info(fields,simple),
+ Schema = [{name,simple},{type,set},{attributes,Attrs}] ++ config(Config,Nodes),
+ L = '====================',
+ io:format("~n~p dbn_meters: ~p ~p~nSchema = ~p.~n~n",[L,Config,L,Schema]),
+ ok = mnesia:delete_schema(Nodes),
+ ok = mnesia:create_schema(Nodes),
+ rpc:multicall(Nodes, mnesia, start, []),
+ {atomic,_} = mnesia:create_table(Schema),
+ lists:foreach(fun report_meter/1,Meters),
+ {atomic, ok} = mnesia:delete_table(simple),
+ rpc:multicall(Nodes, mnesia, stop, []),
+ ok.
+
+config(one_ram_only,[Single|_]) ->
+ [{ram_copies,[Single]}];
+config(ram_and_ram,[Master|[Slave|_]]) ->
+ [{ram_copies,[Master,Slave]}];
+config(one_disc_only,[Single|_]) ->
+ [{disc_copies,[Single]}];
+config(disc_and_disc,[Master|[Slave|_]]) ->
+ [{disc_copies,[Master,Slave]}];
+config(Config,Nodes) ->
+ io:format("<ERROR> Config ~p not supported or too few nodes ~p given~n",[Config,Nodes]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% The various DBN meters
+some_meters() ->
+ [create,
+ open_safe_read,
+ open_dirty_read,
+ get_int,
+ open_update,
+ put_int,
+ put_int_and_copy,
+ dirty_put_int_and_copy,
+ start_trans,
+ commit_one_update,
+ delete,
+ dirty_delete
+ ].
+
+report_meter(Meter) ->
+ Times = 100,
+ Micros = repeat_meter(Meter,{atomic,{0,ignore}},Times) div Times,
+ io:format("\t~-30w ~-10w micro seconds (mean of ~p repetitions)~n",[Meter,Micros,Times]).
+
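+%% Sum the measured microseconds over Times repetitions; if a run aborts or
+%% crashes it is reported and the remaining repetitions are skipped.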
+repeat_meter(_Meter,{atomic,{Micros,_Result}},0) ->
+ Micros;
+repeat_meter(Meter,{atomic,{Micros,_Result}},Times) when Times > 0 ->
+ repeat_meter(Meter,catch meter(Meter),Times-1) + Micros;
+repeat_meter(Meter,{aborted,Reason},Times) when Times > 0 ->
+ io:format("<ERROR>\t~-20w\t,aborted, because ~p~n",[Meter,Reason]),
+ 0;
+repeat_meter(Meter,{'EXIT',Reason},Times) when Times > 0 ->
+ io:format("<ERROR>\t~-20w\tcrashed, because ~p~n",[Meter,Reason]),
+ 0.
+
+meter(create) ->
+ Key = 1,
+ mnesia:transaction(fun() -> mnesia:delete({simple,Key}) end),
+ Fun = fun() ->
+ BeforeT = erlang:now(),
+ R = mnesia:write(#simple{key=Key}),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(open_safe_read) ->
+ Key = 2,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ BeforeT = erlang:now(),
+ R = mnesia:read({simple,Key}),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(open_dirty_read) ->
+ Key = 21,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ BeforeT = erlang:now(),
+ R = mnesia:dirty_read({simple,Key}),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(get_int) ->
+ Key = 3,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ [Simple] = mnesia:read({simple,Key}),
+ BeforeT = erlang:now(),
+ Int = Simple#simple.val,
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,Int)
+ end,
+ mnesia:transaction(Fun);
+
+meter(open_update) ->
+ Key = 3,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ BeforeT = erlang:now(),
+ R = mnesia:wread({simple,Key}),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(put_int) ->
+ Key = 4,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ [Simple] = mnesia:wread({simple,Key}),
+ BeforeT = erlang:now(),
+ R = Simple#simple{val=7},
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(put_int_and_copy) ->
+ Key = 5,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ [Simple] = mnesia:wread({simple,Key}),
+ BeforeT = erlang:now(),
+ Simple2 = Simple#simple{val=17},
+ R = mnesia:write(Simple2),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(dirty_put_int_and_copy) ->
+ Key = 55,
+ mnesia:dirty_write(#simple{key=Key}),
+ [Simple] = mnesia:dirty_read({simple,Key}),
+ BeforeT = erlang:now(),
+ Simple2 = Simple#simple{val=17},
+ R = mnesia:dirty_write(Simple2),
+ AfterT = erlang:now(),
+ {atomic,elapsed_time(BeforeT,AfterT,R)};
+
+meter(start_trans) ->
+ BeforeT = erlang:now(),
+ {atomic,AfterT} = mnesia:transaction(fun() -> erlang:now() end),
+ {atomic,elapsed_time(BeforeT,AfterT,ok)};
+
+meter(commit_one_update) ->
+ Key = 6,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ [Simple] = mnesia:wread({simple,Key}),
+ Simple2 = Simple#simple{val=27},
+ _R = mnesia:write(Simple2),
+ erlang:now()
+ end,
+ {atomic,BeforeT} = mnesia:transaction(Fun),
+ AfterT = erlang:now(),
+ {atomic,elapsed_time(BeforeT,AfterT,ok)};
+
+meter(delete) ->
+ Key = 7,
+ mnesia:transaction(fun() -> mnesia:write(#simple{key=Key}) end),
+ Fun = fun() ->
+ BeforeT = erlang:now(),
+ R = mnesia:delete({simple,Key}),
+ AfterT = erlang:now(),
+ elapsed_time(BeforeT,AfterT,R)
+ end,
+ mnesia:transaction(Fun);
+
+meter(dirty_delete) ->
+ Key = 75,
+ mnesia:dirty_write(#simple{key=Key}),
+ BeforeT = erlang:now(),
+ R = mnesia:dirty_delete({simple,Key}),
+ AfterT = erlang:now(),
+ {atomic, elapsed_time(BeforeT,AfterT,R)}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Calculate the elapsed time
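+%% erlang:now() returns {MegaSecs, Secs, MicroSecs}; the difference below is
+%% expressed in microseconds, e.g. (sketch) AfterT = {0,1,500000} minus
+%% BeforeT = {0,0,0} gives 1500000.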
+elapsed_time(BeforeT,AfterT,Result) ->
+ {(element(1,AfterT)*1000000000000
+ +element(2,AfterT)*1000000+element(3,AfterT)) -
+ (element(1,BeforeT)*1000000000000
+ +element(2,BeforeT)*1000000+element(3,BeforeT)),Result}.
diff --git a/lib/mnesia/test/mnesia_dirty_access_test.erl b/lib/mnesia/test/mnesia_dirty_access_test.erl
new file mode 100644
index 0000000000..5f9f2a9733
--- /dev/null
+++ b/lib/mnesia/test/mnesia_dirty_access_test.erl
@@ -0,0 +1,927 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_dirty_access_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+    ["Evil dirty access, regardless of transaction scope.",
+     "Invoke all functions in the API and try to cover all legal use",
+     "cases as well as the illegal ditto. This is a complement to the",
+     "other, more explicit test cases."];
+all(suite) ->
+ [
+ dirty_write,
+ dirty_read,
+ dirty_update_counter,
+ dirty_delete,
+ dirty_delete_object,
+ dirty_match_object,
+ dirty_index,
+ dirty_iter,
+ admin_tests
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Write records dirty
+
+dirty_write(suite) ->
+ [
+ dirty_write_ram,
+ dirty_write_disc,
+ dirty_write_disc_only
+ ].
+
+dirty_write_ram(suite) -> [];
+dirty_write_ram(Config) when is_list(Config) ->
+ dirty_write(Config, ram_copies).
+
+dirty_write_disc(suite) -> [];
+dirty_write_disc(Config) when is_list(Config) ->
+ dirty_write(Config, disc_copies).
+
+dirty_write_disc_only(suite) -> [];
+dirty_write_disc_only(Config) when is_list(Config) ->
+ dirty_write(Config, disc_only_copies).
+
+dirty_write(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_write,
+ Def = [{attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match({'EXIT', _}, mnesia:dirty_write([])),
+ ?match({'EXIT', _}, mnesia:dirty_write({Tab, 2})),
+ ?match({'EXIT', _}, mnesia:dirty_write({foo, 2})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() ->
+ mnesia:dirty_write({Tab, 1, 2}) end)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Read records dirty
+
+dirty_read(suite) ->
+ [
+ dirty_read_ram,
+ dirty_read_disc,
+ dirty_read_disc_only
+ ].
+
+dirty_read_ram(suite) -> [];
+dirty_read_ram(Config) when is_list(Config) ->
+ dirty_read(Config, ram_copies).
+
+dirty_read_disc(suite) -> [];
+dirty_read_disc(Config) when is_list(Config) ->
+ dirty_read(Config, disc_copies).
+
+dirty_read_disc_only(suite) -> [];
+dirty_read_disc_only(Config) when is_list(Config) ->
+ dirty_read(Config, disc_only_copies).
+
+dirty_read(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_read,
+ Def = [{type, bag}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match({'EXIT', _}, mnesia:dirty_read([])),
+ ?match({'EXIT', _}, mnesia:dirty_read({Tab})),
+ ?match({'EXIT', _}, mnesia:dirty_read({Tab, 1, 2})),
+ ?match([], mnesia:dirty_read({Tab, 1})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+ ?match([{Tab, 1, 2}], mnesia:dirty_read({Tab, 1})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 3})),
+ ?match([{Tab, 1, 2}, {Tab, 1, 3}], mnesia:dirty_read({Tab, 1})),
+
+ ?match({atomic, [{Tab, 1, 2}, {Tab, 1, 3}]},
+ mnesia:transaction(fun() -> mnesia:dirty_read({Tab, 1}) end)),
+
+ ?match(false, mnesia:async_dirty(fun() -> mnesia:is_transaction() end)),
+ ?match(false, mnesia:sync_dirty(fun() -> mnesia:is_transaction() end)),
+ ?match(false, mnesia:ets(fun() -> mnesia:is_transaction() end)),
+ ?match(false, mnesia:activity(async_dirty, fun() -> mnesia:is_transaction() end)),
+ ?match(false, mnesia:activity(sync_dirty, fun() -> mnesia:is_transaction() end)),
+ ?match(false, mnesia:activity(ets, fun() -> mnesia:is_transaction() end)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Update counter record dirty
+
+dirty_update_counter(suite) ->
+ [
+ dirty_update_counter_ram,
+ dirty_update_counter_disc,
+ dirty_update_counter_disc_only
+ ].
+
+dirty_update_counter_ram(suite) -> [];
+dirty_update_counter_ram(Config) when is_list(Config) ->
+ dirty_update_counter(Config, ram_copies).
+
+dirty_update_counter_disc(suite) -> [];
+dirty_update_counter_disc(Config) when is_list(Config) ->
+ dirty_update_counter(Config, disc_copies).
+
+dirty_update_counter_disc_only(suite) -> [];
+dirty_update_counter_disc_only(Config) when is_list(Config) ->
+ dirty_update_counter(Config, disc_only_copies).
+
+dirty_update_counter(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_update_counter,
+ Def = [{attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+
+ ?match({'EXIT', _}, mnesia:dirty_update_counter({Tab, 1}, [])),
+ ?match({'EXIT', _}, mnesia:dirty_update_counter({Tab}, 3)),
+ ?match({'EXIT', _}, mnesia:dirty_update_counter({foo, 1}, 3)),
+ ?match(5, mnesia:dirty_update_counter({Tab, 1}, 3)),
+ ?match([{Tab, 1, 5}], mnesia:dirty_read({Tab, 1})),
+
+ ?match({atomic, 8}, mnesia:transaction(fun() ->
+ mnesia:dirty_update_counter({Tab, 1}, 3) end)),
+
+ ?match(1, mnesia:dirty_update_counter({Tab, foo}, 1)),
+ ?match([{Tab, foo,1}], mnesia:dirty_read({Tab,foo})),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Delete record dirty
+
+dirty_delete(suite) ->
+ [
+ dirty_delete_ram,
+ dirty_delete_disc,
+ dirty_delete_disc_only
+ ].
+
+dirty_delete_ram(suite) -> [];
+dirty_delete_ram(Config) when is_list(Config) ->
+ dirty_delete(Config, ram_copies).
+
+dirty_delete_disc(suite) -> [];
+dirty_delete_disc(Config) when is_list(Config) ->
+ dirty_delete(Config, disc_copies).
+
+dirty_delete_disc_only(suite) -> [];
+dirty_delete_disc_only(Config) when is_list(Config) ->
+ dirty_delete(Config, disc_only_copies).
+
+dirty_delete(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_delete,
+ Def = [{type, bag}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match({'EXIT', _}, mnesia:dirty_delete([])),
+ ?match({'EXIT', _}, mnesia:dirty_delete({Tab})),
+ ?match({'EXIT', _}, mnesia:dirty_delete({Tab, 1, 2})),
+ ?match(ok, mnesia:dirty_delete({Tab, 1})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+ ?match(ok, mnesia:dirty_delete({Tab, 1})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+ ?match(ok, mnesia:dirty_delete({Tab, 1})),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 2})),
+ ?match({atomic, ok}, mnesia:transaction(fun() ->
+ mnesia:dirty_delete({Tab, 1}) end)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Delete matching record dirty
+
+dirty_delete_object(suite) ->
+ [
+ dirty_delete_object_ram,
+ dirty_delete_object_disc,
+ dirty_delete_object_disc_only
+ ].
+
+dirty_delete_object_ram(suite) -> [];
+dirty_delete_object_ram(Config) when is_list(Config) ->
+ dirty_delete_object(Config, ram_copies).
+
+dirty_delete_object_disc(suite) -> [];
+dirty_delete_object_disc(Config) when is_list(Config) ->
+ dirty_delete_object(Config, disc_copies).
+
+dirty_delete_object_disc_only(suite) -> [];
+dirty_delete_object_disc_only(Config) when is_list(Config) ->
+ dirty_delete_object(Config, disc_only_copies).
+
+dirty_delete_object(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_delete_object,
+ Def = [{type, bag}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ OneRec = {Tab, 1, 2},
+ ?match({'EXIT', _}, mnesia:dirty_delete_object([])),
+ ?match({'EXIT', _}, mnesia:dirty_delete_object({Tab})),
+ ?match({'EXIT', _}, mnesia:dirty_delete_object({Tab, 1})),
+ ?match(ok, mnesia:dirty_delete_object(OneRec)),
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match(ok, mnesia:dirty_delete_object(OneRec)),
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match(ok, mnesia:dirty_delete_object(OneRec)),
+
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match({atomic, ok}, mnesia:transaction(fun() ->
+ mnesia:dirty_delete_object(OneRec) end)),
+
+ ?match({'EXIT', {aborted, {bad_type, Tab, _}}}, mnesia:dirty_delete_object(Tab, {Tab, {['_']}, 21})),
+ ?match({'EXIT', {aborted, {bad_type, Tab, _}}}, mnesia:dirty_delete_object(Tab, {Tab, {['$5']}, 21})),
+
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Read matching records dirty
+
+dirty_match_object(suite) ->
+ [
+ dirty_match_object_ram,
+ dirty_match_object_disc,
+ dirty_match_object_disc_only
+ ].
+
+dirty_match_object_ram(suite) -> [];
+dirty_match_object_ram(Config) when is_list(Config) ->
+ dirty_match_object(Config, ram_copies).
+
+dirty_match_object_disc(suite) -> [];
+dirty_match_object_disc(Config) when is_list(Config) ->
+ dirty_match_object(Config, disc_copies).
+
+dirty_match_object_disc_only(suite) -> [];
+dirty_match_object_disc_only(Config) when is_list(Config) ->
+ dirty_match_object(Config, disc_only_copies).
+
+dirty_match_object(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_match,
+ Def = [{attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ OneRec = {Tab, 1, 2},
+ OnePat = {Tab, '$1', 2},
+ ?match([], mnesia:dirty_match_object(OnePat)),
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match([OneRec], mnesia:dirty_match_object(OnePat)),
+ ?match({atomic, [OneRec]}, mnesia:transaction(fun() ->
+ mnesia:dirty_match_object(OnePat) end)),
+
+ ?match({'EXIT', _}, mnesia:dirty_match_object({foo, '$1', 2})),
+ ?match({'EXIT', _}, mnesia:dirty_match_object({[], '$1', 2})),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+dirty_index(suite) ->
+ [
+ dirty_index_match_object,
+ dirty_index_read,
+ dirty_index_update
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Dirty read matching records by using an index
+
+dirty_index_match_object(suite) ->
+ [
+ dirty_index_match_object_ram,
+ dirty_index_match_object_disc,
+ dirty_index_match_object_disc_only
+ ].
+
+dirty_index_match_object_ram(suite) -> [];
+dirty_index_match_object_ram(Config) when is_list(Config) ->
+ dirty_index_match_object(Config, ram_copies).
+
+dirty_index_match_object_disc(suite) -> [];
+dirty_index_match_object_disc(Config) when is_list(Config) ->
+ dirty_index_match_object(Config, disc_copies).
+
+dirty_index_match_object_disc_only(suite) -> [];
+dirty_index_match_object_disc_only(Config) when is_list(Config) ->
+ dirty_index_match_object(Config, disc_only_copies).
+
+dirty_index_match_object(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_index_match_object,
+ ValPos = 3,
+ BadValPos = ValPos + 1,
+ Def = [{attributes, [k, v]}, {Storage, [Node1]}, {index, [ValPos]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match([], mnesia:dirty_index_match_object({Tab, '$1', 2}, ValPos)),
+ OneRec = {Tab, 1, 2},
+ ?match(ok, mnesia:dirty_write(OneRec)),
+
+ ?match([OneRec], mnesia:dirty_index_match_object({Tab, '$1', 2}, ValPos)),
+ ?match({'EXIT', _}, mnesia:dirty_index_match_object({Tab, '$1', 2}, BadValPos)),
+ ?match({'EXIT', _}, mnesia:dirty_index_match_object({foo, '$1', 2}, ValPos)),
+ ?match({'EXIT', _}, mnesia:dirty_index_match_object({[], '$1', 2}, ValPos)),
+ ?match({atomic, [OneRec]}, mnesia:transaction(fun() ->
+ mnesia:dirty_index_match_object({Tab, '$1', 2}, ValPos) end)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Read records by using an index
+
+dirty_index_read(suite) ->
+ [
+ dirty_index_read_ram,
+ dirty_index_read_disc,
+ dirty_index_read_disc_only
+ ].
+
+dirty_index_read_ram(suite) -> [];
+dirty_index_read_ram(Config) when is_list(Config) ->
+ dirty_index_read(Config, ram_copies).
+
+dirty_index_read_disc(suite) -> [];
+dirty_index_read_disc(Config) when is_list(Config) ->
+ dirty_index_read(Config, disc_copies).
+
+dirty_index_read_disc_only(suite) -> [];
+dirty_index_read_disc_only(Config) when is_list(Config) ->
+ dirty_index_read(Config, disc_only_copies).
+
+dirty_index_read(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_index_read,
+ ValPos = 3,
+ BadValPos = ValPos + 1,
+ Def = [{type, set},
+ {attributes, [k, v]},
+ {Storage, [Node1]},
+ {index, [ValPos]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ OneRec = {Tab, 1, 2},
+ ?match([], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match(ok, mnesia:dirty_write(OneRec)),
+ ?match([OneRec], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:dirty_index_read(Tab, 2, ValPos) end)),
+ ?match(42, mnesia:dirty_update_counter({Tab, 1}, 40)),
+ ?match([{Tab,1,42}], mnesia:dirty_read({Tab, 1})),
+ ?match([], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match([{Tab, 1, 42}], mnesia:dirty_index_read(Tab, 42, ValPos)),
+
+ ?match({'EXIT', _}, mnesia:dirty_index_read(Tab, 2, BadValPos)),
+ ?match({'EXIT', _}, mnesia:dirty_index_read(foo, 2, ValPos)),
+ ?match({'EXIT', _}, mnesia:dirty_index_read([], 2, ValPos)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+dirty_index_update(suite) ->
+ [
+ dirty_index_update_set_ram,
+ dirty_index_update_set_disc,
+ dirty_index_update_set_disc_only,
+ dirty_index_update_bag_ram,
+ dirty_index_update_bag_disc,
+ dirty_index_update_bag_disc_only
+ ];
+dirty_index_update(doc) ->
+    ["See Ticket OTP-2083; verifies that a table with an index is "
+     "updated in the correct way, i.e. the index finds the correct "
+     "records after an update"].
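+
+%% Background sketch: a secondary index on attribute/position ValPos makes
+%% records reachable by a non-key field, e.g. mnesia:dirty_index_read(Tab, 2, ValPos)
+%% returns every record whose ValPos field currently equals 2; the cases below
+%% check that this stays true across writes, deletes and index changes.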
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dirty_index_update_set_ram(suite) -> [];
+dirty_index_update_set_ram(Config) when is_list(Config) ->
+ dirty_index_update_set(Config, ram_copies).
+
+dirty_index_update_set_disc(suite) -> [];
+dirty_index_update_set_disc(Config) when is_list(Config) ->
+ dirty_index_update_set(Config, disc_copies).
+
+dirty_index_update_set_disc_only(suite) -> [];
+dirty_index_update_set_disc_only(Config) when is_list(Config) ->
+ dirty_index_update_set(Config, disc_only_copies).
+
+dirty_index_update_set(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = index_test,
+ ValPos = v1,
+ ValPos2 = v3,
+ Def = [{attributes, [k, v1, v2, v3]},
+ {Storage, [Node1]},
+ {index, [ValPos]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ Pat1 = {Tab, '$1', 2, '$2', '$3'},
+ Pat2 = {Tab, '$1', '$2', '$3', '$4'},
+
+ Rec1 = {Tab, 1, 2, 3, 4},
+ Rec2 = {Tab, 2, 2, 13, 14},
+ Rec3 = {Tab, 1, 12, 13, 14},
+ Rec4 = {Tab, 4, 2, 13, 14},
+
+ ?match([], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match(ok, mnesia:dirty_write(Rec1)),
+ ?match([Rec1], mnesia:dirty_index_read(Tab, 2, ValPos)),
+
+ ?match(ok, mnesia:dirty_write(Rec2)),
+ R1 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2], lists:sort(R1)),
+
+ ?match(ok, mnesia:dirty_write(Rec3)),
+ R2 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec2], lists:sort(R2)),
+ ?match([Rec2], mnesia:dirty_index_match_object(Pat1, ValPos)),
+
+ {atomic, R3} = mnesia:transaction(fun() -> mnesia:match_object(Pat2) end),
+ ?match([Rec3, Rec2], lists:sort(R3)),
+
+ ?match(ok, mnesia:dirty_write(Rec4)),
+ R4 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec2, Rec4], lists:sort(R4)),
+
+ ?match(ok, mnesia:dirty_delete({Tab, 4})),
+ ?match([Rec2], mnesia:dirty_index_read(Tab, 2, ValPos)),
+
+ ?match({atomic, ok}, mnesia:del_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos2)),
+
+ R5 = mnesia:dirty_match_object(Pat2),
+ ?match([Rec3, Rec2, Rec4], lists:sort(R5)),
+
+ R6 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec2, Rec4], lists:sort(R6)),
+ ?match([], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ R7 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec3, Rec2, Rec4], lists:sort(R7)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ R8 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R8)),
+ ?match([Rec1], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ R9 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec2, Rec4], lists:sort(R9)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec2) end)),
+ R10 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec4], lists:sort(R10)),
+ ?match([Rec1], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ ?match([Rec4], mnesia:dirty_index_read(Tab, 14, ValPos2)),
+
+ ?match(ok, mnesia:dirty_delete({Tab, 4})),
+ R11 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1], lists:sort(R11)),
+ ?match([Rec1], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ ?match([], mnesia:dirty_index_read(Tab, 14, ValPos2)),
+
+ ?verify_mnesia(Nodes, []).
+
+dirty_index_update_bag_ram(suite) -> [];
+dirty_index_update_bag_ram(Config)when is_list(Config) ->
+ dirty_index_update_bag(Config, ram_copies).
+
+dirty_index_update_bag_disc(suite) -> [];
+dirty_index_update_bag_disc(Config)when is_list(Config) ->
+ dirty_index_update_bag(Config, disc_copies).
+
+dirty_index_update_bag_disc_only(suite) -> [];
+dirty_index_update_bag_disc_only(Config)when is_list(Config) ->
+ dirty_index_update_bag(Config, disc_only_copies).
+
+dirty_index_update_bag(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = index_test,
+ ValPos = v1,
+ ValPos2 = v3,
+ Def = [{type, bag},
+ {attributes, [k, v1, v2, v3]},
+ {Storage, [Node1]},
+ {index, [ValPos]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ Pat1 = {Tab, '$1', 2, '$2', '$3'},
+ Pat2 = {Tab, '$1', '$2', '$3', '$4'},
+
+ Rec1 = {Tab, 1, 2, 3, 4},
+ Rec2 = {Tab, 2, 2, 13, 14},
+ Rec3 = {Tab, 1, 12, 13, 14},
+ Rec4 = {Tab, 4, 2, 13, 4},
+ Rec5 = {Tab, 1, 2, 234, 14},
+
+ %% Simple Index
+ ?match([], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match(ok, mnesia:dirty_write(Rec1)),
+ ?match([Rec1], mnesia:dirty_index_read(Tab, 2, ValPos)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec2) end)),
+ R1 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2], lists:sort(R1)),
+
+ ?match(ok, mnesia:dirty_write(Rec3)),
+ R2 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2], lists:sort(R2)),
+
+ R3 = mnesia:dirty_index_match_object(Pat1, ValPos),
+ ?match([Rec1, Rec2], lists:sort(R3)),
+
+ R4 = mnesia:dirty_match_object(Pat2),
+ ?match([Rec1, Rec3, Rec2], lists:sort(R4)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ R5 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R5)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete({Tab, 4}) end)),
+ R6 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2], lists:sort(R6)),
+
+ ?match(ok, mnesia:dirty_delete_object(Rec1)),
+ ?match([Rec2], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ R7 = mnesia:dirty_match_object(Pat2),
+ ?match([Rec3, Rec2], lists:sort(R7)),
+
+    %% Two indices
+ ?match({atomic, ok}, mnesia:del_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos2)),
+
+ R8 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R8)),
+
+ R9 = mnesia:dirty_index_read(Tab, 4, ValPos2),
+ ?match([Rec1, Rec4], lists:sort(R9)),
+ R10 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec3, Rec2], lists:sort(R10)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec5) end)),
+ R11 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec1, Rec5, Rec2, Rec4], lists:sort(R11)),
+ R12 = mnesia:dirty_index_read(Tab, 4, ValPos2),
+ ?match([Rec1, Rec4], lists:sort(R12)),
+ R13 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec5, Rec3, Rec2], lists:sort(R13)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec1) end)),
+ R14 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec5, Rec2, Rec4], lists:sort(R14)),
+ ?match([Rec4], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ R15 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec5, Rec3, Rec2], lists:sort(R15)),
+
+ ?match(ok, mnesia:dirty_delete_object(Rec5)),
+ R16 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec2, Rec4], lists:sort(R16)),
+ ?match([Rec4], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ R17 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec3, Rec2], lists:sort(R17)),
+
+ ?match(ok, mnesia:dirty_write(Rec1)),
+ ?match(ok, mnesia:dirty_delete({Tab, 1})),
+ R18 = mnesia:dirty_index_read(Tab, 2, ValPos),
+ ?match([Rec2, Rec4], lists:sort(R18)),
+ ?match([Rec4], mnesia:dirty_index_read(Tab, 4, ValPos2)),
+ R19 = mnesia:dirty_index_read(Tab, 14, ValPos2),
+ ?match([Rec2], lists:sort(R19)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Dirty iteration
+%% dirty_slot, dirty_first, dirty_next
+
+dirty_iter(suite) ->
+ [
+ dirty_iter_ram,
+ dirty_iter_disc,
+ dirty_iter_disc_only
+ ].
+
+dirty_iter_ram(suite) -> [];
+dirty_iter_ram(Config) when is_list(Config) ->
+ dirty_iter(Config, ram_copies).
+
+dirty_iter_disc(suite) -> [];
+dirty_iter_disc(Config) when is_list(Config) ->
+ dirty_iter(Config, disc_copies).
+
+dirty_iter_disc_only(suite) -> [];
+dirty_iter_disc_only(Config) when is_list(Config) ->
+ dirty_iter(Config, disc_only_copies).
+
+dirty_iter(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = dirty_iter,
+ Def = [{type, bag}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match([], all_slots(Tab)),
+ ?match([], all_nexts(Tab)),
+
+ Keys = lists:seq(1, 5),
+ Records = [{Tab, A, B} || A <- Keys, B <- lists:seq(1, 2)],
+ lists:foreach(fun(Rec) -> ?match(ok, mnesia:dirty_write(Rec)) end, Records),
+
+ SortedRecords = lists:sort(Records),
+ ?match(SortedRecords, lists:sort(all_slots(Tab))),
+ ?match(Keys, lists:sort(all_nexts(Tab))),
+
+ ?match({'EXIT', _}, mnesia:dirty_first(foo)),
+ ?match({'EXIT', _}, mnesia:dirty_next(foo, foo)),
+ ?match({'EXIT', _}, mnesia:dirty_slot(foo, 0)),
+ ?match({'EXIT', _}, mnesia:dirty_slot(foo, [])),
+ ?match({atomic, Keys},
+ mnesia:transaction(fun() -> lists:sort(all_nexts(Tab)) end)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Returns a list of all records in the table
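+%% (Sketch) mnesia:dirty_slot(Tab, N) returns the records stored in the N:th
+%% internal slot; walking slot 0, 1, 2, ... until '$end_of_table' is returned
+%% therefore visits every record in the table.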
+all_slots(Tab) ->
+ all_slots(Tab, [], 0).
+
+all_slots(_Tab, '$end_of_table', _) ->
+ [];
+all_slots(Tab, PrevRecords, PrevSlot) ->
+ Records = mnesia:dirty_slot(Tab, PrevSlot),
+ PrevRecords ++ all_slots(Tab, Records, PrevSlot + 1).
+
+%% Returns a list of all keys in the table
+
+all_nexts(Tab) ->
+ FirstKey = mnesia:dirty_first(Tab),
+ all_nexts(Tab, FirstKey).
+
+all_nexts(_Tab, '$end_of_table') ->
+ [];
+all_nexts(Tab, PrevKey) ->
+ Key = mnesia:dirty_next(Tab, PrevKey),
+ [PrevKey] ++ all_nexts(Tab, Key).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+admin_tests(doc) ->
+ ["Verifies that dirty operations work during schema operations"];
+
+admin_tests(suite) ->
+ [del_table_copy_1,
+ del_table_copy_2,
+ del_table_copy_3,
+ add_table_copy_1,
+ add_table_copy_2,
+ add_table_copy_3,
+ add_table_copy_4,
+ move_table_copy_1,
+ move_table_copy_2,
+ move_table_copy_3,
+ move_table_copy_4].
+
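+%% Helper used by the admin tests below: update_trans/3 loops, bumping the
+%% counter field of {Tab, Key, _, Acc} via mnesia:sync_dirty roughly every
+%% 3 ms, until it receives {Pid, quit}; it then replies with the number of
+%% successful updates, which verify_oids/7 compares against the table contents.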
+update_trans(Tab, Key, Acc) ->
+ Update =
+ fun() ->
+ Res = (catch mnesia:read({Tab, Key})),
+ case Res of
+ [{Tab, Key, Extra, Acc}] ->
+ mnesia:write({Tab,Key,Extra, Acc+1});
+ Val ->
+ {read, Val, {acc, Acc}}
+ end
+ end,
+ receive
+ {Pid, quit} -> Pid ! {self(), Acc}
+ after
+ 3 ->
+ case catch mnesia:sync_dirty(Update) of
+ ok ->
+ update_trans(Tab, Key, Acc+1);
+ Else ->
+ ?error("Dirty Operation failed on ~p (update no ~p) with ~p~n"
+ "Info w2read ~p w2write ~p w2commit ~p storage ~p ~n",
+ [node(),
+ Acc,
+ Else,
+ mnesia:table_info(Tab, where_to_read),
+ mnesia:table_info(Tab, where_to_write),
+ mnesia:table_info(Tab, where_to_commit),
+ mnesia:table_info(Tab, storage_type)])
+ end
+ end.
+
+del_table_copy_1(suite) -> [];
+del_table_copy_1(Config) when is_list(Config) ->
+ [_Node1, Node2, _Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node2, Node2, Nodes). %Called on same Node as deleted
+del_table_copy_2(suite) -> [];
+del_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, _Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node1, Node2, Nodes). %Called from other Node
+del_table_copy_3(suite) -> [];
+del_table_copy_3(Config) when is_list(Config) ->
+ [_Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node3, Node2, Nodes). %Called from Node w.o. table
+
+del_table(CallFrom, DelNode, [Node1, Node2, Node3]) ->
+ Tab = schema_ops,
+ Def = [{disc_only_copies, [Node1]}, {ram_copies, [Node2]},
+ {attributes, [key, attr1, attr2]}],
+ ?log("Test case removing table from ~w, with ~w~n", [DelNode, Def]),
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 1000),
+
+ Pid1 = spawn_link(Node1, ?MODULE, update_trans, [Tab, 1, 0]),
+ Pid2 = spawn_link(Node2, ?MODULE, update_trans, [Tab, 2, 0]),
+ Pid3 = spawn_link(Node3, ?MODULE, update_trans, [Tab, 3, 0]),
+
+
+ dbg:tracer(process, {fun(Msg,_) -> tracer(Msg) end, void}),
+ %% dbg:n(Node2),
+ %% dbg:n(Node3),
+ %% dbg:tp('_', []),
+ %% dbg:tpl(dets, [timestamp]),
+ dbg:p(Pid1, [m,c,timestamp]),
+
+ ?match({atomic, ok},
+ rpc:call(CallFrom, mnesia, del_table_copy, [Tab, DelNode])),
+
+ Pid1 ! {self(), quit}, R1 =
+ receive {Pid1, Res1} -> Res1
+ after
+ 5000 -> io:format("~p~n",[process_info(Pid1)]),error
+ end,
+ Pid2 ! {self(), quit}, R2 =
+ receive {Pid2, Res2} -> Res2
+ after
+ 5000 -> error
+ end,
+ Pid3 ! {self(), quit}, R3 =
+ receive {Pid3, Res3} -> Res3
+ after
+ 5000 -> error
+ end,
+ verify_oids(Tab, Node1, Node2, Node3, R1, R2, R3),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+
+tracer({trace_ts, _, send, Msg, Pid, {_,S,Ms}}) ->
+ io:format("~p:~p ~p >> ~w ~n",[S,Ms,Pid,Msg]);
+tracer({trace_ts, _, 'receive', Msg, {_,S,Ms}}) ->
+ io:format("~p:~p << ~w ~n",[S,Ms,Msg]);
+
+
+tracer(Msg) ->
+ io:format("UMsg ~p ~n",[Msg]),
+ ok.
+
+
+
+add_table_copy_1(suite) -> [];
+add_table_copy_1(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node1, Node3, Nodes, Def).
+%% Not much different from case 1, but there was a suspicion of a bug;
+%% it should behave exactly the same, just checking the internal ordering
+add_table_copy_2(suite) -> [];
+add_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node2, Node3, Nodes, Def).
+add_table_copy_3(suite) -> [];
+add_table_copy_3(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node3, Node3, Nodes, Def).
+add_table_copy_4(suite) -> [];
+add_table_copy_4(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_only_copies, [Node1]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node2, Node3, Nodes, Def).
+
+add_table(CallFrom, AddNode, [Node1, Node2, Node3], Def) ->
+ ?log("Test case adding table at ~w, with ~w~n", [AddNode, Def]),
+ Tab = schema_ops,
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 1002),
+
+ Pid1 = spawn_link(Node1, ?MODULE, update_trans, [Tab, 1, 0]),
+ Pid2 = spawn_link(Node2, ?MODULE, update_trans, [Tab, 2, 0]),
+ Pid3 = spawn_link(Node3, ?MODULE, update_trans, [Tab, 3, 0]),
+
+ ?match({atomic, ok}, rpc:call(CallFrom, mnesia, add_table_copy,
+ [Tab, AddNode, ram_copies])),
+ Pid1 ! {self(), quit}, R1 = receive {Pid1, Res1} -> Res1 after 5000 -> error end,
+ Pid2 ! {self(), quit}, R2 = receive {Pid2, Res2} -> Res2 after 5000 -> error end,
+ Pid3 ! {self(), quit}, R3 = receive {Pid3, Res3} -> Res3 after 5000 -> error end,
+ verify_oids(Tab, Node1, Node2, Node3, R1, R2, R3),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+
+move_table_copy_1(suite) -> [];
+move_table_copy_1(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node1, Node1, Node3, Nodes, Def).
+move_table_copy_2(suite) -> [];
+move_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node2, Node1, Node3, Nodes, Def).
+move_table_copy_3(suite) -> [];
+move_table_copy_3(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node3, Node1, Node3, Nodes, Def).
+move_table_copy_4(suite) -> [];
+move_table_copy_4(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{ram_copies, [Node1]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node2, Node1, Node3, Nodes, Def).
+
+move_table(CallFrom, FromNode, ToNode, [Node1, Node2, Node3], Def) ->
+ ?log("Test case move table from ~w to ~w, with ~w~n", [FromNode, ToNode, Def]),
+ Tab = schema_ops,
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 1002),
+
+ Pid1 = spawn_link(Node1, ?MODULE, update_trans, [Tab, 1, 0]),
+ Pid2 = spawn_link(Node2, ?MODULE, update_trans, [Tab, 2, 0]),
+ Pid3 = spawn_link(Node3, ?MODULE, update_trans, [Tab, 3, 0]),
+
+ ?match({atomic, ok}, rpc:call(CallFrom, mnesia, move_table_copy,
+ [Tab, FromNode, ToNode])),
+ Pid1 ! {self(), quit},
+ R1 = receive {Pid1, Res1} -> Res1 after 5000 -> ?error("timeout pid1~n", []) end,
+ Pid2 ! {self(), quit},
+ R2 = receive {Pid2, Res2} -> Res2 after 5000 -> ?error("timeout pid2~n", []) end,
+ Pid3 ! {self(), quit},
+ R3 = receive {Pid3, Res3} -> Res3 after 5000 -> ?error("timeout pid3~n", []) end,
+ verify_oids(Tab, Node1, Node2, Node3, R1, R2, R3),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+
+% Verify consistency between different nodes.
+% Due to limitations in the current dirty_ops this can go wrong from time to time!
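+% R1, R2 and R3 are the final counter values reported by the three
+% update_trans/3 workers; the expected (sketch) outcome is that every node
+% returns [{Tab, Key, _, R}] for the corresponding key, i.e. all replicas agree.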
+verify_oids(Tab, N1, N2, N3, R1, R2, R3) ->
+ io:format("DEBUG 1=>~p 2=>~p 3=>~p~n", [R1,R2,R3]),
+ ?match([{_, _, _, R1}], rpc:call(N1, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{_, _, _, R1}], rpc:call(N2, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{_, _, _, R1}], rpc:call(N3, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{_, _, _, R2}], rpc:call(N1, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{_, _, _, R2}], rpc:call(N2, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{_, _, _, R2}], rpc:call(N3, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{_, _, _, R3}], rpc:call(N1, mnesia, dirty_read, [{Tab, 3}])),
+ ?match([{_, _, _, R3}], rpc:call(N2, mnesia, dirty_read, [{Tab, 3}])),
+ ?match([{_, _, _, R3}], rpc:call(N3, mnesia, dirty_read, [{Tab, 3}])).
+
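+%% Seeds the table with N records of the form {Tab, K, K, 0}, written with
+%% sync_dirty outside any transaction (asserted by the is_transaction check).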
+insert(_Tab, 0) -> ok;
+insert(Tab, N) when N > 0 ->
+ ok = mnesia:sync_dirty(fun() -> false = mnesia:is_transaction(), mnesia:write({Tab, N, N, 0}) end),
+ insert(Tab, N-1).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/lib/mnesia/test/mnesia_durability_test.erl b/lib/mnesia/test/mnesia_durability_test.erl
new file mode 100644
index 0000000000..b917b0ca40
--- /dev/null
+++ b/lib/mnesia/test/mnesia_durability_test.erl
@@ -0,0 +1,1470 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_durability_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-record(test_rec,{key,val}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+all(doc) ->
+ ["Verify durability",
+ "Verify that the effects of committed transactions are durable.",
+     "The content of the tables must be restored at startup."];
+all(suite) ->
+ [
+ load_tables,
+ durability_of_dump_tables,
+ durability_of_disc_copies,
+ durability_of_disc_only_copies
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+load_tables(doc) ->
+ ["Try to provoke all kinds of table load scenarios."];
+load_tables(suite) ->
+ [
+ load_latest_data,
+ load_local_contents_directly,
+ load_directly_when_all_are_ram_copiesA,
+ load_directly_when_all_are_ram_copiesB,
+ late_load_when_all_are_ram_copies_on_ram_nodes,
+ load_when_last_replica_becomes_available,
+ load_when_we_have_down_from_all_other_replica_nodes,
+ late_load_transforms_into_disc_load,
+ late_load_leads_to_hanging,
+ force_load_when_nobody_intents_to_load,
+ force_load_when_someone_has_decided_to_load,
+ force_load_when_someone_else_already_has_loaded,
+ force_load_when_we_has_loaded,
+ force_load_on_a_non_local_table,
+ force_load_when_the_table_does_not_exist,
+ load_tables_with_master_tables
+ ].
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+load_latest_data(doc) ->
+ ["Base functionality, verify that the latest data is loaded"];
+load_latest_data(suite) -> [];
+load_latest_data(Config) when is_list(Config) ->
+ [N1,N2,N3] = Nodes = ?acquire_nodes(3, Config),
+    %%Create replicated tables
+ ?match({atomic,ok}, mnesia:create_table(t0, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t1, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t2, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t3, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t4, [{disc_copies,[N1,N2]}])),
+ ?match({atomic,ok}, mnesia:create_table(t5, [{disc_copies,[N1,N2]}])),
+ Rec1 = {t1, test, ok},
+ Rec2 = {t1, test, 2},
+
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ ?match(ok, rpc:call(N2, mnesia, dirty_write, [Rec2])),
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+
+ ?match([], mnesia_test_lib:start_mnesia([N1], [])),
+ %% Should wait for N2
+ ?match({timeout, [t1]}, rpc:call(N1, mnesia, wait_for_tables, [[t1], 3000])),
+ ?match([], mnesia_test_lib:start_mnesia([N3], [])),
+ ?match({timeout, [t1]}, rpc:call(N1, mnesia, wait_for_tables, [[t1], 3000])),
+
+
+ ?match([], mnesia_test_lib:start_mnesia([N2], [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[t1], 3000])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[t1], 3000])),
+ %% We should find the record
+ ?match([Rec2], rpc:call(N1, mnesia, dirty_read, [t1, test])),
+ ?match([Rec2], rpc:call(N2, mnesia, dirty_read, [t1, test])),
+
+    %% OK, let's switch the order
+ ?match(ok, mnesia:dirty_delete_object(Rec1)),
+ ?match(ok, mnesia:dirty_delete_object(Rec2)),
+ %% redo
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match(ok, mnesia:dirty_write(Rec1)),
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+
+ ?match([], mnesia_test_lib:start_mnesia([N2], [])),
+ %% Should wait for N1
+ ?match({timeout, [t1]}, rpc:call(N2, mnesia, wait_for_tables, [[t1], 2000])),
+ ?match([], mnesia_test_lib:start_mnesia([N3], [])),
+ ?match({timeout, [t1]}, rpc:call(N2, mnesia, wait_for_tables, [[t1], 2000])),
+ ?match([], mnesia_test_lib:start_mnesia([N1], [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[t1], 1000])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[t1], 1000])),
+ %% We should find the record
+ ?match([Rec1], rpc:call(N1, mnesia, dirty_read, [t1, test])),
+ ?match([Rec1], rpc:call(N2, mnesia, dirty_read, [t1, test])),
+
+ ?verify_mnesia(Nodes, []).
+
+
+load_local_contents_directly(doc) ->
+ ["Local contents shall always be loaded. Check this by having a local ",
+     "table on two nodes N1, N2, stopping N1 before N2, and then verifying ",
+ "that N1 can start without N2 being started."];
+load_local_contents_directly(suite) -> [];
+load_local_contents_directly(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ %%Create a replicated local table
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{local_content,true},
+ {disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ %%Verify that it has local contents.
+ ?match( true, mnesia:table_info(test_rec,local_content) ),
+ %%Helper Funs
+ Write_one = fun(Value) -> mnesia:write(#test_rec{key=1,val=Value}) end,
+ Read_one = fun(Key) -> mnesia:read( {test_rec, Key}) end,
+    %%Write a value on N1 that we may test against later
+ ?match({atomic,ok},
+ rpc:call( N1, mnesia, transaction, [Write_one,[11]] ) ),
+ %%Stop Mnesia on N1
+ %?match([], mnesia_test_lib:stop_mnesia([N1])),
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+
+ %%Write a value on N2, same key but a different value
+ ?match({atomic,ok},
+ rpc:call( N2, mnesia, transaction, [Write_one,[22]] ) ),
+ %%Stop Mnesia on N2
+ %?match([], mnesia_test_lib:stop_mnesia([N2])),
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+
+    %%Restart Mnesia on N1 and verify that we can read from it without
+ %%starting Mnesia on N2.
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[test_rec], 30000])),
+ %%Read back the value
+ ?match( {atomic,[#test_rec{key=1,val=11}]},
+ rpc:call(N1, mnesia, transaction, [Read_one,[1]] ) ),
+ %%Restart Mnesia on N2 and verify the contents there.
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[test_rec], 30000])),
+ ?match( {atomic,[#test_rec{key=1,val=22}]},
+ rpc:call(N2, mnesia, transaction, [Read_one,[1]] ) ),
+    %%Check that the start of Mnesia on N2 did not affect the contents on N1
+ ?match( {atomic,[#test_rec{key=1,val=11}]},
+ rpc:call(N1, mnesia, transaction, [Read_one,[1]] ) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+load_directly_when_all_are_ram_copiesA(doc) ->
+     "1. N1 and N2 have RAM copies of a table, stop N1 before N2. ",
+     "2. When N1 starts it shall have access to the table ",
+     "   without having to start N2" ];
+ " without having to start N2" ];
+load_directly_when_all_are_ram_copiesA(suite) -> [];
+load_directly_when_all_are_ram_copiesA(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{ram_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ ?match( Nodes, mnesia:table_info(test_rec,ram_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_only_copies) ),
+ Write_one = fun(Value) -> mnesia:write(#test_rec{key=2,val=Value}) end,
+ Read_one = fun() -> mnesia:read({test_rec,2}) end,
+    %%Write a value on N1 that we may test against later
+ ?match({atomic,ok},
+ rpc:call( N1, mnesia, transaction, [Write_one,[11]] ) ),
+ %%Stop Mnesia on N1
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ %%Write a value and check result (on N2; not possible on N1
+ %%since Mnesia is stopped there).
+ ?match({atomic,ok}, rpc:call(N2,mnesia,transaction,[Write_one,[22]]) ),
+ ?match({atomic,[#test_rec{key=2,val=22}]},
+ rpc:call(N2,mnesia,transaction,[Read_one]) ),
+ %%Stop Mnesia on N2
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+    %%Restart Mnesia on N1 and verify that we can access test_rec from
+ %%N1 without starting Mnesia on N2.
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[test_rec], 30000])),
+ ?match({atomic,[]}, rpc:call(N1,mnesia,transaction,[Read_one])),
+ ?match({atomic,ok}, rpc:call(N1,mnesia,transaction,[Write_one,[33]])),
+ ?match({atomic,[#test_rec{key=2,val=33}]},
+ rpc:call(N1,mnesia,transaction,[Read_one])),
+ %%Restart Mnesia on N2 and verify the contents there.
+ ?match([], mnesia_test_lib:start_mnesia([N2], [test_rec])),
+ ?match( {atomic,[#test_rec{key=2,val=33}]},
+ rpc:call(N2, mnesia, transaction, [Read_one] ) ),
+    %%Check that the start of Mnesia on N2 did not affect the contents on N1
+ ?match( {atomic,[#test_rec{key=2,val=33}]},
+ rpc:call(N1, mnesia, transaction, [Read_one] ) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+load_directly_when_all_are_ram_copiesB(doc) ->
+    ["Tables that are RAM copies only shall be loaded from a replica ",
+     "when possible. ",
+     "1. N1 and N2 have RAM copies of a table, stop N1 before N2.",
+ "2. Now start N2 first and then N1, N1 shall then load the table ",
+ " from N2."];
+load_directly_when_all_are_ram_copiesB(suite) -> [];
+load_directly_when_all_are_ram_copiesB(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{ram_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ ?match( Nodes, mnesia:table_info(test_rec,ram_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_only_copies) ),
+ Write_one = fun(Value) -> mnesia:write(#test_rec{key=3,val=Value}) end,
+ Read_one = fun() -> mnesia:read( {test_rec, 3}) end,
+    %%Write a value on N1 that we may test against later
+ ?match({atomic,ok},
+ rpc:call( N1, mnesia, transaction, [Write_one,[11]] ) ),
+ ?match({atomic,[#test_rec{key=3,val=11}]},
+ rpc:call(N2,mnesia,transaction,[Read_one]) ),
+ %%Stop Mnesia on N1
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ %%Write a value and check result (on N2; not possible on N1
+ %%since Mnesia is stopped there).
+ ?match({atomic,ok}, rpc:call(N2,mnesia,transaction,[Write_one,[22]]) ),
+ ?match({atomic,[#test_rec{key=3,val=22}]},
+ rpc:call(N2,mnesia,transaction,[Read_one]) ),
+ %%Stop Mnesia on N2
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+    %%Restart Mnesia on N2 and verify that we can access test_rec from
+ %%N2 without starting Mnesia on N1.
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[test_rec], 30000])),
+ ?match({atomic,[]}, rpc:call(N2,mnesia,transaction,[Read_one])),
+ ?match({atomic,ok}, rpc:call(N2,mnesia,transaction,[Write_one,[33]])),
+ ?match({atomic,[#test_rec{key=3,val=33}]},
+ rpc:call(N2,mnesia,transaction,[Read_one])),
+ %%Restart Mnesia on N1 and verify the contents there.
+ ?match([], mnesia_test_lib:start_mnesia([N1], [test_rec])),
+ ?match( {atomic,[#test_rec{key=3,val=33}]},
+ rpc:call(N1,mnesia,transaction,[Read_one])),
+    %%Check that the start of Mnesia on N1 did not affect the contents on N2
+ ?match( {atomic,[#test_rec{key=3,val=33}]},
+ rpc:call(N2,mnesia,transaction,[Read_one])),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+late_load_when_all_are_ram_copies_on_ram_nodes(doc) ->
+    ["Load of ram_copies tables when all replicas reside on discless nodes"];
+late_load_when_all_are_ram_copies_on_ram_nodes(suite) ->
+ [
+ late_load_when_all_are_ram_copies_on_ram_nodes1,
+ late_load_when_all_are_ram_copies_on_ram_nodes2
+ ].
+
+late_load_when_all_are_ram_copies_on_ram_nodes1(suite) -> [];
+late_load_when_all_are_ram_copies_on_ram_nodes1(Config) when is_list(Config) ->
+ [N1, N2] = mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ {reload_appls, [mnesia]}],
+ 2, Config, ?FILE, ?LINE),
+ Res = late_load_when_all_are_ram_copies_on_ram_nodes(N1, [N2], Config),
+ mnesia_test_lib:prepare_test_case([{reload_appls, [mnesia]}],
+ 2, Config, ?FILE, ?LINE),
+ Res.
+
+late_load_when_all_are_ram_copies_on_ram_nodes2(suite) -> [];
+late_load_when_all_are_ram_copies_on_ram_nodes2(Config) when is_list(Config) ->
+ [N1, N2, N3] = mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ {reload_appls, [mnesia]}],
+ 3, Config, ?FILE, ?LINE),
+ Res = late_load_when_all_are_ram_copies_on_ram_nodes(N1, [N2, N3], Config),
+ mnesia_test_lib:prepare_test_case([{reload_appls, [mnesia]}],
+ 3, Config, ?FILE, ?LINE),
+ Res.
+
+late_load_when_all_are_ram_copies_on_ram_nodes(DiscNode, RamNs, _Config)
+ when DiscNode == node() ->
+ ?match(ok, mnesia:create_schema([DiscNode])),
+ ?match(ok, mnesia:start()),
+ Nodes = [DiscNode | RamNs],
+ Extra = [{extra_db_nodes, Nodes}],
+ Ok = [ok || _ <- RamNs],
+ ?match({Ok, []}, rpc:multicall(RamNs, mnesia, start, [Extra])),
+ ?match([], wait_until_running(Nodes)),
+
+ LastRam = lists:last(RamNs),
+ %% ?match({atomic, ok},
+ %% mnesia:add_table_copy(schema, LastRam, ram_copies)),
+ Def = [{ram_copies, RamNs}, {attributes, record_info(fields, test_rec)}],
+ ?match({atomic,ok}, mnesia:create_table(test_rec, Def)),
+ ?verify_mnesia(Nodes, []),
+ ?match([], mnesia_test_lib:stop_mnesia(RamNs)),
+ ?match(stopped, mnesia:stop()),
+ ?match(ok, mnesia:start()),
+
+ Rec1 = #test_rec{key=3, val=33},
+ Rec2 = #test_rec{key=4, val=44},
+
+ FirstRam = hd(RamNs),
+ ?match(ok, rpc:call(FirstRam, mnesia, start, [Extra])),
+ ?match(ok, rpc:call(FirstRam, mnesia, wait_for_tables,
+ [[test_rec], 30000])),
+ ?match(ok, rpc:call(FirstRam, mnesia, dirty_write,[Rec1])),
+ ?match(ok, mnesia:wait_for_tables([test_rec], 30000)),
+ mnesia:dirty_write(Rec2),
+
+ if
+ FirstRam /= LastRam ->
+ ?match(ok, rpc:call(LastRam, mnesia, start, [Extra])),
+ ?match(ok, rpc:call(LastRam, mnesia, wait_for_tables,
+ [[test_rec], 30000]));
+ true ->
+ ignore
+ end,
+ ?match([Rec1], rpc:call(LastRam, mnesia, dirty_read, [{test_rec, 3}])),
+ ?match([Rec2], rpc:call(LastRam, mnesia, dirty_read, [{test_rec, 4}])),
+ ?verify_mnesia(Nodes, []).
+
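+%% Polls mnesia:system_info(running_db_nodes) about once per second (up to
+%% 30 attempts by default) and returns the nodes that never showed up, so
+%% ?match([], wait_until_running(Nodes)) asserts that all nodes are running
+%% within roughly half a minute.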
+wait_until_running(Nodes) ->
+ wait_until_running(Nodes, 30).
+
+wait_until_running(Nodes, Times) when Times > 0->
+ Alive = mnesia:system_info(running_db_nodes),
+ case Nodes -- Alive of
+ [] ->
+ [];
+ Remaining ->
+ timer:sleep(timer:seconds(1)),
+ wait_until_running(Remaining, Times - 1)
+ end;
+wait_until_running(Nodes, _) ->
+ Nodes.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+load_when_last_replica_becomes_available(doc) ->
+ ["Check that when all Mnesia nodes die at the same instant, then the ",
+ "replicated table shall be accessible when the last node is started ",
+ "again.",
+ "Checked by cheating. Start Mnesia on N1, N2, N3. Have a table ",
+ "replicated on disc on all three nodes, fill in some transactions, ",
+     "install a fallback. Restart Mnesia on all nodes. "
+ "This is the cheat and it simulates that all nodes died at the same ",
+ "time. Check that the table is only accessible after the last node ",
+ "has come up."];
+load_when_last_replica_becomes_available(suite) -> [];
+load_when_last_replica_becomes_available(Config) when is_list(Config) ->
+ [N1, N2, N3] = Nodes = ?acquire_nodes(3, Config),
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ ?match( [], mnesia:table_info(test_rec,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(test_rec,disc_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_only_copies) ),
+ Write_one = fun(Key,Val)->mnesia:write(#test_rec{key=Key,val=Val}) end,
+ Read_one = fun(Key) ->mnesia:read( {test_rec, Key}) end,
+ %%Write one value from each node.
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[1,11]])),
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[2,22]])),
+ ?match({atomic,ok},rpc:call(N3,mnesia,transaction,[Write_one,[3,33]])),
+ %%Check the values
+ ?match({atomic,[#test_rec{key=1,val=11}]},
+ rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[#test_rec{key=2,val=22}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[#test_rec{key=3,val=33}]},
+ rpc:call(N1,mnesia,transaction,[Read_one,[3]]) ),
+
+ ?match(ok, mnesia:backup("test_last_replica")),
+ ?match(ok, mnesia:install_fallback("test_last_replica")),
+ file:delete("test_last_replica"),
+ %%Stop Mnesia on all three nodes
+ ?match([], mnesia_test_lib:kill_mnesia(Nodes)),
+
+ %%Start Mnesia on one node, make sure that test_rec is not available
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match({timeout,[test_rec]},
+ rpc:call(N2, mnesia, wait_for_tables, [[test_rec], 10000])),
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+ ?match({timeout,[test_rec]},
+ rpc:call(N1, mnesia, wait_for_tables, [[test_rec], 10000])),
+ %%Start the third node
+ ?match(ok, rpc:call(N3, mnesia, start, [])),
+ %%Make sure that the table is loaded everywhere
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [[test_rec], 30000])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[test_rec], 30000])),
+ ?match(ok, rpc:call(N1, mnesia, wait_for_tables, [[test_rec], 30000])),
+
+ %%Check the values
+ ?match({atomic,[#test_rec{key=1,val=11}]},
+ rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[#test_rec{key=2,val=22}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[#test_rec{key=3,val=33}]},
+ rpc:call(N1,mnesia,transaction,[Read_one,[3]]) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+load_when_we_have_down_from_all_other_replica_nodes(doc) ->
+ ["The table can be loaded if this node was the last one surviving. ",
+ "Check this by having N1, N2, N3 and a table replicated on all those ",
+ "nodes. Then kill them in the N1, N2, N3 order. Then start N3 and ",
+ "verify that the table is available with correct contents."];
+load_when_we_have_down_from_all_other_replica_nodes(suite) -> [];
+load_when_we_have_down_from_all_other_replica_nodes(Config) when is_list(Config) ->
+ [N1, N2, N3] = Nodes = ?acquire_nodes(3, Config),
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ ?match( [], mnesia:table_info(test_rec,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(test_rec,disc_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_only_copies) ),
+ Write_one = fun(Key,Val)->mnesia:write(#test_rec{key=Key,val=Val}) end,
+ Read_one = fun(Key) ->mnesia:read( {test_rec, Key}) end,
+ %%Write one value from each node.
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[1,111]])),
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[2,222]])),
+ ?match({atomic,ok},rpc:call(N3,mnesia,transaction,[Write_one,[3,333]])),
+ %%Check the values
+ ?match({atomic,[#test_rec{key=1,val=111}]},
+ rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[#test_rec{key=2,val=222}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[#test_rec{key=3,val=333}]},
+ rpc:call(N1,mnesia,transaction,[Read_one,[3]]) ),
+ %%Stop Mnesia on all three nodes
+ ?match([], mnesia_test_lib:kill_mnesia([N1])),
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[22,22]])),
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match({atomic,ok},rpc:call(N3,mnesia,transaction,[Write_one,[33,33]])),
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+    ?verbose("Mnesia stopped on all three nodes.~n",[]),
+
+ %%Start Mnesia on N3; wait for 'test_rec' table to load
+ ?match(ok, rpc:call(N3, mnesia, start, [])),
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [[test_rec], 30000])),
+
+ %%Check the values
+ ?match({atomic,[#test_rec{key=1,val=111}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[#test_rec{key=2,val=222}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[#test_rec{key=3,val=333}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[3]]) ),
+ ?match({atomic,[#test_rec{key=22,val=22}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[22]]) ),
+ ?match({atomic,[#test_rec{key=33,val=33}]},
+ rpc:call(N3,mnesia,transaction,[Read_one,[33]]) ),
+ ?verify_mnesia([N3], [N1, N2]).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+late_load_transforms_into_disc_load(doc) ->
+ ["Difficult case that needs instrumentation of Mnesia.",
+ "A table is force loaded, and Mnesia decides to load it from another ",
+ "Mnesia node because it is avaliable there. The other Mnesia node then ",
+ "dies in mid copy which shall make the first Mnesia node to really ",
+ "force load from disc.",
+ "Check this by starting N1 and N2 and replicating a table between ",
+ "them. Then kill N1 before N2. The idea is to start N2 first, then ",
+ "N1 and then do a force load on N1. This force load will load from ",
+ "N2 BUT N2 must be killed after the decision to load from it has ",
+ "been made. tricky."];
+
+late_load_transforms_into_disc_load(suite) -> [];
+late_load_transforms_into_disc_load(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ {success, [A, B]} = ?start_activities(Nodes),
+
+ ?match(Node1, node(A)),
+ ?match(Node2, node(B)),
+
+ Tab = late_load_table,
+ Def = [{attributes, [key, value]},
+ {disc_copies, Nodes}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 111, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 222, 42})),
+
+ TestPid = self(),
+ DebugId = {mnesia_loader, do_get_network_copy},
+ DebugFun = fun(PrevContext, EvalContext) ->
+ ?verbose("interrupt late load, pid ~p #~p ~n context ~p ~n",
+ [self(),PrevContext,EvalContext]),
+
+ mnesia_test_lib:kill_mnesia([Node2]),
+ TestPid ! {self(),debug_fun_was_called},
+
+ ?verbose("interrupt late_log - continues ~n",[]),
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end,
+ ?remote_activate_debug_fun(Node1,DebugId, DebugFun, 1),
+
+ %% kill mnesia on node1
+ mnesia_test_lib:kill_mnesia([Node1]),
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(1)),
+
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab, 222, 815}])),
+
+ %% start Mnesia on node1
+ ?match(ok,mnesia:start()),
+ ?match(yes, mnesia:force_load_table(Tab)),
+ ?match(ok, mnesia:wait_for_tables([Tab],timer:seconds(30))),
+
+ receive_messages([debug_fun_was_called]),
+
+ check_tables([A],[{Tab,111},{Tab,222}],[[{Tab,111,4711}],[{Tab,222,42}]]),
+ ?verify_mnesia([Node1], [Node2]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+late_load_leads_to_hanging(doc) ->
+ ["Difficult case that needs instrumentation of Mnesia.",
+ "A table is loaded, and Mnesia decides to load it from another ",
+ "Mnesia node because it has the latest copy there. ",
+ "The other Mnesia node then ",
+ "dies in mid copy which shall make the first Mnesia node not to ",
+ "force load from disc but to wait for the other node to come up again",
+ "Check this by starting N1 and N2 and replicating a table between ",
+ "them. Then kill N1 before N2. The idea is to start N2 first, then ",
+ "N1. This load will load from ",
+ "N2 BUT N2 must be killed after the decision to load from it has ",
+ "been made. tricky."];
+
+late_load_leads_to_hanging(suite) -> [];
+late_load_leads_to_hanging(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ Tab = late_load_table,
+ Def = [{attributes, [key, value]},
+ {disc_copies, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 111, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 222, 42})),
+
+ DebugId = {mnesia_loader, do_get_network_copy},
+ DebugFun = fun(PrevContext, EvalContext) ->
+ ?verbose("interrupt late load, pid ~p #~p ~n context ~p ~n",
+ [self(), PrevContext, EvalContext]),
+ mnesia_test_lib:kill_mnesia([Node2]),
+ ?verbose("interrupt late load - continues ~n",[]),
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end,
+
+ ?remote_activate_debug_fun(Node1,DebugId, DebugFun, 1),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(1)),
+
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab, 333, 666}])),
+
+ %% start Mnesia on node1
+ ?match(ok, mnesia:start()),
+
+ ?match({timeout, [Tab]}, mnesia:wait_for_tables([Tab], timer:seconds(2))),
+
+ ?match({'EXIT', {aborted, _}}, mnesia:dirty_read({Tab, 222})),
+ %% mnesia on node1 is waiting for node2 coming up
+
+ ?match(ok, rpc:call(Node2, mnesia, start, [])),
+ ?match(ok, mnesia:wait_for_tables([Tab], timer:seconds(30))),
+ ?match([{Tab, 333, 666}], mnesia:dirty_read({Tab, 333})),
+ ?verify_mnesia([Node2, Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_when_nobody_intents_to_load(doc) ->
+ ["Normal force load. Start N1 N2, kill in N1, N2 order. Start N1 do ",
+ "force load. Did it work?"];
+force_load_when_nobody_intents_to_load(suite) -> [];
+force_load_when_nobody_intents_to_load(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ Table = test_rec,
+ Trec1a = #test_rec{key=1,val=111},
+ Trec1b = #test_rec{key=1,val=333},
+ Trec2a = #test_rec{key=2,val=222},
+ Trec3a = #test_rec{key=3,val=333},
+ Trec3b = #test_rec{key=3,val=666},
+
+ ?match({atomic,ok}, rpc:call(N1, mnesia,create_table,
+ [Table,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}
+ ] ] ) ),
+ ?match( [], mnesia:table_info(Table,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(Table,disc_copies) ),
+ ?match( [], mnesia:table_info(Table,disc_only_copies) ),
+ Write_one = fun(Rec) -> mnesia:write(Rec) end,
+ Read_one = fun(Key) -> mnesia:read({Table, Key}) end,
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec1a]])),
+ %%Check it
+ ?match({atomic,[Trec1a]},rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ %%Shut down mnesia on N1
+ ?match([], mnesia_test_lib:stop_mnesia([N1])),
+ %%Write and check value while N1 is down
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[Trec1b]])),
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[Trec2a]])),
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[Trec3a]])),
+ ?match({aborted,{node_not_running,N1}},
+ rpc:call(N1,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[Trec1b]},rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[Trec2a]},rpc:call(N2,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[Trec3a]},rpc:call(N2,mnesia,transaction,[Read_one,[3]]) ),
+ %%Shut down Mnesia on N2
+ ?match([], mnesia_test_lib:stop_mnesia([N2])),
+
+ %%Restart Mnesia on N1
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+ %%Check that table is not available (waiting for N2)
+ ?match({timeout,[Table]},
+ rpc:call(N1, mnesia, wait_for_tables, [[Table], 3000])),
+
+ %%Force load on N1
+ ?match(yes,rpc:call(N1,mnesia,force_load_table,[Table])),
+ %%Check values
+ ?match({atomic,[Trec1a]},rpc:call(N1,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[]}, rpc:call(N1,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[]}, rpc:call(N1,mnesia,transaction,[Read_one,[3]]) ),
+ %%Write a value for key=3
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec3b]])),
+
+ %%Restart N2 and check values
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[Table], 30000])),
+
+ ?match({atomic,[Trec1a]},rpc:call(N1,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[Trec1a]},rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+
+ ?match({atomic,[]},rpc:call(N1,mnesia,transaction,[Read_one,[2]]) ),
+ ?match({atomic,[]},rpc:call(N2,mnesia,transaction,[Read_one,[2]]) ),
+
+ ?match({atomic,[Trec3b]},rpc:call(N1,mnesia,transaction,[Read_one,[3]]) ),
+ ?match({atomic,[Trec3b]},rpc:call(N2,mnesia,transaction,[Read_one,[3]]) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_when_someone_has_decided_to_load(doc) ->
+ ["Difficult case that needs instrumentation of Mnesia.",
+ "Start N1 and N2, replicate table, kill in N1, N2 order. Start N2 ",
+ "and start N1 before N2 has really loaded the table but after N2 has ",
+ "decided to load it."];
+
+force_load_when_someone_has_decided_to_load(suite) -> [];
+force_load_when_someone_has_decided_to_load(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ {success, [A, B]} = ?start_activities(Nodes),
+ ?match(Node1, node(A)), %% Just to check :)
+ ?match(Node2, node(B)),
+
+ Tab = late_load_table,
+ Def = [{attributes, [key, value]}, {disc_copies, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 111, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 222, 42})),
+
+ Self = self(),
+ DebugId = {mnesia_controller, late_disc_load},
+ DebugFun = fun(PrevContext, EvalContext) ->
+ ?verbose("interrupt late disc load,
+ pid ~p #~p ~n context ~p ~n",
+ [self(),PrevContext,EvalContext]),
+ Self ! {self(), fun_in_postion},
+ wait_for_signal(),
+ ?verbose("interrupt late disc load - continues ~n",[]),
+ ?deactivate_debug_fun(DebugId),
+ PrevContext+1
+ end,
+
+ %% kill mnesia on node1
+ mnesia_test_lib:kill_mnesia([Node1]),
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(1)),
+
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab, 222, 815}])),
+ %% kill mnesia on node2
+ mnesia_test_lib:kill_mnesia([Node2]),
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(1)),
+
+ ?remote_activate_debug_fun(Node2,DebugId, DebugFun, 1),
+
+ B ! fun() -> mnesia:start() end,
+ [{Mnesia_Pid, fun_in_postion}] = receive_messages([fun_in_postion]),
+
+ %% start Mnesia on node1
+ A ! fun() -> mnesia:start() end,
+ ?match_receive(timeout),
+% Got some problem with this testcase when we modified mnesia init
+% These test cases are very implementation dependent!
+% A ! fun() -> mnesia:wait_for_tables([Tab], 3000) end,
+% ?match_receive({A, {timeout, [Tab]}}),
+ A ! fun() -> mnesia:force_load_table(Tab) end,
+ ?match_receive(timeout),
+
+ Mnesia_Pid ! continue,
+ ?match_receive({B, ok}),
+ ?match_receive({A, ok}),
+ ?match_receive({A, yes}),
+
+ B ! fun() -> mnesia:wait_for_tables([Tab], 10000) end,
+ ?match_receive({B, ok}),
+ ?match(ok, mnesia:wait_for_tables([Tab], timer:seconds(30))),
+ ?match([{Tab, 222, 815}], mnesia:dirty_read({Tab, 222})),
+ ?verify_mnesia(Nodes, []).
+
+wait_for_signal() ->
+ receive
+ continue -> ok
+ %% Don't eat any other mnesia internal msg's
+ after
+ timer:minutes(2) -> ?error("Timedout in wait_for_signal~n", [])
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_when_someone_else_already_has_loaded(doc) ->
+ ["Normal case. Do a force load when somebody else has loaded the table. ",
+ "Start N1, N2, kill in N1, N2 order. Start N2 load the table, start N1 ",
+ "force load. Did it work? (i.e: did N1 load the table from N2 as that",
+ "one is the latest version and it is available on N2)"];
+
+force_load_when_someone_else_already_has_loaded(suite) -> [];
+force_load_when_someone_else_already_has_loaded(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ Table = test_rec,
+ Trec1 = #test_rec{key=1,val=111},
+ Trec2 = #test_rec{key=1,val=222},
+
+ ?match({atomic,ok}, rpc:call(N1, mnesia,create_table,
+ [Table,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}
+ ] ] ) ),
+ ?match( [], mnesia:table_info(Table,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(Table,disc_copies) ),
+ ?match( [], mnesia:table_info(Table,disc_only_copies) ),
+ Write_one = fun(Rec) -> mnesia:write(Rec) end,
+ Read_one = fun(Key) -> mnesia:read({Table, Key}) end,
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec1]])),
+ %%Check it
+ ?match({atomic,[Trec1]},rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ %%Shut down mnesia
+ ?match([], mnesia_test_lib:stop_mnesia([N1])),
+ timer:sleep(500),
+ ?match([], mnesia_test_lib:stop_mnesia([N2])),
+ %%Restart Mnesia on N2;wait for tables to load
+ ?match(ok, rpc:call(N2, mnesia, start, [])),
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [[test_rec], 30000])),
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N2,mnesia,transaction,[Write_one,[Trec2]])),
+ %%Start on N1; force load
+ ?match(ok, rpc:call(N1, mnesia, start, [])),
+ %%Force load from file
+ ?match(yes, rpc:call(N1,mnesia,force_load_table,[Table])),
+ %%Check the value
+ ?match({atomic,[Trec2]},rpc:call(N1,mnesia,transaction,[Read_one,[1]]) ),
+ %% === there must be a Trec2 here !!!!
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_when_we_has_loaded(doc) ->
+ ["Force load a table we already have loaded"];
+force_load_when_we_has_loaded(suite) -> [];
+force_load_when_we_has_loaded(Config) when is_list(Config) ->
+ [N1] = Nodes = ?acquire_nodes(1, Config),
+ Table = test_rec,
+ Trec1 = #test_rec{key=1,val=111},
+ Trec2 = #test_rec{key=1,val=222},
+
+ ?match({atomic,ok}, rpc:call(N1, mnesia,create_table,
+ [Table,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}
+ ] ] ) ),
+ ?match( [], mnesia:table_info(Table,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(Table,disc_copies) ),
+ ?match( [], mnesia:table_info(Table,disc_only_copies) ),
+ Write_one = fun(Rec) -> mnesia:write(Rec) end,
+ Read_one = fun(Key) -> mnesia:read({Table, Key}) end,
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec1]])),
+ %%Check it
+ ?match({atomic,[Trec1]},rpc:call(N1,mnesia,transaction,[Read_one,[1]]) ),
+ %%Shut down mnesia
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ %%Restart Mnesia;wait for tables to load
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Table])),
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec2]])),
+ %%Force load from file
+ ?match(yes, rpc:call(N1,mnesia,force_load_table,[Table])),
+ %%Check the value
+ ?match({atomic,[Trec2]},rpc:call(N1,mnesia,transaction,[Read_one,[1]]) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_on_a_non_local_table(doc) ->
+ ["This is NOT allowed, the test case is a negative test",
+ "Force load on a table that isn't replicated on this node."];
+force_load_on_a_non_local_table(suite) -> [];
+force_load_on_a_non_local_table(Config) when is_list(Config) ->
+ [N1, N2, N3] = Nodes = ?acquire_nodes( 3, Config),
+ TableNodes = lists:sublist(Nodes,2),
+ Table = test_rec,
+ Trec1 = #test_rec{key=1,val=11},
+
+ ?match({atomic,ok}, rpc:call(N1, mnesia,create_table,
+ [Table,
+ [{disc_copies,TableNodes},
+ {attributes,record_info(fields,test_rec)}
+ ] ] ) ),
+ ?match( [], mnesia:table_info(Table,ram_copies) ),
+ ?match( TableNodes, mnesia:table_info(Table,disc_copies) ),
+ ?match( [], mnesia:table_info(Table,disc_only_copies) ),
+ Write_one = fun(Rec) -> mnesia:write(Rec) end,
+ Read_one = fun(Key) -> mnesia:read({Table, Key}) end,
+ %%Write one value
+ ?match({atomic,ok},rpc:call(N1,mnesia,transaction,[Write_one,[Trec1]])),
+ %%Check it from the other nodes
+ ?match({atomic,[Trec1]},rpc:call(N2,mnesia,transaction,[Read_one,[1]]) ),
+ ?match({atomic,[Trec1]},rpc:call(N3,mnesia,transaction,[Read_one,[1]]) ),
+
+ %%Make sure that Table is non-local
+ ?match_inverse(N3, rpc:call(N3,mnesia,table_info,[Table,where_to_read])),
+ %%Try to force load it
+ ?match(yes, rpc:call(N3,mnesia,force_load_table,[Table])),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+force_load_when_the_table_does_not_exist(doc) ->
+ ["This is NOT allowed, the test case is a negative test",
+ "Force load on a table that doesn't exist."];
+force_load_when_the_table_does_not_exist(suite) -> [];
+force_load_when_the_table_does_not_exist(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes( 2, Config),
+
+ %%Dummy table
+ ?match({atomic,ok},
+ mnesia:create_table(test_rec,
+ [{disc_copies,Nodes},
+ {attributes,record_info(fields,test_rec)}]
+ ) ),
+ ?match( [], mnesia:table_info(test_rec,ram_copies) ),
+ ?match( Nodes, mnesia:table_info(test_rec,disc_copies) ),
+ ?match( [], mnesia:table_info(test_rec,disc_only_copies) ),
+ Tab = dummy,
+ %%Make sure that Tab is an unknown table
+ ?match( false, lists:member(Tab,mnesia:system_info(tables)) ),
+ ?match( {error, {no_exists, Tab}}, mnesia:force_load_table(Tab) ),
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+load_tables_with_master_tables(doc) ->
+ ["Verifies the semantics of different master nodes settings",
+ "The semantics should be:",
+ "1. Mnesia downs, Normally decides from where mnesia should load tables",
+ "2. Master tables (overrides mnesia downs) ",
+ "3. Force load (overrides Master tables) ",
+ "--- 1st from active master nodes",
+ "--- 2nd from active nodes",
+ "--- 3rd get local copy (if ram create new one)"
+ ];
+
+load_tables_with_master_tables(suite) ->
+ [master_nodes,
+ starting_master_nodes,
+ master_on_non_local_tables,
+ remote_force_load_with_local_master_node].
+
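+%% A minimal illustrative sketch (not called by any test case): the load
+%% priority described in load_tables_with_master_tables above, expressed as
+%% plain API calls. Tab and Masters are placeholder arguments.
+master_node_load_sketch(Tab, Masters) ->
+ %% Master nodes override the normal "mnesia downs" based decision
+ ok = mnesia:set_master_nodes(Tab, Masters),
+ %% Force load overrides the master node setting as well
+ yes = mnesia:force_load_table(Tab),
+ mnesia:wait_for_tables([Tab], timer:seconds(30)).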
+
+-define(SDwrite(Tup), fun() -> mnesia:write(Tup) end).
+
+master_nodes(suite) -> [];
+master_nodes(Config) when is_list(Config) ->
+ [A, B, C] = Nodes = ?acquire_nodes(3, Config),
+ Tab = test_table_master_nodes,
+ ?match({atomic,ok}, mnesia:create_table(Tab, [{disc_copies, Nodes}])),
+
+ %% Test 1: Master [A]; the table should be loaded from A
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [A]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match([{Tab, 1, init}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ %% Test 2: Master [A,B] and B is up; the table should be loaded from B
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [A, B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match([{Tab, 1, updated}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ %% Test 3: Master [A,B] and B is down; the table should be loaded from A
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [A, B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ mnesia_test_lib:stop_mnesia([B]),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(B, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match([{Tab, 1, init}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, _Unknown}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ %% Test 4: Master [B] and B is up; the table should be loaded from B
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match([{Tab, 1, updated}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ %% Test 5: Master [B] and B is down; the table should not be loaded
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ mnesia_test_lib:stop_mnesia([B]),
+ ?match({atomic, ok}, rpc:call(C, mnesia, sync_transaction, [?SDwrite({Tab, 1, update_2})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match({timeout, [Tab]}, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+
+ %% Test 6: Force load on a table that couldn't be loaded due to the
+ %% master node settings; loads from the other active replicas, i.e. from C
+
+ ?match(yes, rpc:call(A, mnesia, force_load_table, [Tab])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(B, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?match([{Tab, 1, update_2}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, update_2}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, update_2}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ %% Test 7: Master [B] and B is down, the table should not be loaded;
+ %% force_load when there are no active replicas available
+ %% should load the local copy of the table
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ mnesia_test_lib:stop_mnesia([B, C]),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match({timeout, [Tab]}, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+
+ ?match(yes, rpc:call(A, mnesia, force_load_table, [Tab])),
+ ?match([{Tab, 1, init}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+
+ ?verify_mnesia([A], [B,C]).
+
+starting_master_nodes(suite) -> [];
+starting_master_nodes(doc) ->
+ ["Complementory to TEST 5 and 6 above, if the master node (B) starts"
+ " and loads the table it should be loaded on the waiting node (A) "];
+starting_master_nodes(Config) when is_list(Config) ->
+ [A, B, C] = Nodes = ?acquire_nodes(3, Config),
+ Tab = starting_master_nodes,
+ ?match({atomic,ok}, mnesia:create_table(Tab, [{disc_copies, Nodes}])),
+ %% Start by checking TEST 5 above.
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+ mnesia_test_lib:stop_mnesia([A]),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ mnesia_test_lib:stop_mnesia([B]),
+ ?match({atomic, ok}, rpc:call(C, mnesia, sync_transaction, [?SDwrite({Tab, 1, update_2})])),
+
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match({timeout, [Tab]}, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+ %% Start the B node and the table should be loaded on A!
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(B, mnesia, wait_for_tables, [[Tab], 3000])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+
+ ?verify_mnesia([A,B,C], []).
+
+
+master_on_non_local_tables(suite) -> [];
+master_on_non_local_tables(Config) when is_list(Config) ->
+ [A, B, C] = Nodes = ?acquire_nodes(3, Config),
+ Tab = test_table_non_local,
+ ?match({atomic,ok}, mnesia:create_table(Tab, [{disc_copies, [B, C]}])),
+
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [B]])),
+ ?match({atomic, ok}, mnesia:sync_transaction(?SDwrite({Tab, 1, init}))),
+
+ %% Test 1: Test that table info is updated when the master node comes up
+
+ mnesia_test_lib:stop_mnesia([A, B]),
+ ?match({atomic, ok}, rpc:call(C, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+
+ ?match({timeout, [Tab]}, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+ ErrorRead = {badrpc,{'EXIT', {aborted,{no_exists,[test_table_non_local,1]}}}},
+ ErrorWrite = {badrpc,{'EXIT', {aborted,{no_exists,test_table_non_local}}}},
+ ?match(ErrorRead, rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match(ErrorWrite, rpc:call(A, mnesia, dirty_write, [{Tab, 1, updated_twice}])),
+
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+
+ ?match([{Tab, 1, updated}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match(B, rpc:call(A, mnesia, table_info, [Tab, where_to_read])),
+ ?match({atomic, ok}, rpc:call(A, mnesia, sync_transaction, [?SDwrite({Tab, 1, init})])),
+
+ %% Test 2: Test that table info is updated after force_load
+
+ mnesia_test_lib:stop_mnesia([A, B]),
+ ?match({atomic, ok}, rpc:call(C, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated})])),
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+
+ ?match({timeout, [Tab]}, rpc:call(A, mnesia, wait_for_tables, [[Tab], 2000])),
+ ?match(yes, rpc:call(A, mnesia, force_load_table, [Tab])),
+ ?match(C, rpc:call(A, mnesia, table_info, [Tab, where_to_read])),
+
+ ?match([{Tab, 1, updated}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match({atomic, ok}, rpc:call(A, mnesia, sync_transaction, [?SDwrite({Tab, 1, updated_twice})])),
+
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(B, mnesia, wait_for_tables, [[Tab], 10000])),
+
+ ?match([{Tab, 1, updated_twice}], rpc:call(A, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated_twice}], rpc:call(B, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{Tab, 1, updated_twice}], rpc:call(C, mnesia, dirty_read, [{Tab, 1}])),
+
+ ?verify_mnesia(Nodes, []).
+
+remote_force_load_with_local_master_node(doc) ->
+ ["Force load a table on a remote node while the ",
+ "local node is down. Start the local node and ",
+ "verfify that the tables is loaded from disc locally "
+ "if the local node has itself as master node and ",
+ "the remote node has both the local and remote node ",
+ "as master nodes"];
+remote_force_load_with_local_master_node(suite) -> [];
+remote_force_load_with_local_master_node(Config) when is_list(Config) ->
+ [A, B] = Nodes = ?acquire_nodes(2, Config),
+
+ Tab = remote_force_load_with_local_master_node,
+ ?match({atomic,ok}, mnesia:create_table(Tab, [{disc_copies, Nodes}])),
+ ?match(ok, rpc:call(A, mnesia, set_master_nodes, [Tab, [A, B]])),
+ ?match(ok, rpc:call(B, mnesia, set_master_nodes, [Tab, [B]])),
+
+ W = fun(Who) -> mnesia:write({Tab, who, Who}) end,
+ ?match({atomic, ok}, rpc:call(A,mnesia, sync_transaction, [W, [a]])),
+ ?match(stopped, rpc:call(A, mnesia, stop, [])),
+ ?match({atomic, ok}, rpc:call(B, mnesia, sync_transaction, [W, [b]])),
+ ?match(stopped, rpc:call(B, mnesia, stop, [])),
+
+ ?match(ok, rpc:call(A, mnesia, start, [])),
+ ?match(ok, rpc:call(A, mnesia, wait_for_tables, [[Tab], 3000])),
+ ?match([{Tab, who, a}], rpc:call(A, mnesia, dirty_read, [{Tab, who}])),
+
+ ?match(ok, rpc:call(B, mnesia, start, [])),
+ ?match(ok, rpc:call(B, mnesia, wait_for_tables, [[Tab], 3000])),
+ ?match([{Tab, who, b}], rpc:call(B, mnesia, dirty_read, [{Tab, who}])),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+durability_of_dump_tables(doc) ->
+ [ "Verify that all tables contain the correct data when Mnesia",
+ "is restarted and tables are loaded from disc to recover",
+ " their previous contents. " ];
+durability_of_dump_tables(suite) -> [dump_ram_copies,
+ dump_disc_copies,
+ dump_disc_only].
+
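+%% A minimal illustrative sketch (not called by any test case): dump a
+%% ram_copies table to disc so that its contents survive a restart.
+%% TabName is a placeholder argument.
+dump_ram_table_sketch(TabName) ->
+ %% dump_tables/1 is only allowed for ram_copies tables; for disc based
+ %% tables it aborts, as dump_disc_copies and dump_disc_only show below.
+ {atomic, ok} = mnesia:dump_tables([TabName]),
+ ok.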
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+dump_ram_copies(doc) ->
+ ["Check that ram_copies tables are loaded with the"
+ "contents that had been dumped before Mnesia",
+ "was restarted. " ];
+dump_ram_copies(suite) -> [];
+dump_ram_copies(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config),
+ {success, [P1,P2,P3]} = ?start_activities(Nodes),
+
+ NP1 = node(P1),
+ NP2 = node(P2),
+
+ {A,B,C} = case node() of
+ NP1 ->
+ %?verbose("first case ~n"),
+ {P3,P2,P1};
+ NP2 ->
+ %?verbose("second case ~n"),
+ {P3,P1,P2};
+ _ ->
+ {P1,P2,P3}
+ end,
+
+ Node1 = node(A),
+ Node2 = node(B),
+ Node3 = node(C),
+
+ ?verbose(" A pid:~p node:~p ~n",[A,Node1]),
+ ?verbose(" B pid:~p node:~p ~n",[B,Node2]),
+ ?verbose(" C pid:~p node:~p ~n",[C,Node3]),
+
+
+ %% ram copies table on 2 nodes
+
+ Tab = dump_table,
+ Def = [{attributes, [key, value]},
+ {ram_copies, [Node1,Node2]}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ %% dump the table
+
+ ?match( {atomic,ok}, mnesia:dump_tables([Tab])),
+
+ %% perform updates (they shall be lost after Mnesia is killed)
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 815})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 915})),
+
+ %% add another replica on node3
+ mnesia:add_table_copy(Tab,Node3,ram_copies),
+
+ %% all 3 replicas shall have the new contents
+ cross_check_tables([A,B,C],Tab,
+ {[{Tab,1,815}],[{Tab,2,915}],[{Tab,3,256}]}),
+
+ %% kill mnesia on node 3
+ mnesia_test_lib:kill_mnesia([Node3]),
+
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(2)),
+
+ mnesia_test_lib:kill_mnesia([Node1,Node2]), %% kill them as well
+ timer:sleep(timer:seconds(2)),
+
+ %% start Mnesia only on node 3
+ ?verbose("starting mnesia on Node3~n",[]),
+
+ %% test_lib:mnesia_start doesn't work, because it waits
+ %% for the schema on all nodes ... ???
+ ?match(ok,rpc:call(Node3,mnesia,start,[]) ),
+ ?match(ok,rpc:call(Node3,mnesia,wait_for_tables,
+ [[Tab],timer:seconds(30)] ) ),
+
+ %% node3 shall have the contents of the dump
+ cross_check_tables([C],Tab,{[{Tab,1,4711}],[{Tab,2,42}],[{Tab,3,256}]}),
+
+ %% start Mnesia on the other 2 nodes, too
+ mnesia_test_lib:start_mnesia([Node1,Node2],[Tab]),
+
+ cross_check_tables([A,B,C],Tab,
+ {[{Tab,1,4711}],[{Tab,2,42}],[{Tab,3,256}]}),
+ ?verify_mnesia(Nodes, []).
+
+%% check the contents of the table
+
+cross_check_tables([],_tab,_elements) -> ok;
+cross_check_tables([Pid|Rest],Tab,{Val1,Val2,Val3}) ->
+ Pid ! fun () ->
+ R1 = mnesia:dirty_read({Tab,1}),
+ R2 = mnesia:dirty_read({Tab,2}),
+ R3 = mnesia:dirty_read({Tab,3}),
+ {R1,R2,R3}
+ end,
+ ?match_receive({ Pid, {Val1, Val2, Val3 } }),
+ cross_check_tables(Rest,Tab,{Val1,Val2,Val3} ).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Should be in evil test suite !!!
+
+dump_disc_copies(doc) ->
+ ["Check that it is not possible to dump disc_copies tables"];
+dump_disc_copies(suite) -> [];
+dump_disc_copies(Config) when is_list(Config) ->
+ do_dump_copies(Config, disc_copies).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Should be in evil test suite !!!
+dump_disc_only(doc) ->
+ ["Check that it is not possible to dump disc_only_copies tables"];
+dump_disc_only(suite) -> [];
+dump_disc_only(Config) when is_list(Config) ->
+ do_dump_copies(Config,disc_only_copies).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+do_dump_copies(Config,Copies) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+
+ Tab = dump_copies,
+ Def = [{attributes, [key, value]},
+ {Copies, [Node1]}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 4711})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 42})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, 256})),
+
+ %% dump the table
+ ?match( {aborted, {"Only allowed on ram_copies",Tab,[Node1]}},
+ mnesia:dump_tables([Tab])),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, 815})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 915})),
+
+ %% kill mnesia on node1
+ mnesia_test_lib:kill_mnesia([Node1]),
+
+ %% wait a while, so that mnesia is really down
+ timer:sleep(timer:seconds(1)),
+
+ mnesia_test_lib:start_mnesia([Node1],[Tab]),
+
+ ?match([{Tab, 1, 815}], mnesia:dirty_read({Tab,1}) ),
+ ?match([{Tab, 2, 915}], mnesia:dirty_read({Tab,2}) ),
+ ?match([{Tab, 3, 256}], mnesia:dirty_read({Tab,3}) ),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+durability_of_disc_copies(doc) ->
+ ["Perform all possible kinds of updates on tables and check"
+ "whether no data is lost after a restart of Mnesia.",
+ "This test is done for disc_copies"];
+
+durability_of_disc_copies(suite) -> [];
+durability_of_disc_copies(Config) when is_list(Config) ->
+ do_disc_durability(Config,disc_copies).
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+durability_of_disc_only_copies(doc) ->
+ ["Perform all possible kinds of updates on tables and check"
+ "whether no data is lost after a restart of Mnesia.",
+ "This test is done for disc_only_copies"];
+durability_of_disc_only_copies(suite) -> [];
+durability_of_disc_only_copies(Config) when is_list(Config) ->
+ do_disc_durability(Config,disc_only_copies).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+do_disc_durability(Config,CopyType) ->
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(1)}]),
+ {success, [A,B,C]} = ?start_activities(Nodes),
+
+ Tab_set = disc_durability_set,
+ Def_set = [{attributes, [key, value]},
+ {CopyType, Nodes}],
+
+ Tab_bag = disc_durability_bag,
+ Def_bag = [{attributes, [key, value]},
+ {type, bag},
+ {CopyType, Nodes}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab_set, Def_set)),
+ ?match({atomic, ok}, mnesia:create_table(Tab_bag, Def_bag)),
+
+ %% do updates
+ ?match({atomic, ok},
+ mnesia:transaction(fun()->
+ mnesia:write({Tab_set, 11, 1111}),
+ mnesia:write({Tab_set, 22, 2222}),
+ mnesia:write({Tab_set, 33, 3333}),
+ mnesia:write({Tab_set, 55, 5555})
+ end)),
+ mnesia:dirty_write({Tab_set, 44, 4444}),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun()->
+ mnesia:write({Tab_bag, 11, a_1111}),
+ mnesia:write({Tab_bag, 11, b_1111}),
+ mnesia:write({Tab_bag, 22, a_2222}),
+ mnesia:write({Tab_bag, 22, b_2222}),
+ mnesia:write({Tab_bag, 33, a_3333}),
+ mnesia:write({Tab_bag, 33, b_3333})
+ end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun()-> mnesia:delete({Tab_set, 22}) end)),
+ ?match(ok, mnesia:dirty_delete({Tab_set, 33})),
+ ?match(5558, mnesia:dirty_update_counter({Tab_set, 55}, 3)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun()->
+ mnesia:delete_object({Tab_bag, 22, b_2222})
+ end)),
+ ?match(ok, mnesia:dirty_delete_object({Tab_bag, 33, b_3333})),
+ ?match(10, mnesia:dirty_update_counter({Tab_set, counter}, 10)),
+ ?match({atomic, ok}, % Also syncs update_counter
+ mnesia:sync_transaction(fun() -> mnesia:write({Tab_set,66,6666}) end)),
+
+ Updated = {[[{Tab_set,counter,10}],
+ [{Tab_set,counter,10}],
+ [{Tab_set,counter,10}]],[]},
+ ?match(Updated, rpc:multicall(Nodes, mnesia, dirty_read, [Tab_set,counter])),
+
+ %% kill mnesia on all nodes, start it again and check the data
+ mnesia_test_lib:kill_mnesia(Nodes),
+ mnesia_test_lib:start_mnesia(Nodes,[Tab_set,Tab_bag]),
+
+ ?log("Flushed ~p ~n", [mnesia_test_lib:flush()]), %% Debugging strange msgs..
+ ?log("Processes ~p ~p ~p~n", [A,B,C]),
+ check_tables([A,B,C],
+ [{Tab_set,11}, {Tab_set,22},{Tab_set,33},
+ {Tab_set,44},{Tab_set,55}, {Tab_set,66},
+ {Tab_bag,11}, {Tab_bag,22},{Tab_bag,33},
+ {Tab_set, counter}],
+ [[{Tab_set, 11, 1111}], [], [], [{Tab_set, 44, 4444}],
+ [{Tab_set, 55, 5558}], [{Tab_set, 66, 6666}],
+ lists:sort([{Tab_bag, 11, a_1111},{Tab_bag, 11, b_1111}]),
+ [{Tab_bag, 22, a_2222}], [{Tab_bag, 33, a_3333}],
+ [{Tab_set, counter, 10}]]),
+
+ timer:sleep(1000), %% Debugging strange msgs..
+ ?log("Flushed ~p ~n", [mnesia_test_lib:flush()]),
+ ?verify_mnesia(Nodes, []).
+
+%% check the contents of the table
+%%
+%% all the processes in the PidList shall find all
+%% table entries in ValList
+
+check_tables([],_vallist,_resultList) -> ok;
+check_tables([Pid|Rest],ValList,ResultList) ->
+ Pid ! fun () ->
+ check_values(ValList)
+ end,
+ ?match_receive({ Pid, ResultList }),
+ check_tables(Rest,ValList,ResultList).
+
+check_values([]) -> [];
+check_values([{Tab,Key}|Rest]) ->
+ Ret = lists:sort(mnesia:dirty_read({Tab,Key})),
+ [Ret|check_values(Rest)].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% stolen from mnesia_recovery_test.erl:
+
+receive_messages([]) -> [];
+receive_messages(ListOfMsgs) ->
+ receive
+ timeout ->
+ case lists:member(timeout, ListOfMsgs) of
+ false ->
+ ?warning("I (~p) have received unexpected msg~n ~p ~n",
+ [self(),timeout]),
+ receive_messages(ListOfMsgs);
+ true ->
+ ?verbose("I (~p) got msg ~p ~n", [self(),timeout]),
+ [ timeout | receive_messages(ListOfMsgs -- [timeout])]
+ end;
+
+ {Pid, Msg} ->
+ case lists:member(Msg, ListOfMsgs) of
+ false ->
+ ?warning("I (~p) have received unexpected msg~n ~p ~n",
+ [self(),{Pid, Msg}]),
+ receive_messages(ListOfMsgs);
+ true ->
+ ?verbose("I (~p) got msg ~p from ~p ~n", [self(),Msg, Pid]),
+ [{Pid, Msg} | receive_messages(ListOfMsgs -- [Msg])]
+ end;
+
+ Else -> ?warning("Recevied unexpected Msg~n ~p ~n", [Else])
+ after timer:seconds(40) ->
+ ?error("Timeout in receive msgs while waiting for ~p~n",
+ [ListOfMsgs])
+ end.
+
diff --git a/lib/mnesia/test/mnesia_evil_backup.erl b/lib/mnesia/test/mnesia_evil_backup.erl
new file mode 100644
index 0000000000..bbbebeb02c
--- /dev/null
+++ b/lib/mnesia/test/mnesia_evil_backup.erl
@@ -0,0 +1,750 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%%%----------------------------------------------------------------------
+%%% File : mnesia_evil_backup.erl
+%%% Author : Dan Gudmundsson <dgud@legolas>
+%%% Purpose : Evil backup tests
+%%% Created : 3 Jun 1998 by Dan Gudmundsson <[email protected]>
+%%%----------------------------------------------------------------------
+
+-module(mnesia_evil_backup).
+-author('[email protected]').
+-compile(export_all).
+-include("mnesia_test_lib.hrl").
+
+%%-export([Function/Arity, ...]).
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+all(doc) ->
+ ["Checking all the functionality regarding ",
+ "to the backup and different ",
+ "kinds of restore and fallback interface"];
+
+all(suite) ->
+ [
+ backup,
+ bad_backup,
+ global_backup_checkpoint,
+ restore_tables,
+ traverse_backup,
+ selective_backup_checkpoint,
+ incremental_backup_checkpoint,
+%% local_backup_checkpoint,
+ install_fallback,
+ uninstall_fallback,
+ local_fallback,
+ sops_with_checkpoint
+ ].
+
+backup(doc) -> ["Checking the interface to the function backup",
+ "We don't check that the backups can be used here",
+ "That is checked in install_fallback and in restore"];
+backup(suite) -> [];
+backup(Config) when is_list(Config) ->
+ [Node1, Node2] = _Nodes = ?acquire_nodes(2, Config),
+ Tab = backup_tab,
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ File = "backup_test.BUP",
+ ?match(ok, mnesia:backup(File)),
+
+ File2 = "backup_test2.BUP",
+ Tab2 = backup_tab2,
+ Def2 = [{disc_only_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match(ok, mnesia:backup(File2, mnesia_backup)),
+
+ File3 = "backup_test3.BUP",
+ mnesia_test_lib:kill_mnesia([Node2]),
+ ?match({error, _}, mnesia:backup(File3, mnesia_backup)),
+
+ ?match(ok, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?match({error, _}, file:delete(File3)),
+ ?verify_mnesia([Node1], [Node2]).
+
+
+bad_backup(suite) -> [];
+bad_backup(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = backup_tab,
+ Def = [{disc_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ File = "backup_test.BUP",
+ ?match(ok, mnesia:backup(File)),
+ file:write_file(File, "trash", [append]),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_bad})),
+ ?match({atomic,[Tab]}, mnesia:restore(File, [{clear_tables, [Tab]}])),
+ ?match([{Tab,1,test_ok}], mnesia:dirty_read(Tab, 1)),
+
+ ?match(ok, file:delete(File)),
+ ?verify_mnesia([Node1], []).
+
+
+
+global_backup_checkpoint(doc) ->
+ ["Checking the interface to the function backup_checkpoint",
+ "We don't check that the backups can be used here",
+ "That is checked in install_fallback and in restore"];
+global_backup_checkpoint(suite) -> [];
+global_backup_checkpoint(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = backup_cp,
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ File = "backup_checkpoint.BUP",
+ File2 = "backup_checkpoint2.BUP",
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ ?match({error, _}, mnesia:backup_checkpoint(cp_name, File)),
+ Spec = [{name, cp_name}, {max, mnesia:system_info(tables)}],
+ ?match({ok, _Name, _Ns}, mnesia:activate_checkpoint(Spec)),
+ ?match(ok, mnesia:backup_checkpoint(cp_name, File)),
+ ?match({error, _}, mnesia:backup_checkpoint(cp_name_nonexist, File)),
+ ?match(ok, mnesia:backup_checkpoint(cp_name, File2, mnesia_backup)),
+ ?match({error, _}, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?verify_mnesia(Nodes, []).
+
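+%% A minimal illustrative sketch (not called by any test case): the typical
+%% checkpoint/backup sequence exercised above. CpName, File and Tabs are
+%% placeholder arguments.
+backup_checkpoint_sketch(CpName, File, Tabs) ->
+ %% Activate a checkpoint covering the given tables ...
+ {ok, CpName, _Nodes} = mnesia:activate_checkpoint([{name, CpName}, {max, Tabs}]),
+ %% ... back it up to a file ...
+ ok = mnesia:backup_checkpoint(CpName, File),
+ %% ... and release the checkpoint again.
+ ok = mnesia:deactivate_checkpoint(CpName).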
+restore_tables(doc) ->
+ ["Tests the interface of restore"];
+
+restore_tables(suite) ->
+ [
+ restore_errors,
+ restore_clear,
+ restore_keep,
+ restore_recreate,
+ restore_clear_ram
+ ].
+
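+%% A minimal illustrative sketch (not called by any test case): the three
+%% restore operations exercised below, applied to one table while all other
+%% tables in the backup are skipped. File and Tab are placeholder arguments.
+restore_ops_sketch(File, Tab) ->
+ %% clear_tables: wipe the current contents before restoring
+ {atomic, _} = mnesia:restore(File, [{clear_tables, [Tab]},
+ {default_op, skip_tables}]),
+ %% keep_tables: merge the backup into the current contents
+ {atomic, _} = mnesia:restore(File, [{keep_tables, [Tab]},
+ {default_op, skip_tables}]),
+ %% recreate_tables: delete the table and re-create it from the backup
+ {atomic, _} = mnesia:restore(File, [{recreate_tables, [Tab]},
+ {default_op, skip_tables}]),
+ ok.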
+restore_errors(suite) -> [];
+restore_errors(Config) when is_list(Config) ->
+ [_Node] = ?acquire_nodes(1, Config),
+ ?match({aborted, enoent}, mnesia:restore(notAfile, [])),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, not_a_list)),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, [test_badarg])),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, [{test_badarg, xxx}])),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, [{skip_tables, xxx}])),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, [{recreate_tables, [schema]}])),
+ ?match({aborted, {badarg, _}}, mnesia:restore(notAfile, [{default_op, asdklasd}])),
+ ok.
+
+restore_clear(suite) -> [];
+restore_clear(Config) when is_list(Config) ->
+ restore(Config, clear_tables).
+
+restore_keep(suite) -> [];
+restore_keep(Config) when is_list(Config) ->
+ restore(Config, keep_tables).
+
+restore_recreate(suite) -> [];
+restore_recreate(Config) when is_list(Config) ->
+ restore(Config, recreate_tables).
+
+check_tab(Records, Line) ->
+ Verify = fun({Table, Key, Val}) ->
+ case catch mnesia:dirty_read({Table, Key}) of
+ [{Table, Key, Val}] -> ok;
+ Else ->
+ mnesia_test_lib:error("Not matching on Node ~p ~n"
+ " Expected ~p~n Actual ~p~n",
+ [node(), {Table, Key, Val}, Else],
+ ?MODULE, Line),
+ exit(error)
+ end;
+ (Recs) ->
+ [{Tab, Key, _}, _] = Recs,
+ SRecs = lists:sort(Recs),
+ R_Recs = lists:sort(catch mnesia:dirty_read({Tab, Key})),
+ case R_Recs of
+ SRecs -> ok;
+ Else ->
+ mnesia_test_lib:error("Not matching on Node ~p ~n"
+ " Expected ~p~n Actual ~p~n",
+ [node(), SRecs, Else],
+ ?MODULE, Line),
+ exit(error)
+ end
+ end,
+ lists:foreach(Verify, Records).
+
+restore(Config, Op) ->
+ [Node1, Node2, _Node3] = Nodes = ?acquire_nodes(3, Config),
+
+ Tab1 = ram_snmp,
+ Def1 = [{snmp, [{key, integer}]}, {ram_copies, [Node1]}],
+ Tab2 = disc_index,
+ Def2 = [{index, [val]}, {disc_copies, [Node1, Node2]}],
+ Tab3 = dionly_bag,
+ Def3 = [{type, bag}, {disc_only_copies, Nodes}],
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+
+ File1 = "restore1.BUP",
+ File2 = "restore2.BUP",
+
+ Restore = fun(O, A) ->
+ case mnesia:restore(O, A) of
+ {atomic, Tabs} when is_list(Tabs) -> {atomic, lists:sort(Tabs)};
+ Other -> Other
+ end
+ end,
+ Tabs = lists:sort([Tab1, Tab2, Tab3]),
+
+ [mnesia:dirty_write({Tab1, N, N+42}) || N <- lists:seq(1, 10)],
+ [mnesia:dirty_write({Tab2, N, N+43}) || N <- lists:seq(1, 10)],
+ [mnesia:dirty_write({Tab3, N, N+44}) || N <- lists:seq(1, 10)],
+
+ Res1 = [{Tab1, N, N+42} || N <- lists:seq(1, 10)],
+ Res2 = [{Tab2, N, N+43} || N <- lists:seq(1, 10)],
+ Res3 = [{Tab3, N, N+44} || N <- lists:seq(1, 10)],
+
+ {ok, Name, _} = mnesia:activate_checkpoint([{min, Tabs}, {ram_overrides_dump, true}]),
+ file:delete(File1),
+
+ %% Test standard Restore on one table on one node
+ ?match(ok, mnesia:backup_checkpoint(Name, File1)),
+ ?match(ok, mnesia:deactivate_checkpoint(Name)),
+ ?match(ok, mnesia:backup(File2)),
+ [mnesia:dirty_write({Tab1, N, N+1}) || N <- lists:seq(1, 11)],
+ [mnesia:dirty_write({Tab2, N, N+1}) || N <- lists:seq(1, 11)],
+ [mnesia:dirty_write({Tab3, N, N+1}) || N <- lists:seq(1, 11)],
+ _Res11 = [{Tab1, N, N+1} || N <- lists:seq(1, 11)],
+ Res21 = [{Tab2, N, N+1} || N <- lists:seq(1, 11)],
+ Res31 = [[{Tab3, N, N+1}, {Tab3, N, N+44}] || N <- lists:seq(1, 10)],
+
+ ?match({atomic, [Tab1]}, Restore(File1, [{Op, [Tab1]},
+ {skip_tables, Tabs -- [Tab1]}])),
+ case Op of
+ keep_tables ->
+ ?match([{Tab1, 11, 12}], mnesia:dirty_read({Tab1, 11}));
+ clear_tables ->
+ ?match([], mnesia:dirty_read({Tab1, 11}));
+ recreate_tables ->
+ ?match([], mnesia:dirty_read({Tab1, 11}))
+ end,
+ [rpc:call(Node, ?MODULE, check_tab, [Res1, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res21, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res31, ?LINE]) || Node <- Nodes],
+
+ %% Restore all tables on their nodes
+ mnesia_schema:clear_table(Tab1),
+ mnesia_schema:clear_table(Tab2),
+ mnesia_schema:clear_table(Tab3),
+ [mnesia:dirty_write({Tab1, N, N+1}) || N <- lists:seq(1, 11)],
+ [mnesia:dirty_write({Tab2, N, N+1}) || N <- lists:seq(1, 11)],
+ [mnesia:dirty_write({Tab3, N, N+1}) || N <- lists:seq(1, 11)],
+
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab2, Node1)),
+
+ ?match({ok, Node1}, mnesia:subscribe({table, Tab1})),
+
+ ?match({atomic, Tabs}, Restore(File1, [{default_op, Op},
+ {module, mnesia_backup}])),
+ case Op of
+ clear_tables ->
+ ?match_receive({mnesia_table_event, {delete, {schema, Tab1}, _}}),
+ ?match_receive({mnesia_table_event, {write, {schema, Tab1, _}, _}}),
+ check_subscr(Tab1),
+ [rpc:call(Node, ?MODULE, check_tab, [Res1, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res2, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res3, ?LINE]) || Node <- Nodes],
+ ?match([], mnesia:dirty_read({Tab1, 11})),
+ ?match([], mnesia:dirty_read({Tab2, 11})),
+ ?match([], mnesia:dirty_read({Tab3, 11})),
+ %% Check Index
+ ?match([{Tab2, 10, 53}], mnesia:dirty_index_read(Tab2, 53, val)),
+ ?match([], mnesia:dirty_index_read(Tab2, 11, val)),
+ %% Check Snmp
+ ?match({ok, [1]}, mnesia:snmp_get_next_index(Tab1,[])),
+ ?match({ok, {Tab1, 1, 43}}, mnesia:snmp_get_row(Tab1, [1])),
+ ?match(undefined, mnesia:snmp_get_row(Tab1, [11])),
+ %% Check schema info
+ ?match([Node2], mnesia:table_info(Tab2, where_to_write));
+ keep_tables ->
+ check_subscr(Tab1),
+ [rpc:call(Node, ?MODULE, check_tab, [Res1, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res2, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res31, ?LINE]) || Node <- Nodes],
+ ?match([{Tab1, 11, 12}], mnesia:dirty_read({Tab1, 11})),
+ ?match([{Tab2, 11, 12}], mnesia:dirty_read({Tab2, 11})),
+ ?match([{Tab3, 11, 12}], mnesia:dirty_read({Tab3, 11})),
+ ?match([{Tab2, 10, 53}], mnesia:dirty_index_read(Tab2, 53, val)),
+ %% Check Index
+ ?match([], mnesia:dirty_index_read(Tab2, 11, val)),
+ ?match({ok, [1]}, mnesia:snmp_get_next_index(Tab1,[])),
+ %% Check Snmp
+ ?match({ok, {Tab1, 1, 43}}, mnesia:snmp_get_row(Tab1, [1])),
+ ?match({ok, {Tab1, 11, 12}}, mnesia:snmp_get_row(Tab1, [11])),
+ %% Check schema info
+ ?match([Node2], mnesia:table_info(Tab2, where_to_write));
+ recreate_tables ->
+ check_subscr(Tab1, 0),
+ [rpc:call(Node, ?MODULE, check_tab, [Res1, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res2, ?LINE]) || Node <- Nodes],
+ [rpc:call(Node, ?MODULE, check_tab, [Res3, ?LINE]) || Node <- Nodes],
+ ?match([], mnesia:dirty_read({Tab1, 11})),
+ ?match([], mnesia:dirty_read({Tab2, 11})),
+ ?match([], mnesia:dirty_read({Tab3, 11})),
+ %% Check Index
+ ?match([{Tab2, 10, 53}], mnesia:dirty_index_read(Tab2, 53, val)),
+ ?match([], mnesia:dirty_index_read(Tab2, 11, val)),
+ %% Check Snmp
+ ?match({ok, [1]}, mnesia:snmp_get_next_index(Tab1,[])),
+ ?match({ok, {Tab1, 1, 43}}, mnesia:snmp_get_row(Tab1, [1])),
+ ?match(undefined, mnesia:snmp_get_row(Tab1, [11])),
+ %% Check schema info
+ Ns = lists:sort([Node1, Node2]),
+ ?match(Ns, lists:sort(mnesia:table_info(Tab2, where_to_write)))
+ end,
+ ?match(ok, file:delete(File1)),
+ ?match(ok, file:delete(File2)),
+ ?verify_mnesia(Nodes, []).
+
+
+check_subscr(Tab) ->
+ check_subscr(Tab, 10).
+
+check_subscr(_Tab, 0) ->
+ receive
+ Msg ->
+ ?error("Too many msgs ~p~n", [Msg])
+ after 500 ->
+ ok
+ end;
+check_subscr(Tab, N) ->
+ V = N +42,
+ receive
+ {mnesia_table_event, {write, {Tab, N, V}, _}} ->
+ check_subscr(Tab, N-1)
+ after 500 ->
+ ?error("Missing ~p~n", [{Tab, N, V}])
+ end.
+
+restore_clear_ram(suite) -> [];
+restore_clear_ram(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, [{diskless, true}|Config]),
+
+ ?match({atomic, ok}, mnesia:create_table(a, [{ram_copies, Nodes}])),
+
+ Write = fun(What) ->
+ mnesia:write({a,1,What}),
+ mnesia:write({a,2,What}),
+ mnesia:write({a,3,What})
+ end,
+ Bup = "restore_clear_ram.BUP",
+
+ ?match({atomic, ok}, mnesia:transaction(Write, [initial])),
+ ?match({ok, _, _}, mnesia:activate_checkpoint([{name,test},
+ {min, [schema, a]},
+ {ram_overrides_dump, true}])),
+ ?match(ok, mnesia:backup_checkpoint(test, Bup)),
+
+ ?match({atomic, ok}, mnesia:transaction(Write, [data])),
+ ?match({atomic, [a]}, mnesia:restore(Bup, [{clear_tables,[a]},{default_op,skip_tables}])),
+
+ restore_clear_ram_loop(100, Nodes, Bup),
+
+ ok.
+
+restore_clear_ram_loop(N, Nodes = [N1,N2,N3], Bup) when N > 0 ->
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ ?match({_, []}, rpc:multicall([N1,N2], mnesia, start, [[{extra_db_nodes, Nodes}]])),
+ Key = rpc:async_call(N3, mnesia, start, [[{extra_db_nodes, Nodes}]]),
+ ?match({atomic, ok}, mnesia:create_table(a, [{ram_copies, Nodes}])),
+ ?match({atomic, [a]}, mnesia:restore(Bup, [{clear_tables,[a]},{default_op,skip_tables}])),
+ ?match(ok, rpc:yield(Key)),
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [[a], 3000])),
+ case rpc:multicall(Nodes, mnesia, table_info, [a,size]) of
+ {[3,3,3], []} ->
+ restore_clear_ram_loop(N-1, Nodes, Bup);
+ Error ->
+ ?match(3, Error)
+ end;
+restore_clear_ram_loop(_,_,_) ->
+ ok.
+
+traverse_backup(doc) ->
+ ["Testing the traverse_backup interface, the resulting file is not tested though",
+ "See install_fallback for result using the output file from traverse_backup",
+ "A side effect is that the backup file contents are tested"];
+traverse_backup(suite) -> [];
+traverse_backup(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = backup_tab,
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 4, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 5, test_nok})),
+ File = "_treverse_backup.BUP",
+ File2 = "traverse_backup2.BUP",
+ File3 = "traverse_backup3.BUP",
+ ?match(ok, mnesia:backup(File)),
+
+ Fun = fun({backup_tab, N, _}, Acc) -> {[{backup_tab, N, test_ok}], Acc+1};
+ (Other, Acc) -> {[Other], Acc}
+ end,
+
+ ?match({ok, 5}, mnesia:traverse_backup(File, read_only, Fun, 0)),
+ ?match(ok, file:delete(read_only)),
+
+ ?match({ok, 5}, mnesia:traverse_backup(File, mnesia_backup,
+ dummy, read_only, Fun, 0)),
+
+ ?match({ok, 5}, mnesia:traverse_backup(File, File2, Fun, 0)),
+ ?match({ok, 5}, mnesia:traverse_backup(File2, mnesia_backup,
+ File3, mnesia_backup, Fun, 0)),
+
+ BadFun = fun({bad_tab, _N, asd}, Acc) -> {{error, error}, Acc} end,
+ ?match({error, _}, mnesia:traverse_backup(File, read_only, BadFun, 0)),
+ ?match({error, _}, file:delete(read_only)),
+ ?match(ok, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?match(ok, file:delete(File3)),
+ ?verify_mnesia(Nodes, []).
+
+
+install_fallback(doc) ->
+ ["This tests the install_fallback intf.",
+ "It also verifies that the output from backup_checkpoint and traverse_backup",
+ "is valid"];
+install_fallback(suite) -> [];
+install_fallback(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = fallbacks_test,
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 4, test_nok})),
+ ?match(ok, mnesia:dirty_write({Tab, 5, test_nok})),
+
+ Tab2 = fallbacks_test2,
+ Def2 = [{disc_copies, [node()]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ Tab3 = fallbacks_test3,
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def2)),
+ Fun2 = fun(Key) ->
+ Rec = {Tab2, Key, test_ok},
+ mnesia:dirty_write(Rec),
+ [Rec]
+ end,
+ TabSize3 = 1000,
+ OldRecs2 = [Fun2(K) || K <- lists:seq(1, TabSize3)],
+
+ Spec =[{name, cp_name}, {max, mnesia:system_info(tables)}],
+ ?match({ok, _Name, Nodes}, mnesia:activate_checkpoint(Spec)),
+ ?match(ok, mnesia:dirty_write({Tab, 6, test_nok})),
+ [mnesia:dirty_write({Tab2, K, test_nok}) || K <- lists:seq(1, TabSize3 + 10)],
+ File = "install_fallback.BUP",
+ File2 = "install_fallback2.BUP",
+ File3 = "install_fallback3.BUP",
+ ?match(ok, mnesia:backup_checkpoint(cp_name, File)),
+
+ Fun = fun({T, N, _}, Acc) when T == Tab ->
+ case N rem 2 of
+ 0 ->
+ io:format("write ~p -> ~p~n", [N, T]),
+ {[{T, N, test_ok}], Acc + 1};
+ 1 ->
+ io:format("write ~p -> ~p~n", [N, Tab3]),
+ {[{Tab3, N, test_ok}], Acc + 1}
+ end;
+ ({T, N}, Acc) when T == Tab ->
+ case N rem 2 of
+ 0 ->
+ io:format("delete ~p -> ~p~n", [N, T]),
+ {[{T, N}], Acc + 1};
+ 1 ->
+ io:format("delete ~p -> ~p~n", [N, Tab3]),
+ {[{Tab3, N}], Acc + 1}
+ end;
+ (Other, Acc) ->
+ {[Other], Acc}
+ end,
+ ?match({ok, _}, mnesia:traverse_backup(File, File2, Fun, 0)),
+ ?match(ok, mnesia:install_fallback(File2)),
+
+ mnesia_test_lib:kill_mnesia([Node1, Node2]),
+ timer:sleep(timer:seconds(1)), % Let it die!
+
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2], [Tab, Tab2, Tab3])),
+
+ % Verify
+ ?match([], mnesia:dirty_read({Tab, 1})),
+ ?match([{Tab3, 1, test_ok}], mnesia:dirty_read({Tab3, 1})),
+ ?match([{Tab, 2, test_ok}], mnesia:dirty_read({Tab, 2})),
+ ?match([], mnesia:dirty_read({Tab3, 2})),
+ ?match([], mnesia:dirty_read({Tab, 3})),
+ ?match([{Tab3, 3, test_ok}], mnesia:dirty_read({Tab3, 3})),
+ ?match([{Tab, 4, test_ok}], mnesia:dirty_read({Tab, 4})),
+ ?match([], mnesia:dirty_read({Tab3, 4})),
+ ?match([], mnesia:dirty_read({Tab, 5})),
+ ?match([{Tab3, 5, test_ok}], mnesia:dirty_read({Tab3, 5})),
+ ?match([], mnesia:dirty_read({Tab, 6})),
+ ?match([], mnesia:dirty_read({Tab3, 6})),
+ ?match([], [mnesia:dirty_read({Tab2, K}) || K <- lists:seq(1, TabSize3)] -- OldRecs2),
+ ?match(TabSize3, mnesia:table_info(Tab2, size)),
+
+ % Check the interface
+ file:delete(File3),
+ ?match({error, _}, mnesia:install_fallback(File3)),
+ ?match({error, _}, mnesia:install_fallback(File2, mnesia_badmod)),
+ ?match(ok, mnesia:install_fallback(File2, mnesia_backup)),
+ ?match(ok, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?match({error, _}, file:delete(File3)),
+ ?verify_mnesia(Nodes, []).
+
+uninstall_fallback(suite) -> [];
+uninstall_fallback(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = uinst_fallbacks_test,
+ File = "uinst_fallback.BUP",
+ File2 = "uinst_fallback2.BUP",
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ ?match(ok, mnesia:backup(File)),
+ Fun = fun({T, N, _}, Acc) when T == Tab ->
+ {[{T, N, test_nok}], Acc+1};
+ (Other, Acc) -> {[Other], Acc}
+ end,
+ ?match({ok, _}, mnesia:traverse_backup(File, File2, Fun, 0)),
+ ?match({error, enoent}, mnesia:uninstall_fallback()),
+ ?match(ok, mnesia:install_fallback(File2)),
+ ?match(ok, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?match(ok, mnesia:uninstall_fallback()),
+
+ mnesia_test_lib:kill_mnesia([Node1, Node2]),
+ timer:sleep(timer:seconds(1)), % Let it die!
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2], [Tab])),
+ ?match([{Tab, 1, test_ok}], mnesia:dirty_read({Tab, 1})),
+ ?verify_mnesia(Nodes, []).
+
+local_fallback(suite) -> [];
+local_fallback(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = local_fallback,
+ File = "local_fallback.BUP",
+ Def = [{disc_copies, Nodes}],
+ Key = foo,
+ Pre = {Tab, Key, pre},
+ Post = {Tab, Key, post},
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write(Pre)),
+ ?match(ok, mnesia:backup(File)),
+ ?match(ok, mnesia:dirty_write(Post)),
+ Local = [{scope, local}],
+ ?match({error, enoent}, mnesia:uninstall_fallback(Local)),
+ ?match(ok, mnesia:install_fallback(File, Local)),
+ ?match(true, mnesia:system_info(fallback_activated)),
+ ?match(ok, mnesia:uninstall_fallback(Local)),
+ ?match(false, mnesia:system_info(fallback_activated)),
+ ?match(ok, mnesia:install_fallback(File, Local)),
+ ?match(true, mnesia:system_info(fallback_activated)),
+
+ ?match(false, rpc:call(Node2, mnesia, system_info , [fallback_activated])),
+ ?match(ok, rpc:call(Node2, mnesia, install_fallback , [File, Local])),
+ ?match([Post], mnesia:dirty_read({Tab, Key})),
+ ?match([Post], rpc:call(Node2, mnesia, dirty_read, [{Tab, Key}])),
+
+ ?match([], mnesia_test_lib:kill_mnesia(Nodes)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab])),
+ ?match([Pre], mnesia:dirty_read({Tab, Key})),
+ ?match([Pre], rpc:call(Node2, mnesia, dirty_read, [{Tab, Key}])),
+ Dir = rpc:call(Node2, mnesia, system_info , [directory]),
+
+ ?match(ok, mnesia:dirty_write(Post)),
+ ?match([Post], mnesia:dirty_read({Tab, Key})),
+ ?match([], mnesia_test_lib:kill_mnesia([Node2])),
+ ?match(ok, mnesia:install_fallback(File, Local ++ [{mnesia_dir, Dir}])),
+ ?match([], mnesia_test_lib:kill_mnesia([Node1])),
+
+ ?match([], mnesia_test_lib:start_mnesia([Node2], [])),
+ ?match(yes, rpc:call(Node2, mnesia, force_load_table, [Tab])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab])),
+ ?match([Pre], mnesia:dirty_read({Tab, Key})),
+
+ ?match(ok, file:delete(File)),
+ ?verify_mnesia(Nodes, []).
+
+selective_backup_checkpoint(doc) ->
+ ["Perform a selective backup of a checkpoint"];
+selective_backup_checkpoint(suite) -> [];
+selective_backup_checkpoint(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = sel_backup,
+ OmitTab = sel_backup_omit,
+ CpName = sel_cp,
+ Def = [{disc_copies, [Node1, Node2]}],
+ File = "selective_backup_checkpoint.BUP",
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match({atomic, ok}, mnesia:create_table(OmitTab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ ?match(ok, mnesia:dirty_write({OmitTab, 1, test_ok})),
+ CpSpec = [{name, CpName}, {max, mnesia:system_info(tables)}],
+ ?match({ok, CpName, _Ns}, mnesia:activate_checkpoint(CpSpec)),
+
+ BupSpec = [{tables, [Tab]}],
+ ?match(ok, mnesia:backup_checkpoint(CpName, File, BupSpec)),
+
+ ?match([schema, sel_backup], bup_tables(File, mnesia_backup)),
+ ?match(ok, file:delete(File)),
+
+ BupSpec2 = [{tables, [Tab, OmitTab]}],
+ ?match(ok, mnesia:backup_checkpoint(CpName, File, BupSpec2)),
+
+ ?match([schema, sel_backup, sel_backup_omit],
+ bup_tables(File, mnesia_backup)),
+ ?match(ok, file:delete(File)),
+ ?verify_mnesia(Nodes, []).
+
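+%% Traverse a backup in read_only mode and return the sorted list of
+%% table names that occur in it.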
+bup_tables(File, Mod) ->
+ Fun = fun(Rec, Tabs) ->
+ Tab = element(1, Rec),
+ Tabs2 = [Tab | lists:delete(Tab, Tabs)],
+ {[Rec], Tabs2}
+ end,
+ case mnesia:traverse_backup(File, Mod, dummy, read_only, Fun, []) of
+ {ok, Tabs} ->
+ lists:sort(Tabs);
+ {error, Reason} ->
+ exit(Reason)
+ end.
+
+incremental_backup_checkpoint(doc) ->
+    ["Perform an incremental backup of a checkpoint"];
+incremental_backup_checkpoint(suite) -> [];
+incremental_backup_checkpoint(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = incr_backup,
+ Def = [{disc_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ OldRecs = [{Tab, K, -K} || K <- lists:seq(1, 5)],
+ ?match([ok|_], [mnesia:dirty_write(R) || R <- OldRecs]),
+ OldCpName = old_cp,
+ OldCpSpec = [{name, OldCpName}, {min, [Tab]}],
+ ?match({ok, OldCpName, _Ns}, mnesia:activate_checkpoint(OldCpSpec)),
+
+ BupSpec = [{tables, [Tab]}],
+ OldFile = "old_full_backup.BUP",
+ ?match(ok, mnesia:backup_checkpoint(OldCpName, OldFile, BupSpec)),
+ ?match(OldRecs, bup_records(OldFile, mnesia_backup)),
+ ?match(ok, mnesia:dirty_delete({Tab, 1})),
+ ?match(ok, mnesia:dirty_write({Tab, 2, 2})),
+ ?match(ok, mnesia:dirty_write({Tab, 3, -3})),
+
+ NewCpName = new_cp,
+ NewCpSpec = [{name, NewCpName}, {min, [Tab]}],
+ ?match({ok, NewCpName, _Ns}, mnesia:activate_checkpoint(NewCpSpec)),
+ ?match(ok, mnesia:dirty_write({Tab, 4, 4})),
+
+ NewFile = "new_full_backup.BUP",
+ ?match(ok, mnesia:backup_checkpoint(NewCpName, NewFile, BupSpec)),
+ NewRecs = [{Tab, 2, 2}, {Tab, 3, -3},
+ {Tab, 4, 4}, {Tab, 4}, {Tab, 4, -4}, {Tab, 5, -5}],
+ ?match(NewRecs, bup_records(NewFile, mnesia_backup)),
+
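+    %% A backup of the new checkpoint, taken incrementally relative to the old
+    %% one, should only contain what changed after the old checkpoint was
+    %% activated (compare DiffRecs below with NewRecs above).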
+ DiffFile = "diff_backup.BUP",
+ DiffBupSpec = [{tables, [Tab]}, {incremental, OldCpName}],
+ ?match(ok, mnesia:backup_checkpoint(NewCpName, DiffFile, DiffBupSpec)),
+ DiffRecs = [{Tab, 1}, {Tab, 2}, {Tab, 2, 2}, {Tab, 3}, {Tab, 3, -3},
+ {Tab, 4}, {Tab, 4, 4}, {Tab, 4}, {Tab, 4, -4}],
+ ?match(DiffRecs, bup_records(DiffFile, mnesia_backup)),
+
+ ?match(ok, mnesia:deactivate_checkpoint(OldCpName)),
+ ?match(ok, mnesia:deactivate_checkpoint(NewCpName)),
+ ?match(ok, file:delete(OldFile)),
+ ?match(ok, file:delete(NewFile)),
+ ?match(ok, file:delete(DiffFile)),
+
+ ?verify_mnesia(Nodes, []).
+
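+%% Traverse a backup in read_only mode and return all non-schema records,
+%% sorted by table and key.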
+bup_records(File, Mod) ->
+ Fun = fun(Rec, Recs) when element(1, Rec) == schema ->
+ {[Rec], Recs};
+ (Rec, Recs) ->
+ {[Rec], [Rec | Recs]}
+ end,
+ case mnesia:traverse_backup(File, Mod, dummy, read_only, Fun, []) of
+ {ok, Recs} ->
+ lists:keysort(1, lists:keysort(2, lists:reverse(Recs)));
+ {error, Reason} ->
+ exit(Reason)
+ end.
+
+sops_with_checkpoint(doc) ->
+ ["Test schema operations during a checkpoint"];
+sops_with_checkpoint(suite) -> [];
+sops_with_checkpoint(Config) when is_list(Config) ->
+ Ns = ?acquire_nodes(2, Config),
+
+ ?match({ok, cp1, Ns}, mnesia:activate_checkpoint([{name, cp1},{max,mnesia:system_info(tables)}])),
+ Tab = tab,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{disc_copies,Ns}])),
+ OldRecs = [{Tab, K, -K} || K <- lists:seq(1, 5)],
+ [mnesia:dirty_write(R) || R <- OldRecs],
+
+ ?match({ok, cp2, Ns}, mnesia:activate_checkpoint([{name, cp2},{max,mnesia:system_info(tables)}])),
+ File1 = "cp1_delete_me.BUP",
+ ?match(ok, mnesia:dirty_write({Tab,6,-6})),
+ ?match(ok, mnesia:backup_checkpoint(cp1, File1)),
+ ?match(ok, mnesia:dirty_write({Tab,7,-7})),
+ File2 = "cp2_delete_me.BUP",
+ ?match(ok, mnesia:backup_checkpoint(cp2, File2)),
+
+ ?match(ok, mnesia:deactivate_checkpoint(cp1)),
+ ?match(ok, mnesia:backup_checkpoint(cp2, File1)),
+ ?match(ok, mnesia:dirty_write({Tab,8,-8})),
+
+ ?match({atomic,ok}, mnesia:delete_table(Tab)),
+ ?match({error,_}, mnesia:backup_checkpoint(cp2, File2)),
+ ?match({'EXIT',_}, mnesia:dirty_write({Tab,9,-9})),
+
+ ?match({atomic,_}, mnesia:restore(File1, [{default_op, recreate_tables}])),
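+    %% Only the records present when cp2 was activated (keys 1-5, value -Key)
+    %% should survive the restore.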
+    Test = fun(N) when N > 5 -> ?error("Too many records in backup ~p ~n", [N]);
+ (N) -> case mnesia:dirty_read(Tab,N) of
+ [{Tab,N,B}] when -B =:= N -> ok;
+ Other -> ?error("Not matching ~p ~p~n", [N,Other])
+ end
+ end,
+ [Test(N) || N <- mnesia:dirty_all_keys(Tab)],
+ ?match({aborted,enoent}, mnesia:restore(File2, [{default_op, recreate_tables}])),
+
+ file:delete(File1), file:delete(File2),
+
+ ?verify_mnesia(Ns, []).
diff --git a/lib/mnesia/test/mnesia_evil_coverage_test.erl b/lib/mnesia/test/mnesia_evil_coverage_test.erl
new file mode 100644
index 0000000000..06674d9eef
--- /dev/null
+++ b/lib/mnesia/test/mnesia_evil_coverage_test.erl
@@ -0,0 +1,2167 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_evil_coverage_test).
+-author('[email protected]').
+-include("mnesia_test_lib.hrl").
+
+-compile([export_all]).
+
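+%% Clean up by reloading the mnesia application on N nodes,
+%% via mnesia_test_lib:prepare_test_case/5.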
+-define(cleanup(N, Config),
+ mnesia_test_lib:prepare_test_case([{reload_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Evil usage of the API.",
+     "Invoke all functions in the API and try to cover all legal use",
+     "cases as well as the illegal ditto. This is a complement to the",
+ "other more explicit test cases."];
+all(suite) ->
+ [
+ system_info,
+ table_info,
+ error_description,
+ db_node_lifecycle,
+ evil_delete_db_node,
+ start_and_stop,
+ checkpoint,
+ table_lifecycle,
+ add_copy_conflict,
+ add_copy_when_going_down,
+ replica_management,
+ schema_availability,
+ local_content,
+ table_access_modifications,
+ replica_location,
+ table_sync,
+ user_properties,
+ unsupp_user_props,
+ record_name,
+ snmp_access,
+ iteration,
+ debug_support,
+ sorted_ets,
+ {mnesia_dirty_access_test, all},
+ {mnesia_trans_access_test, all},
+ {mnesia_evil_backup, all}
+ ].
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Get meta info about Mnesia
+
+system_info(suite) -> [];
+system_info(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(all, Config),
+ Ns = ?sort(Nodes),
+ ?match(yes, mnesia:system_info(is_running)),
+ ?match(Ns, ?sort(mnesia:system_info(db_nodes))),
+ ?match(Ns, ?sort(mnesia:system_info(running_db_nodes))),
+ ?match(A when is_atom(A), mnesia:system_info(debug)),
+ ?match(L when is_list(L), mnesia:system_info(directory)),
+ ?match(L when is_list(L), mnesia:system_info(log_version)),
+ ?match({_, _}, mnesia:system_info(schema_version)),
+ ?match(L when is_list(L), mnesia:system_info(tables)),
+ ?match(L when is_list(L), mnesia:system_info(local_tables)),
+ ?match(L when is_list(L), mnesia:system_info(held_locks)),
+ ?match(L when is_list(L), mnesia:system_info(lock_queue)),
+ ?match(L when is_list(L), mnesia:system_info(transactions)),
+ ?match(I when is_integer(I), mnesia:system_info(transaction_failures)),
+ ?match(I when is_integer(I), mnesia:system_info(transaction_commits)),
+ ?match(I when is_integer(I), mnesia:system_info(transaction_restarts)),
+ ?match(L when is_list(L), mnesia:system_info(checkpoints)),
+ ?match(A when is_atom(A), mnesia:system_info(backup_module)),
+ ?match(true, mnesia:system_info(auto_repair)),
+ ?match({_, _}, mnesia:system_info(dump_log_interval)),
+ ?match(A when is_atom(A), mnesia:system_info(dump_log_update_in_place)),
+ ?match(I when is_integer(I), mnesia:system_info(transaction_log_writes)),
+ ?match(I when is_integer(I), mnesia:system_info(send_compressed)),
+ ?match(L when is_list(L), mnesia:system_info(all)),
+    ?match({'EXIT', {aborted, Reason}} when element(1, Reason) == badarg,
+           mnesia:system_info(ali_baba)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Get meta info about table
+
+table_info(suite) -> [];
+table_info(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+
+ Tab = table_info,
+ Type = bag,
+ ValPos = 3,
+ Attrs = [k, v],
+ Arity = length(Attrs) +1,
+
+ Schema =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{type, Type}, {attributes, Attrs}, {index, [ValPos]},
+ {ram_copies, Nodes}];
+ false ->
+ [{type, Type}, {attributes, Attrs}, {index, [ValPos]},
+ {disc_only_copies, [Node1]}, {ram_copies, [Node2]},
+ {disc_copies, [Node3]}]
+ end,
+ ?match({atomic, ok}, mnesia:create_table(Tab, Schema)),
+
+ Size = 10,
+ Keys = lists:seq(1, Size),
+ Records = [{Tab, A, 7} || A <- Keys],
+ lists:foreach(fun(Rec) -> ?match(ok, mnesia:dirty_write(Rec)) end, Records),
+ ?match(Mem when is_integer(Mem), mnesia:table_info(Tab, memory)),
+ ?match(Size, mnesia:table_info(Tab, size)),
+ ?match(Type, mnesia:table_info(Tab, type)),
+
+ case mnesia_test_lib:diskless(Config) of
+ true ->
+ ?match(Nodes, mnesia:table_info(Tab, ram_copies));
+ false ->
+ ?match([Node3], mnesia:table_info(Tab, mnesia_test_lib:storage_type(disc_copies, Config))),
+ ?match([Node2], mnesia:table_info(Tab, ram_copies)),
+ ?match([Node1], mnesia:table_info(Tab, mnesia_test_lib:storage_type(disc_only_copies, Config)))
+ end,
+ Read = [Node1, Node2, Node3],
+ ?match(true, lists:member(mnesia:table_info(Tab, where_to_read), Read)),
+ Write = ?sort([Node1, Node2, Node3]),
+ ?match(Write, ?sort(mnesia:table_info(Tab, where_to_write))),
+ ?match([ValPos], mnesia:table_info(Tab, index)),
+ ?match(Arity, mnesia:table_info(Tab, arity)),
+ ?match(Attrs, mnesia:table_info(Tab, attributes)),
+ ?match({Tab, '_', '_'}, mnesia:table_info(Tab, wild_pattern)),
+ ?match({atomic, Attrs}, mnesia:transaction(fun() ->
+ mnesia:table_info(Tab, attributes) end)),
+
+ ?match(L when is_list(L), mnesia:table_info(Tab, all)),
+
+ %% Table info when table not loaded
+ ?match({atomic, ok},
+ mnesia:create_table(tab_info, Schema)),
+ ?match(stopped, mnesia:stop()),
+ ?match(stopped, rpc:call(Node2, mnesia, stop, [])),
+ ?match(ok, mnesia:start()),
+ ?match(ok, mnesia:wait_for_tables([tab_info], 5000)),
+ ?match(0, mnesia:table_info(tab_info, size)),
+ ?verify_mnesia([Node1, Node3], [Node2]).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Check the error descriptions
+
+error_description(suite) -> [];
+error_description(Config) when is_list(Config) ->
+ ?acquire_nodes(1, Config),
+ Errors = [nested_transaction, badarg, no_transaction, combine_error,
+ bad_index, already_exists, index_exists, no_exists, system_limit,
+ mnesia_down, not_a_db_node, bad_type, node_not_running,
+ truncated_binary_file, active, illegal
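+%% Helper spawned above: issue N dump_log requests back to back and
+%% notify the test process when done.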
+ ],
+ ?match(X when is_atom(X), mnesia:error_description({error, bad_error_msg})),
+ ?match(X when is_tuple(X), mnesia:error_description({'EXIT', pid, bad})),
+ %% This is real error msg
+ ?match(X when is_tuple(X), mnesia:error_description(
+ {error,
+ {"Cannot prepare checkpoint (bad reply)",
+ {{877,957351,758147},a@legolas},
+ {error,{node_not_running,a1@legolas}}}})),
+ check_errors(error, Errors),
+ check_errors(aborted, Errors),
+ check_errors('EXIT', Errors).
+
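+%% Assert that error_description/1 returns a descriptive string for each
+%% error tag, regardless of whether it is wrapped in error, aborted or 'EXIT'.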
+check_errors(_Err, []) -> ok;
+check_errors(Err, [Desc|R]) ->
+ ?match(X when is_list(X), mnesia:error_description({Err, Desc})),
+ check_errors(Err, R).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Add and drop db nodes
+
+db_node_lifecycle(suite) -> [];
+db_node_lifecycle(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = AllNodes = ?acquire_nodes(3, Config),
+ Tab = db_node_lifecycle,
+
+ Who = fun(T) ->
+ L1 = mnesia:table_info(T, ram_copies),
+ L2 = mnesia:table_info(T, disc_copies),
+ L3 = mnesia:table_info(T, disc_only_copies),
+ L1 ++ L2 ++ L3
+ end,
+
+ SNs = ?sort(AllNodes),
+
+ Schema = [{name, Tab}, {ram_copies, [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ ?match([], mnesia_test_lib:stop_mnesia(AllNodes)),
+ ?match(ok, mnesia:delete_schema(AllNodes)),
+ ?match({error, _}, mnesia:create_schema(foo)),
+ ?match({error, _}, mnesia:create_schema([foo])),
+ ?match({error, _}, mnesia:create_schema([foo@bar])),
+ ?match(ok, mnesia:start()),
+ ?match(false, mnesia:system_info(use_dir)),
+ ?match({atomic, ok}, mnesia:create_table(Tab, [])),
+ ?match({aborted, {has_no_disc, Node1}}, mnesia:dump_tables([Tab])),
+ ?match({aborted, {has_no_disc, Node1}}, mnesia:change_table_copy_type(Tab, node(), disc_copies)),
+ ?match({aborted, {has_no_disc, Node1}}, mnesia:change_table_copy_type(Tab, node(), disc_only_copies)),
+
+ ?match(stopped, mnesia:stop()),
+
+ ?match(ok, mnesia:create_schema(AllNodes)),
+ ?match([], mnesia_test_lib:start_mnesia(AllNodes)),
+
+ ?match([SNs, SNs, SNs],
+ lists:map({lists, sort},
+ element(1, rpc:multicall(AllNodes, mnesia, table_info,
+ [schema, disc_copies])))),
+
+ ?match({aborted, {already_exists, schema, Node2, _}},
+ mnesia:change_table_copy_type(schema, Node2, disc_copies)),
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(schema, Node2, ram_copies)),
+ ?match({aborted, {already_exists, schema, Node2, _}},
+ mnesia:change_table_copy_type(schema, Node2, ram_copies)),
+
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(schema, Node2, disc_copies)),
+
+ ?match([SNs, SNs, SNs],
+ lists:map({lists, sort},
+ element(1, rpc:multicall(AllNodes, mnesia, table_info,
+ [schema, disc_copies])))),
+
+ %% Delete the DB
+
+ Tab2 = disk_tab,
+ Tab3 = not_local,
+ Tab4 = local,
+ Tab5 = remote,
+
+ Tabs = [Schema,
+ [{name, Tab2}, {disc_copies, AllNodes}],
+ [{name, Tab3}, {ram_copies, [Node2, Node3]}],
+ [{name, Tab4}, {disc_only_copies, [Node1]}],
+ [{name, Tab5}, {disc_only_copies, [Node2]}]],
+
+ [?match({atomic, ok}, mnesia:create_table(T)) || T <- Tabs ],
+
+ ?match({aborted, {active, _, Node2}},
+ mnesia:del_table_copy(schema, Node2)),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node1])),
+ ?match({aborted, {node_not_running, Node1}},
+ mnesia:del_table_copy(schema, Node2)),
+
+ ?match([], mnesia_test_lib:start_mnesia([Node1],[Tab2,Tab4])),
+ ?match([], mnesia_test_lib:stop_mnesia([Node2])),
+ ?match({atomic, ok},
+ mnesia:del_table_copy(schema, Node2)),
+
+ %% Check
+ RemNodes = AllNodes -- [Node2],
+
+ ?match(RemNodes, mnesia:system_info(db_nodes)),
+ ?match([Node1], Who(Tab)),
+ ?match(RemNodes, Who(Tab2)),
+ ?match([Node3], Who(Tab3)),
+ ?match([Node1], Who(Tab4)),
+ ?match({'EXIT', {aborted, {no_exists, _, _}}}, Who(Tab5)),
+
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(Tab2, Node3, ram_copies)),
+
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(schema, Node3, ram_copies)),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node3])),
+ ?match({atomic, ok},
+ mnesia:del_table_copy(schema, Node3)),
+ ?match([Node1], mnesia:system_info(db_nodes)),
+ ?match([Node1], Who(Tab)),
+ ?match([Node1], Who(Tab2)),
+ ?match({'EXIT', {aborted, {no_exists, _, _}}}, Who(Tab3)),
+ ?match([Node1], Who(Tab4)),
+ ?match({'EXIT', {aborted, {no_exists, _, _}}}, Who(Tab5)),
+
+ ?verify_mnesia([Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Drop a db node when several disk resident nodes are down
+
+evil_delete_db_node(suite) -> [];
+evil_delete_db_node(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = AllNodes = ?acquire_nodes(3, Config),
+ Tab = evil_delete_db_node,
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{disc_copies, AllNodes}])),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node2, Node3])),
+
+ ?match({atomic, ok}, mnesia:del_table_copy(schema, Node2)),
+
+ RemNodes = AllNodes -- [Node2],
+
+ ?match(RemNodes, mnesia:system_info(db_nodes)),
+ ?match(RemNodes, mnesia:table_info(Tab, disc_copies)),
+
+ ?verify_mnesia([Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Start and stop the system
+
+start_and_stop(suite) -> [];
+start_and_stop(Config) when is_list(Config) ->
+ [Node1 | _] = Nodes = ?acquire_nodes(all, Config),
+
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ ?match(ok, rpc:call(Node1, mnesia, start, [])),
+ ?match(ok, rpc:call(Node1, mnesia, start, [])),
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ ?verify_mnesia(Nodes -- [Node1], [Node1]),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes)),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Checkpoints and backup management
+
+checkpoint(suite) -> [];
+checkpoint(Config) when is_list(Config) ->
+ checkpoint(2, Config),
+ checkpoint(3, Config).
+
+checkpoint(NodeConfig, Config) ->
+ [Node1 | _] = TabNodes = ?acquire_nodes(NodeConfig, Config),
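+    %% For each storage type, create three tables: one replicated on the
+    %% first node only, one on all nodes and one on the last node only.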
+ CreateTab = fun(Type, N, Ns) ->
+ Tab0 = lists:concat(["local_checkpoint_", Type, N]),
+ Tab = list_to_atom(Tab0),
+ catch mnesia:delete_table(Tab),
+ ?match({atomic, ok},
+ mnesia:create_table(Tab, [{Type, Ns}])),
+ Tab
+ end,
+ CreateTabs = fun(Type, Acc) ->
+ [CreateTab(Type, 1, [hd(TabNodes)]),
+ CreateTab(Type, 2, TabNodes),
+ CreateTab(Type, 3, [lists:last(TabNodes)])] ++
+ Acc
+ end,
+ Types = [ram_copies, disc_copies, disc_only_copies],
+ Tabs = lists:foldl(CreateTabs, [], Types),
+ Recs = ?sort([{T, N, N} || T <- Tabs, N <- lists:seq(1, 10)]),
+ lists:foreach(fun(R) -> ?match(ok, mnesia:dirty_write(R)) end, Recs),
+
+ CpName = a_checkpoint_name,
+ MinArgs = [{name, CpName}, {min, Tabs}, {allow_remote, false}],
+ ?match({error, _}, rpc:call(Node1, mnesia, activate_checkpoint, [MinArgs])),
+
+ MaxArgs = [{name, CpName}, {max, Tabs}, {allow_remote, true}],
+ ?match({ok, CpName, L} when is_list(L),
+ rpc:call(Node1, mnesia, activate_checkpoint, [MaxArgs])),
+ ?match(ok, rpc:call(Node1, mnesia, deactivate_checkpoint, [CpName])),
+
+ Args = [{name, CpName}, {min, Tabs}, {allow_remote, true}],
+ ?match({ok, CpName, L} when is_list(L),
+ rpc:call(Node1, mnesia, activate_checkpoint, [Args])),
+ Recs2 = ?sort([{T, K, 0} || {T, K, _} <- Recs]),
+ lists:foreach(fun(R) -> ?match(ok, mnesia:dirty_write(R)) end, Recs2),
+ ?match(ok, rpc:call(Node1, mnesia, deactivate_checkpoint, [CpName])),
+
+ ?match({error, Reason1 } when element(1, Reason1) == no_exists,
+ mnesia:deactivate_checkpoint(CpName)),
+ ?match({error, Reason2 } when element(1, Reason2) == badarg,
+ mnesia:activate_checkpoint(foo)),
+ ?match({error, Reason3 } when element(1, Reason3) == badarg,
+ mnesia:activate_checkpoint([{foo, foo}])),
+ ?match({error, Reason4 } when element(1, Reason4) == badarg,
+ mnesia:activate_checkpoint([{max, foo}])),
+ ?match({error, Reason5 } when element(1, Reason5) == badarg,
+ mnesia:activate_checkpoint([{min, foo}])),
+ ?match({error, _}, mnesia:activate_checkpoint([{min, [foo@bar]}])),
+ ?match({error, Reason6 } when element(1, Reason6) == badarg,
+ mnesia:activate_checkpoint([{allow_remote, foo}])),
+
+ Fun = fun(Tab) -> ?match({atomic, ok}, mnesia:delete_table(Tab)) end,
+ lists:foreach(Fun, Tabs),
+ ?verify_mnesia(TabNodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Create and delete tables
+
+%% Get meta info about table
+
+-define(vrl, mnesia_test_lib:verify_replica_location).
+
+replica_location(suite) -> [];
+replica_location(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab = replica_location,
+
+ %% Create three replicas
+ Schema = [{name, Tab}, {disc_only_copies, [Node1]},
+ {ram_copies, [Node2]}, {disc_copies, [Node3]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ?match([], ?vrl(Tab, [Node1], [Node2], [Node3], Nodes)),
+
+ %% Delete one replica
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node2)),
+ ?match([], ?vrl(Tab, [Node1], [], [Node3], Nodes)),
+
+ %% Move one replica
+ ?match({atomic, ok}, mnesia:move_table_copy(Tab, Node1, Node2)),
+ ?match([], ?vrl(Tab, [Node2], [], [Node3], Nodes)),
+
+ %% Change replica type
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node2, ram_copies)),
+ ?match([], ?vrl(Tab, [], [Node2], [Node3], Nodes)),
+
+ ?verify_mnesia(Nodes, []).
+
+table_lifecycle(suite) -> [];
+table_lifecycle(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ ?match({atomic, ok}, mnesia:create_table([{type, bag},
+ {ram_copies, [Node1]},
+ {attributes, [rajtan, tajtan]},
+ {name, order_of_args}])),
+ ?match([], mnesia:dirty_read({order_of_args, 4711})),
+ ?match({atomic, ok}, mnesia:create_table([{name, already_exists},
+ {ram_copies, [Node1]}])),
+    ?match({aborted, Reason23} when element(1, Reason23) == already_exists,
+ mnesia:create_table([{name, already_exists},
+ {ram_copies, [Node1]}])),
+ ?match({aborted, Reason21 } when element(1, Reason21) == bad_type,
+ mnesia:create_table([{name, bad_node}, {ram_copies, ["foo"]}])),
+ ?match({aborted, Reason2} when element(1, Reason2) == bad_type,
+ mnesia:create_table([{name, zero_arity}, {attributes, []}])),
+ ?match({aborted, Reason3} when element(1, Reason3) == badarg,
+ mnesia:create_table([])),
+ ?match({aborted, Reason4} when element(1, Reason4) == badarg,
+ mnesia:create_table(atom)),
+ ?match({aborted, Reason5} when element(1, Reason5) == badarg,
+ mnesia:create_table({cstruct, table_name_as_atom})),
+ ?match({aborted, Reason6 } when element(1, Reason6) == bad_type,
+ mnesia:create_table([{name, no_host}, {ram_copies, foo}])),
+ ?match({aborted, Reason7 } when element(1, Reason7) == bad_type,
+ mnesia:create_table([{name, no_host}, {disc_only_copies, foo}])),
+ ?match({aborted, Reason8} when element(1, Reason8) == bad_type,
+ mnesia:create_table([{name, no_host}, {disc_copies, foo}])),
+
+ CreateFun =
+ fun() -> ?match({aborted, nested_transaction},
+ mnesia:create_table([{name, nested_trans}])), ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(CreateFun)),
+ ?match({atomic, ok}, mnesia:create_table([{name, remote_tab},
+ {ram_copies, [Node2]}])),
+
+ ?match({atomic, ok}, mnesia:create_table([{name, a_brand_new_tab},
+ {ram_copies, [Node1]}])),
+ ?match([], mnesia:dirty_read({a_brand_new_tab, 4711})),
+ ?match({atomic, ok}, mnesia:delete_table(a_brand_new_tab)),
+ ?match({'EXIT', {aborted, Reason31}} when element(1, Reason31) == no_exists,
+ mnesia:dirty_read({a_brand_new_tab, 4711})),
+ ?match({aborted, Reason41} when element(1, Reason41) == no_exists,
+ mnesia:delete_table(a_brand_new_tab)),
+ ?match({aborted, Reason9} when element(1, Reason9) == badarg,
+ mnesia:create_table([])),
+
+ ?match({atomic, ok}, mnesia:create_table([{name, nested_del_trans},
+ {ram_copies, [Node1]}])),
+
+ DeleteFun = fun() -> ?match({aborted, nested_transaction},
+ mnesia:delete_table(nested_del_trans)), ok end,
+ ?match({atomic, ok}, mnesia:transaction(DeleteFun)),
+
+ ?match({aborted, Reason10} when element(1, Reason10) == bad_type,
+ mnesia:create_table([{name, create_with_index}, {index, 2}])),
+ ?match({aborted, Reason32} when element(1, Reason32) == bad_type,
+ mnesia:create_table([{name, create_with_index}, {index, [-1]}])),
+ ?match({aborted, Reason33} when element(1, Reason33) == bad_type,
+ mnesia:create_table([{name, create_with_index}, {index, [0]}])),
+ ?match({aborted, Reason34} when element(1, Reason34) == bad_type,
+ mnesia:create_table([{name, create_with_index}, {index, [1]}])),
+ ?match({aborted, Reason35} when element(1, Reason35) == bad_type,
+ mnesia:create_table([{name, create_with_index}, {index, [2]}])),
+ ?match({atomic, ok},
+ mnesia:create_table([{name, create_with_index}, {index, [3]},
+ {ram_copies, [Node1]}])),
+ ets:new(ets_table, [named_table]),
+
+ ?match({aborted, _}, mnesia:create_table(ets_table, [{ram_copies, Nodes}])),
+
+ ?verify_mnesia(Nodes, []).
+
+add_copy_conflict(suite) -> [];
+add_copy_conflict(doc) ->
+ ["Verify that OTP-5065 doesn't happen again, whitebox testing"];
+add_copy_conflict(Config) when is_list(Config) ->
+ Nodes = [Node1, Node2] =
+ ?acquire_nodes(2, Config ++ [{tc_timeout, timer:minutes(2)}]),
+
+ ?match({atomic, ok}, mnesia:create_table(a, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(b, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(test, [{ram_copies, [Node2]}])),
+ mnesia:stop(),
+ ?match(ok,mnesia:start([{no_table_loaders, 1}])),
+
+ verify_ll_queue(10),
+
+ Self = self(),
+ Test = fun() ->
+ Res = mnesia:add_table_copy(test, Node1, ram_copies),
+ Self ! {test, Res}
+ end,
+ %% Create conflict with loader queue.
+ spawn_link(Test),
+ ?match_receive(timeout),
+ %% Conflict ok
+ mnesia_controller:unblock_controller(),
+
+ ?match_receive({test, {atomic,ok}}),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(1, Config).
+
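+%% Whitebox helper: block the controller and check that a table load
+%% request is sitting in the loader queue, retrying up to N times since
+%% slow nodes may not have queued it yet.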
+verify_ll_queue(0) ->
+ ?error("Couldn't find anything in queue~n",[]);
+verify_ll_queue(N) ->
+ ?match(granted,mnesia_controller:block_controller()),
+ case mnesia_controller:get_info(1000) of
+ {info,{state,_,true,[],_Loader,[],[],[],_,_,_,_,_,_}} ->
+            %% Very slow SMP machines haven't loaded it yet...
+ mnesia_controller:unblock_controller(),
+ timer:sleep(10),
+ verify_ll_queue(N-1);
+ {info,{state,_,true,[],Loader,LL,[],[],_,_,_,_,_,_}} ->
+ io:format("~p~n", [{Loader,LL}]),
+ ?match([_], LL); %% Verify that something is in the loader queue
+ Else ->
+            ?error("No match ~p, maybe the internal format has changed~n",[Else])
+ end.
+
+add_copy_when_going_down(suite) -> [];
+add_copy_when_going_down(doc) ->
+    ["Tests abort when the node we load from goes down"];
+add_copy_when_going_down(Config) ->
+ [Node1, Node2] =
+ ?acquire_nodes(2, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ ?match({atomic, ok}, mnesia:create_table(a, [{ram_copies, [Node1]}])),
+ %% Grab a write lock
+ WriteAndWait = fun() ->
+ mnesia:write({a,1,1}),
+ receive continue -> ok
+ end
+ end,
+ _Lock = spawn(fun() -> mnesia:transaction(WriteAndWait) end),
+ Tester = self(),
+ spawn_link(fun() -> Res = rpc:call(Node2,mnesia, add_table_copy,
+ [a, Node2, ram_copies]),
+ Tester ! {test, Res}
+ end),
+ %% We have a lock here we should get a timeout
+ ?match_receive(timeout),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ ?match_receive({test,{aborted,_}}),
+ ?verify_mnesia([Node2], []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Add, drop and move replicas, change storage types
+%% Change table layout (only arity change supported)
+
+-record(replica_management, {k, v}).
+-record(new_replica_management, {k, v, extra}).
+
+-define(SS(R), lists:sort(element(1,R))).
+
+replica_management(doc) ->
+ "Add, drop and move replicas, change storage types.";
+replica_management(suite) ->
+ [];
+replica_management(Config) when is_list(Config) ->
+ %% add_table_copy/3, del_table_copy/2, move_table_copy/3,
+ %% change_table_copy_type/3, transform_table/3
+
+ Nodes = [Node1, Node2, Node3] = ?acquire_nodes(3, Config),
+
+ Tab = replica_management,
+ Attrs = record_info(fields, replica_management),
+
+ %%
+ %% Add, delete and change replicas
+ %%
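+    %% The one-letter comments below track the replica type on each node:
+    %% R = ram_copies, D = disc_copies, DO = disc_only_copies, - = no replica.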
+ ?match({atomic, ok},
+ mnesia:create_table([{name, Tab}, {attributes, Attrs},
+ {ram_copies, [Node1, Node3]}])),
+ [?match(ok, mnesia:dirty_write({Tab, K, K + 2})) || K <-lists:seq(1, 10)],
+ ?match([], ?vrl(Tab, [], [Node1, Node3], [], Nodes)),
+ %% R - -
+ ?match({atomic, ok}, mnesia:dump_tables([Tab])),
+ ?match({aborted, Reason50 } when element(1, Reason50) == combine_error,
+ mnesia:add_table_copy(Tab, Node2, disc_copies)),
+ ?match({aborted, Reason51 } when element(1, Reason51) == combine_error,
+ mnesia:change_table_copy_type(Tab, Node1, disc_copies)),
+ ?match({atomic, ok}, mnesia:clear_table(Tab)),
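+    %% Take a write lock so all replicas are in sync, then compare the
+    %% table size on every node.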
+ SyncedCheck = fun() ->
+ mnesia:lock({record,Tab,0}, write),
+ ?match([0,0,0], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size])))
+ end,
+ mnesia:transaction(SyncedCheck),
+
+ ?match({[0,0,0], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node1)),
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node3)),
+ ?match([], ?vrl(Tab, [], [], [], Nodes)),
+ %% - - -
+ ?match({aborted,Reason52} when element(1, Reason52) == no_exists,
+ mnesia:add_table_copy(Tab, Node3, ram_copies)),
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {attributes, Attrs},
+ {disc_copies, [Node1]}])),
+ ?match([], ?vrl(Tab, [], [], [Node1], Nodes)),
+ %% D - -
+ [?match(ok, mnesia:dirty_write({Tab, K, K + 2})) || K <-lists:seq(1, 10)],
+
+ ?match({aborted, Reason53} when element(1, Reason53) == badarg,
+ mnesia:add_table_copy(Tab, Node2, bad_storage_type)),
+ ?match({atomic, ok}, mnesia:add_table_copy(Tab, Node2, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node2], [], [Node1], Nodes)),
+ ?match([0,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% D DO -
+ ?match({atomic, ok}, mnesia:add_table_copy(Tab, Node3, ram_copies)),
+ ?match([], ?vrl(Tab, [Node2], [Node3], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% D DO R
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(Tab, Node1, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node1, Node2], [Node3], [], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% DO DO R
+ ?match({aborted, Reason54} when element(1, Reason54) == already_exists,
+ mnesia:add_table_copy(Tab, Node3, ram_copies)),
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node1)),
+ ?match([], ?vrl(Tab, [Node2], [Node3], [], Nodes)),
+ %% - DO R
+ ?match({aborted, _}, mnesia:del_table_copy(Tab, Node1)),
+ ?match(Tab, ets:new(Tab, [named_table])),
+ ?match({aborted, _}, mnesia:add_table_copy(Tab, Node1, disc_copies)),
+ ?match(true, ets:delete(Tab)),
+ ?match({atomic, ok}, mnesia:add_table_copy(Tab, Node1, disc_copies)),
+ ?match([], ?vrl(Tab, [Node2], [Node3], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% D DO R
+ ?match({atomic, ok},mnesia:change_table_copy_type(Tab, Node3, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node2, Node3], [], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+
+ %% D DO D0
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node3, ram_copies)),
+ ?match([], ?vrl(Tab, [Node2], [Node3], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% D DO R
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(Tab, Node2, disc_copies)),
+ ?match([], ?vrl(Tab, [], [Node3], [Node1,Node2], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+
+ %% D D R
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node1, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node1], [Node3], [Node2], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+
+ %% DO D R
+ ?match(Tab, ets:new(Tab, [named_table])),
+ ?match({aborted, _}, mnesia:change_table_copy_type(Tab, Node1, ram_copies)),
+ ?match(true, ets:delete(Tab)),
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node1, ram_copies)),
+ ?match([], ?vrl(Tab, [], [Node3,Node1], [Node2], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% R D R
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node1, disc_copies)),
+ ?match([], ?vrl(Tab, [], [Node3], [Node2,Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+
+ %% D D R
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node2, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node2], [Node3], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+
+ %% D DO R
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, Node3, disc_only_copies)),
+ ?match([], ?vrl(Tab, [Node2, Node3], [], [Node1], Nodes)),
+ ?match([10,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% D DO DO
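+    %% Backup traversal fun: records from Tab with even keys are kept in Tab,
+    %% records with odd keys are moved to Tab3; the accumulator counts the
+    %% converted records and everything else passes through untouched.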
+ %% test clear
+ ?match({atomic, ok}, mnesia:clear_table(Tab)),
+ mnesia:transaction(SyncedCheck),
+
+ %% rewrite for rest of testcase
+ [?match(ok, mnesia:dirty_write({Tab, K, K + 2})) || K <-lists:seq(1, 10)],
+
+ %% D DO DO
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node2)),
+ ?match([], ?vrl(Tab, [Node3], [], [Node1], Nodes)),
+ %% D - DO
+ ?match({aborted, Reason55} when element(1, Reason55) == already_exists,
+ mnesia:change_table_copy_type(Tab, Node1, disc_copies)),
+
+ %%
+ %% Move replica
+ %%
+ ?match({atomic, ok}, mnesia:move_table_copy(Tab, Node1, Node2)),
+ ?match([], ?vrl(Tab, [Node3], [], [Node2], Nodes)),
+ ?match([0,10,10], ?SS(rpc:multicall(Nodes, mnesia, table_info, [Tab, size]))),
+ %% - D DO
+ ?match({aborted, _}, mnesia:move_table_copy(Tab, Node1, Node2)),
+ ?match([], mnesia_test_lib:stop_mnesia([Node3])),
+ ?match({atomic,ok}, mnesia:transaction(fun() -> mnesia:write({Tab, 43, sync_me}) end)),
+ ?match([], ?vrl(Tab, [Node3], [], [Node2],Nodes -- [Node3])),
+ %% - D DO
+ ?match({aborted,Reason56} when element(1, Reason56) == not_active,
+ mnesia:move_table_copy(Tab, Node3, Node1)),
+ ?match([], ?vrl(Tab, [Node3], [], [Node2],Nodes -- [Node3])),
+ %% DO D -
+ ?match([], mnesia_test_lib:start_mnesia([Node3])),
+ ?match([], ?vrl(Tab, [Node3], [], [Node2], Nodes)),
+ %% DO D -
+
+ %%
+ %% Transformer
+ %%
+
+ NewAttrs = record_info(fields, new_replica_management),
+ Transformer =
+ fun(Rec) when is_record(Rec, replica_management) ->
+ #new_replica_management{k = Rec#replica_management.k,
+ v = Rec#replica_management.v,
+ extra = Rec#replica_management.k * 2}
+ end,
+ ?match({atomic, ok}, mnesia:transform_table(Tab, fun(R) -> R end, Attrs)),
+ ?match({atomic, ok}, mnesia:transform_table(Tab, Transformer, NewAttrs, new_replica_management)),
+
+ ERlist = [#new_replica_management{k = K, v = K+2, extra = K*2} || K <- lists:seq(1, 10)],
+ ARlist = [hd(mnesia:dirty_read(Tab, K)) || K <- lists:seq(1, 10)],
+
+ ?match(ERlist, ARlist),
+
+ ?match({aborted, Reason56} when element(1, Reason56) == bad_type,
+ mnesia:transform_table(Tab, Transformer, 0)),
+ ?match({aborted, Reason57} when element(1, Reason57) == bad_type,
+ mnesia:transform_table(Tab, Transformer, -1)),
+ ?match({aborted, Reason58} when element(1, Reason58) == bad_type,
+ mnesia:transform_table(Tab, Transformer, [])),
+ ?match({aborted, Reason59} when element(1, Reason59) == bad_type,
+ mnesia:transform_table(Tab, no_fun, NewAttrs)),
+ ?match({aborted, Reason59} when element(1, Reason59) == bad_type,
+ mnesia:transform_table(Tab, fun(X) -> X end, NewAttrs, {tuple})),
+
+ %% OTP-3878
+ ?match({atomic, ok}, mnesia:transform_table(Tab, ignore,
+ NewAttrs ++ [dummy])),
+ ?match({atomic, ok}, mnesia:transform_table(Tab, ignore,
+ NewAttrs ++ [dummy], last_rec)),
+
+ ARlist = [hd(mnesia:dirty_read(Tab, K)) || K <- lists:seq(1, 10)],
+ ?match({'EXIT',{aborted,{bad_type,_}}},
+ mnesia:dirty_write(Tab, #new_replica_management{})),
+ ?match(ok, mnesia:dirty_write(Tab, {last_rec, k, v, e, dummy})),
+
+ ?verify_mnesia(Nodes, []).
+
+schema_availability(doc) ->
+    ["Test that schema operations succeed (or fail) as intended when some db nodes are down."];
+schema_availability(suite) ->
+ [];
+schema_availability(Config) when is_list(Config) ->
+ [N1, _N2, N3] = Nodes = ?acquire_nodes(3, Config),
+ Tab = schema_availability,
+ Storage = mnesia_test_lib:storage_type(ram_copies, Config),
+ Def1 = [{Storage, [N1, N3]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def1)),
+
+ N = 10,
+ ?match(ok, mnesia:sync_dirty(fun() -> [mnesia:write({Tab, K, K + 2}) || K <- lists:seq(1, N)], ok end)),
+ ?match({[N,0,N], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+ ?match({[N,0,0], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+
+ ?match([], mnesia_test_lib:start_mnesia([N3], [Tab])),
+ ?match({[N,0,N], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+
+ ?match({atomic, ok}, mnesia:clear_table(Tab)),
+ ?match({[0,0,0], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+
+ ?match([], mnesia_test_lib:start_mnesia([N3], [Tab])),
+ ?match({[0,0,0], []}, rpc:multicall(Nodes, mnesia, table_info, [Tab, size])),
+
+ ?verify_mnesia(Nodes, []).
+
+-define(badrpc(Tab), {badrpc, {'EXIT', {aborted,{no_exists,Tab}}}}).
+
+local_content(doc) ->
+    ["Test local_content functionality; we want to see that the correct",
+     " properties get propagated correctly between nodes"];
+local_content(suite) -> [];
+local_content(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab1 = local1,
+ Def1 = [{local_content, true}, {ram_copies, Nodes}],
+ Tab2 = local2,
+ Def2 = [{local_content, true}, {disc_copies, [Node1]}],
+ Tab3 = local3,
+ Def3 = [{local_content, true}, {disc_only_copies, [Node1]}],
+ Tab4 = local4,
+ Def4 = [{local_content, true}, {ram_copies, [Node1]}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+ ?match({atomic, ok}, mnesia:create_table(Tab4, Def4)),
+
+ ?match(ok, rpc:call(Node1, mnesia, dirty_write, [{Tab1, 1, Node1}])),
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab1, 1, Node2}])),
+ ?match(ok, rpc:call(Node3, mnesia, dirty_write, [{Tab1, 1, Node3}])),
+ ?match(ok, rpc:call(Node1, mnesia, dirty_write, [{Tab2, 1, Node1}])),
+ ?match(ok, rpc:call(Node1, mnesia, dirty_write, [{Tab3, 1, Node1}])),
+ ?match(ok, rpc:call(Node1, mnesia, dirty_write, [{Tab4, 1, Node1}])),
+
+ ?match(?badrpc(Tab2), rpc:call(Node2, mnesia, dirty_write, [{Tab2, 1, Node2}])),
+ ?match(?badrpc(Tab3), rpc:call(Node2, mnesia, dirty_write, [{Tab3, 1, Node2}])),
+ ?match(?badrpc(Tab4), rpc:call(Node2, mnesia, dirty_write, [{Tab4, 1, Node2}])),
+
+ ?match({atomic, ok}, rpc:call(Node1, mnesia, add_table_copy, [Tab2, Node2, ram_copies])),
+ ?match({atomic, ok}, rpc:call(Node2, mnesia, add_table_copy, [Tab3, Node2, disc_copies])),
+ ?match({atomic, ok}, rpc:call(Node3, mnesia, add_table_copy, [Tab4, Node2, disc_only_copies])),
+ ?match([], rpc:call(Node2, mnesia, dirty_read, [{Tab2, 1}])),
+ ?match([], rpc:call(Node2, mnesia, dirty_read, [{Tab3, 1}])),
+ ?match([], rpc:call(Node2, mnesia, dirty_read, [{Tab4, 1}])),
+
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab2, 1, Node2}])),
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab3, 1, Node2}])),
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab4, 1, Node2}])),
+
+ ?match([{Tab1, 1, Node1}], rpc:call(Node1, mnesia, dirty_read, [{Tab1, 1}])),
+ ?match([{Tab2, 1, Node1}], rpc:call(Node1, mnesia, dirty_read, [{Tab2, 1}])),
+ ?match([{Tab3, 1, Node1}], rpc:call(Node1, mnesia, dirty_read, [{Tab3, 1}])),
+ ?match([{Tab4, 1, Node1}], rpc:call(Node1, mnesia, dirty_read, [{Tab4, 1}])),
+
+ ?match([{Tab1, 1, Node2}], rpc:call(Node2, mnesia, dirty_read, [{Tab1, 1}])),
+ ?match([{Tab2, 1, Node2}], rpc:call(Node2, mnesia, dirty_read, [{Tab2, 1}])),
+ ?match([{Tab3, 1, Node2}], rpc:call(Node2, mnesia, dirty_read, [{Tab3, 1}])),
+ ?match([{Tab4, 1, Node2}], rpc:call(Node2, mnesia, dirty_read, [{Tab4, 1}])),
+
+ ?match([{Tab1, 1, Node3}], rpc:call(Node3, mnesia, dirty_read, [{Tab1, 1}])),
+ ?match(?badrpc([_Tab2, 1]), rpc:call(Node3, mnesia, dirty_read, [{Tab2, 1}])),
+ ?match(?badrpc([_Tab3, 1]), rpc:call(Node3, mnesia, dirty_read, [{Tab3, 1}])),
+ ?match(?badrpc([_Tab4, 1]), rpc:call(Node3, mnesia, dirty_read, [{Tab4, 1}])),
+
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(schema, Node3, ram_copies)),
+ ?match([], mnesia_test_lib:stop_mnesia([Node3])),
+
+ %% Added for OTP-44306
+ ?match(ok, rpc:call(Node3, mnesia, start, [])),
+ ?match({ok, _}, mnesia:change_config(extra_db_nodes, [Node3])),
+
+ mnesia_test_lib:sync_tables([Node3], [Tab1]),
+
+ ?match([], rpc:call(Node3, mnesia, dirty_read, [{Tab1, 1}])),
+
+ ?match({atomic, ok}, rpc:call(Node1, mnesia, clear_table, [Tab1])),
+
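+    %% Lock the table and read its size on every node; with local_content
+    %% set, clearing a table on one node must not touch the other copies.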
+ SyncedCheck = fun(Tab) ->
+ mnesia:lock({record,Tab,0}, write),
+ {OK, []} = rpc:multicall(Nodes, mnesia, table_info, [Tab, size]),
+ OK
+ end,
+ ?match({atomic, [0,1,0]}, mnesia:transaction(SyncedCheck, [Tab1])),
+ ?match({atomic, ok}, rpc:call(Node2, mnesia, clear_table, [Tab2])),
+ ?match({atomic, [1,0,0]}, mnesia:transaction(SyncedCheck, [Tab2])),
+ ?match({atomic, ok}, rpc:call(Node2, mnesia, clear_table, [Tab3])),
+ ?match({atomic, [1,0,0]}, mnesia:transaction(SyncedCheck, [Tab3])),
+
+ ?verify_mnesia(Nodes, []).
+
+table_access_modifications(suite) ->
+ [
+ change_table_access_mode,
+ change_table_load_order,
+ set_master_nodes,
+ offline_set_master_nodes
+ ].
+
+change_table_access_mode(suite) -> [];
+change_table_access_mode(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab = test_access_mode_tab,
+
+ Def = case mnesia_test_lib:diskless(Config) of
+ true -> [{name, Tab}, {ram_copies, Nodes}];
+ false -> [{name, Tab}, {ram_copies, [Node1]},
+ {disc_copies, [Node2]},
+ {disc_only_copies, [Node3]}]
+ end,
+ ?match({atomic, ok}, mnesia:create_table(Def)),
+
+ Write = fun(What) -> mnesia:write({Tab, 1, What}) end,
+ Read = fun() -> mnesia:read({Tab, 1}) end,
+
+ ?match({atomic, ok}, mnesia:transaction(Write, [test_ok])),
+ %% test read_only
+ ?match({atomic, ok}, mnesia:change_table_access_mode(Tab, read_only)),
+ ?match({aborted, _}, mnesia:transaction(Write, [nok])),
+ ?match({'EXIT', {aborted, _}}, mnesia:dirty_write({Tab, 1, [nok]})),
+ ?match({aborted, _}, rpc:call(Node2, mnesia, transaction, [Write, [nok]])),
+ ?match({aborted, _}, rpc:call(Node3, mnesia, transaction, [Write, [nok]])),
+ ?match({atomic, [{Tab, 1, test_ok}]}, mnesia:transaction(Read)),
+ %% test read_write
+ ?match({atomic, ok}, mnesia:change_table_access_mode(Tab, read_write)),
+ ?match({atomic, ok}, mnesia:transaction(Write, [test_ok1])),
+ ?match({atomic, [{Tab, 1, test_ok1}]}, mnesia:transaction(Read)),
+ ?match({atomic, ok}, rpc:call(Node2, mnesia, transaction, [Write, [test_ok2]])),
+ ?match({atomic, [{Tab, 1, test_ok2}]}, mnesia:transaction(Read)),
+ ?match({atomic, ok}, rpc:call(Node3, mnesia, transaction, [Write, [test_ok3]])),
+ ?match({atomic, [{Tab, 1, test_ok3}]}, mnesia:transaction(Read)),
+
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+
+ Def4 = [{name, Tab}, {access_mode, read_only_bad}],
+ ?match({aborted, {bad_type, _, _}}, mnesia:create_table(Def4)),
+
+ Def2 = [{name, Tab}, {access_mode, read_only}],
+ ?match({atomic, ok}, mnesia:create_table(Def2)),
+ ?match({aborted, _}, mnesia:transaction(Write, [nok])),
+
+ ?match({atomic, ok}, mnesia:change_table_access_mode(Tab, read_write)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+
+ Def3 = [{name, Tab}, {mnesia_test_lib:storage_type(disc_copies, Config),
+ [Node1, Node2]},
+ {access_mode, read_write}],
+ ?match({atomic, ok}, mnesia:create_table(Def3)),
+ ?match({atomic, ok}, mnesia:transaction(Write, [ok])),
+ ?match({atomic, ok}, mnesia:change_table_access_mode(Tab, read_only)),
+ ?match({aborted, _}, mnesia:del_table_copy(Tab, Node2)),
+ ?match({aborted, _}, mnesia:del_table_copy(Tab, Node1)),
+ ?match({aborted, _}, mnesia:delete_table(Tab)),
+ ?match({atomic, ok}, mnesia:change_table_access_mode(Tab, read_write)),
+
+ ?match({aborted, {bad_type, _, _}},
+ mnesia:change_table_access_mode(Tab, strange_atom)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+
+ ?match({aborted, {no_exists, _}},
+ mnesia:change_table_access_mode(err_tab, read_only)),
+ ?match({aborted, {no_exists, _}},
+ mnesia:change_table_access_mode([Tab], read_only)),
+ ?verify_mnesia(Nodes, []).
+
+change_table_load_order(suite) -> [];
+change_table_load_order(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab1 = load_order_tab1,
+ Tab2 = load_order_tab2,
+ Tab3 = load_order_tab3,
+
+ Def = case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{ram_copies, [Node1]},
+ {disc_copies, [Node2]},
+ {disc_only_copies, [Node3]}]
+ end,
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def)),
+
+ ?match({aborted, _}, mnesia:change_table_load_order(Tab1, should_be_integer)),
+ ?match({aborted, _}, mnesia:change_table_load_order(err_tab, 5)),
+ ?match({aborted, _}, mnesia:change_table_load_order([err_tab], 5)),
+ ?match({atomic, ok}, mnesia:change_table_load_order(Tab1, 5)),
+ ?match({atomic, ok}, mnesia:change_table_load_order(Tab2, 4)),
+ ?match({atomic, ok}, mnesia:change_table_load_order(Tab3, 73)),
+
+ ?match({aborted, _}, mnesia:change_table_load_order(schema, -32)),
+
+ ?verify_mnesia(Nodes, []).
+
+set_master_nodes(suite) -> [];
+set_master_nodes(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab1 = master_node_tab1,
+ Tab2 = master_node_tab2,
+ Tab3 = master_node_tab3,
+ Def1 = [{ram_copies, [Node1, Node2]}],
+ Def2 = [{disc_copies, [Node2, Node3]}],
+ Def3 = [{disc_only_copies, [Node3, Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+
+ ?match({error, _}, mnesia:set_master_nodes(schema, ['[email protected]'])),
+ ?match({error, _}, mnesia:set_master_nodes(badtab, [Node2, Node3])),
+ ?match({error, _}, mnesia:set_master_nodes(Tab1, [Node3])),
+ ?match([], mnesia:table_info(Tab1, master_nodes)),
+
+ ?match(ok, mnesia:set_master_nodes(schema, [Node3, Node1])),
+ ?match([Node3, Node1], mnesia:table_info(schema, master_nodes)),
+ ?match(ok, mnesia:set_master_nodes(Tab1, [Node2])),
+ ?match([Node2], mnesia:table_info(Tab1, master_nodes)),
+ ?match(ok, mnesia:set_master_nodes(Tab1, [Node2, Node1])),
+ ?match([Node2, Node1], mnesia:table_info(Tab1, master_nodes)),
+ ?match(ok, mnesia:set_master_nodes(Tab2, [Node2])), % Should set where_to_read to Node2!
+ ?match([Node2], mnesia:table_info(Tab2, master_nodes)),
+ ?match(ok, mnesia:set_master_nodes(Tab3, [Node3])),
+ ?match([Node3], mnesia:table_info(Tab3, master_nodes)),
+ ?match(ok, mnesia:set_master_nodes(Tab3, [])),
+ ?match([], mnesia:table_info(Tab3, master_nodes)),
+
+ ?match(ok, mnesia:set_master_nodes([Node1])),
+ ?match([Node1], mnesia:table_info(schema, master_nodes)),
+ ?match([Node1], mnesia:table_info(Tab1, master_nodes)),
+ ?match([], mnesia:table_info(Tab2, master_nodes)),
+ ?match([Node1], mnesia:table_info(Tab3, master_nodes)),
+
+ ?match(ok, mnesia:set_master_nodes([Node1, Node2])),
+ ?match([Node1, Node2], mnesia:table_info(schema, master_nodes)),
+ ?match([Node1, Node2], mnesia:table_info(Tab1, master_nodes)),
+ ?match([Node2], mnesia:table_info(Tab2, master_nodes)),
+ ?match([Node1], mnesia:table_info(Tab3, master_nodes)),
+
+ ?verify_mnesia(Nodes, []).
+
+offline_set_master_nodes(suite) -> [];
+offline_set_master_nodes(Config) when is_list(Config) ->
+ [Node] = Nodes = ?acquire_nodes(1, Config),
+ Tab1 = offline_master_node_tab1,
+ Tab2 = offline_master_node_tab2,
+ Tab3 = offline_master_node_tab3,
+ Tabs = ?sort([Tab1, Tab2, Tab3]),
+ Def1 = [{ram_copies, [Node]}],
+ Def2 = [{disc_copies, [Node]}],
+ Def3 = [{disc_only_copies, [Node]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+ ?match([], mnesia:system_info(master_node_tables)),
+ ?match([], mnesia_test_lib:stop_mnesia([Node])),
+
+ ?match(ok, mnesia:set_master_nodes(Tab1, [Node])),
+ ?match(ok, mnesia:set_master_nodes(Tab2, [Node])),
+ ?match(ok, mnesia:set_master_nodes(Tab3, [Node])),
+ ?match([], mnesia_test_lib:start_mnesia([Node])),
+ ?match(Tabs, ?sort(mnesia:system_info(master_node_tables))),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node])),
+ ?match(ok, mnesia:set_master_nodes(Tab1, [])),
+ ?match(ok, mnesia:set_master_nodes(Tab2, [])),
+ ?match(ok, mnesia:set_master_nodes(Tab3, [])),
+ ?match([], mnesia_test_lib:start_mnesia([Node])),
+ ?match([], mnesia:system_info(master_node_tables)),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node])),
+ ?match(ok, mnesia:set_master_nodes([Node])),
+ ?match([], mnesia_test_lib:start_mnesia([Node])),
+ AllTabs = ?sort([schema | Tabs]),
+ ?match(AllTabs, ?sort(mnesia:system_info(master_node_tables))),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node])),
+ ?match(ok, mnesia:set_master_nodes([])),
+ ?match([], mnesia_test_lib:start_mnesia([Node])),
+ ?match([], mnesia:system_info(master_node_tables)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Synchronize table with log or disc
+%%
+table_sync(suite) ->
+ [
+ dump_tables,
+ dump_log,
+ wait_for_tables,
+ force_load_table
+ ].
+
+%% Dump ram tables on disc
+dump_tables(suite) -> [];
+dump_tables(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = dump_tables,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ %% Dump 10 records
+ Size = 10,
+ Keys = lists:seq(1, Size),
+ Records = [{Tab, A, 7} || A <- Keys],
+ lists:foreach(fun(Rec) -> ?match(ok, mnesia:dirty_write(Rec)) end, Records),
+
+ AllKeys = fun() -> ?sort(mnesia:all_keys(Tab)) end,
+
+ ?match({atomic, Keys}, mnesia:transaction(AllKeys)),
+ ?match({atomic, ok}, mnesia:dump_tables([Tab])),
+
+ %% Delete one record
+ ?match(ok, mnesia:dirty_delete({Tab, 5})),
+ Keys2 = lists:delete(5, Keys),
+
+ ?match({atomic, Keys2}, mnesia:transaction(AllKeys)),
+
+    %% Check that all 10 are restored after a stop
+ ?match([], mnesia_test_lib:stop_mnesia([Node1, Node2])),
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2])),
+ ?match(ok, mnesia:wait_for_tables([Tab], infinity)),
+
+ ?match({atomic, Keys}, mnesia:transaction(AllKeys)),
+
+ ?match({aborted,Reason} when element(1, Reason) == no_exists,
+ mnesia:dump_tables([foo])),
+ ?verify_mnesia(Nodes, []).
+
+dump_log(suite) -> [];
+dump_log(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = dump_log,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ Tab1 = dump_log1,
+ Schema1 = [{name, Tab1}, {attributes, [k, v]}, {disc_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema1)),
+ Tab2 = dump_log2,
+ Schema2 = [{name, Tab2}, {attributes, [k, v]}, {disc_only_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema2)),
+
+ ?match(ok, mnesia:dirty_write({Tab, 1, ok})),
+ ?match(ok, mnesia:dirty_write({Tab1, 1, ok})),
+ ?match(ok, mnesia:dirty_write({Tab2, 1, ok})),
+
+ ?match(dumped, mnesia:dump_log()),
+ ?match(dumped, rpc:call(Node2, mnesia, dump_log, [])),
+
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, Node2, ram_copies)),
+ ?match(dumped, rpc:call(Node2, mnesia, dump_log, [])),
+
+ Self = self(),
+ spawn(fun() -> dump_log(100, Self) end),
+ spawn(fun() -> dump_log(100, Self) end),
+
+ ?match(ok, receive finished -> ok after 3000 -> timeout end),
+ ?match(ok, receive finished -> ok after 3000 -> timeout end),
+
+ ?verify_mnesia(Nodes, []).
+
+dump_log(N, Tester) when N > 0 ->
+ mnesia:dump_log(),
+ dump_log(N-1, Tester);
+dump_log(_, Tester) ->
+ Tester ! finished.
+
+
+wait_for_tables(doc) ->
+    ["Interface test of wait_for_tables; see also force_load_table"];
+wait_for_tables(suite) -> [];
+wait_for_tables(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = wf_tab,
+ Schema = [{name, Tab}, {ram_copies, [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ?match(ok, mnesia:wait_for_tables([wf_tab], infinity)),
+ ?match(ok, mnesia:wait_for_tables([], timer:seconds(5))),
+ ?match({timeout, [bad_tab]}, mnesia:wait_for_tables([bad_tab], timer:seconds(5))),
+ ?match(ok, mnesia:wait_for_tables([wf_tab], 0)),
+ ?match({error,_}, mnesia:wait_for_tables([wf_tab], -1)),
+ ?verify_mnesia(Nodes, []).
+
+force_load_table(suite) -> [];
+force_load_table(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+ Tab = wf_tab,
+
+ Schema = [{name, Tab}, {disc_copies, [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, test_ok})),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ ?match(ok, rpc:call(Node2, mnesia, dirty_write, [{Tab, 1, test_nok}])),
+ mnesia_test_lib:kill_mnesia([Node2]),
+ %% timer:sleep(timer:seconds(5)),
+ ?match(ok, mnesia:start()),
+ ?match({timeout, [Tab]}, mnesia:wait_for_tables([Tab], 5)),
+ ?match({'EXIT', _}, mnesia:dirty_read({Tab, 1})),
+ ?match(yes, mnesia:force_load_table(Tab)),
+ ?match([{Tab, 1, test_ok}], mnesia:dirty_read({Tab, 1})),
+
+ ?match({error, _}, mnesia:force_load_table(error_tab)),
+ ?verify_mnesia([Node1], [Node2]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+user_properties(doc) ->
+ ["Test of reading, writing and deletion of user properties",
+ "Plus initialization of user properties when a table is created",
+ "Do also test mnesia:table_info(Tab, user_properties)"];
+user_properties(suite) -> [];
+user_properties(Config) when is_list(Config) ->
+ [Node] = Nodes = ?acquire_nodes(1, Config),
+ Tab1 = user_properties_1,
+ Tab2 = user_properties_2,
+ Tab3 = user_properties_3,
+ Def1 = [{ram_copies, [Node]}, {user_properties, []}],
+ Def2 = [{mnesia_test_lib:storage_type(disc_copies, Config), [Node]}],
+ Def3 = [{mnesia_test_lib:storage_type(disc_only_copies, Config), [Node]},
+ {user_properties, []}],
+
+ PropKey = my_prop,
+ Prop = {PropKey, some, elements},
+ Prop2 = {PropKey, some, other, elements},
+ YourProp= {your_prop},
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+
+ ?match([], mnesia:table_info(Tab1, user_properties)),
+ ?match([], mnesia:table_info(Tab2, user_properties)),
+ ?match([], mnesia:table_info(Tab3, user_properties)),
+
+ ?match({'EXIT', {no_exists, {Tab1, user_property, PropKey}}},
+ mnesia:read_table_property(Tab1, PropKey)),
+ ?match({'EXIT', {no_exists, {Tab2, user_property, PropKey}}},
+ mnesia:read_table_property(Tab2, PropKey)),
+ ?match({'EXIT', {no_exists, {Tab3, user_property, PropKey}}},
+ mnesia:read_table_property(Tab3, PropKey)),
+
+ ?match({atomic, ok}, mnesia:write_table_property(Tab1, Prop)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab2, Prop)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab3, Prop)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab1, YourProp)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab2, YourProp)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab3, YourProp)),
+
+ ?match(Prop, mnesia:read_table_property(Tab1, PropKey)),
+ ?match(Prop, mnesia:read_table_property(Tab2, PropKey)),
+ ?match(Prop, mnesia:read_table_property(Tab3, PropKey)),
+
+ ?match({atomic, ok}, mnesia:write_table_property(Tab1, Prop2)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab2, Prop2)),
+ ?match({atomic, ok}, mnesia:write_table_property(Tab3, Prop2)),
+ ?match(Prop2, mnesia:read_table_property(Tab1, PropKey)),
+ ?match(Prop2, mnesia:read_table_property(Tab2, PropKey)),
+ ?match(Prop2, mnesia:read_table_property(Tab3, PropKey)),
+
+ ?match({atomic, ok}, mnesia:delete_table_property(Tab1, PropKey)),
+ ?match({atomic, ok}, mnesia:delete_table_property(Tab2, PropKey)),
+ ?match({atomic, ok}, mnesia:delete_table_property(Tab3, PropKey)),
+
+ ?match([YourProp], mnesia:table_info(Tab1, user_properties)),
+ ?match([YourProp], mnesia:table_info(Tab2, user_properties)),
+ ?match([YourProp], mnesia:table_info(Tab3, user_properties)),
+
+ Tab4 = user_properties_4,
+ ?match({atomic, ok},
+ mnesia:create_table(Tab4, [{user_properties, [Prop]}])),
+
+ ?match([Prop], mnesia:table_info(Tab4, user_properties)),
+
+ %% Some error cases
+
+ ?match({aborted, {bad_type, Tab1, {}}},
+ mnesia:write_table_property(Tab1, {})),
+ ?match({aborted, {bad_type, Tab1, ali}},
+ mnesia:write_table_property(Tab1, ali)),
+
+ Tab5 = user_properties_5,
+ ?match({aborted, {bad_type, Tab5, {user_properties, {}}}},
+ mnesia:create_table(Tab5, [{user_properties, {}}])),
+ ?match({aborted, {bad_type, Tab5, {user_properties, ali}}},
+ mnesia:create_table(Tab5, [{user_properties, ali}])),
+ ?match({aborted, {bad_type, Tab5, {user_properties, [{}]}}},
+ mnesia:create_table(Tab5, [{user_properties, [{}]}])),
+ ?match({aborted, {bad_type, Tab5, {user_properties, [ali]}}},
+ mnesia:create_table(Tab5, [{user_properties, [ali]}])),
+
+ ?verify_mnesia(Nodes, []).
+
+
+unsupp_user_props(doc) ->
+ ["Simple test of adding user props in a schema_transaction"];
+unsupp_user_props(suite) -> [];
+unsupp_user_props(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab1 = silly1,
+ Tab2 = silly2,
+ Storage = mnesia_test_lib:storage_type(ram_copies, Config),
+
+ ?match({atomic, ok}, rpc:call(Node1, mnesia,
+ create_table, [Tab1, [{Storage, [Node1]}]])),
+ ?match({atomic, ok}, rpc:call(Node1, mnesia,
+ create_table, [Tab2, [{Storage, [Node1]}]])),
+
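+ %% Write the same property key to both tables and to the schema in one
+ %% schema transaction; each table should keep its own value.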
+ F1 = fun() ->
+ mnesia_schema:do_write_table_property(
+ silly1, {prop, propval1}),
+ mnesia_schema:do_write_table_property(
+ silly2, {prop, propval2}), % same key as above
+ mnesia_schema:do_write_table_property(
+ schema, {prop, propval3}) % same key as above
+ end,
+ ?match({atomic, ok}, rpc:call(Node1, mnesia_schema,
+ schema_transaction, [F1])),
+
+ ?match([{prop,propval1}], rpc:call(Node1, mnesia,
+ table_info, [silly1, user_properties])),
+ ?match([{prop,propval2}], rpc:call(Node1, mnesia,
+ table_info, [silly2, user_properties])),
+ ?match([{prop,propval3}], rpc:call(Node1, mnesia,
+ table_info, [schema, user_properties])),
+
+ F2 = fun() ->
+ mnesia_schema:do_write_table_property(
+ silly1, {prop, propval1a}),
+ mnesia_schema:do_write_table_property(
+ silly1, {prop, propval1b}) % same key as above
+ end,
+ ?match({atomic, ok}, rpc:call(Node1, mnesia_schema,
+ schema_transaction, [F2])),
+
+ ?match([{prop,propval1b}], rpc:call(Node1, mnesia,
+ table_info,
+ [silly1, user_properties])),
+ ?verify_mnesia([Node1], []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+snmp_access(doc) ->
+ ["Make Mnesia table accessible via SNMP"];
+
+snmp_access(suite) ->
+ [
+ snmp_open_table,
+ snmp_close_table,
+ snmp_get_next_index,
+ snmp_get_row,
+ snmp_get_mnesia_key,
+ snmp_update_counter,
+ snmp_order
+ ].
+
+snmp_open_table(suite) -> [];
+snmp_open_table(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = local_snmp_table,
+
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ Def1 =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{disc_copies, [Node1]}, {ram_copies, [Node2]}]
+ end,
+
+ Tab2 = ext_snmp_table,
+ Def2 = [{Storage, [Node2]}],
+ ErrTab = non_existing_tab,
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab2, [{key, integer}])),
+ ?match({aborted, _}, mnesia:snmp_open_table(ErrTab, [{key, integer}])),
+ ?verify_mnesia(Nodes, []).
+
+snmp_close_table(suite) -> [];
+snmp_close_table(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = local_snmp_table,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ Def1 =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{disc_copies, [Node1]}, {ram_copies, [Node2]}]
+ end,
+
+ Tab2 = ext_snmp_table,
+ Def2 = [{snmp, [{key, integer}]}, {Storage, [Node2]}],
+ ErrTab = non_existing_tab,
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(no_snmp_tab, [])),
+ add_some_records(Tab1, Tab2, 3),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key, integer}])),
+ add_some_records(Tab1, Tab2, 5),
+ ?match({atomic, ok}, mnesia:snmp_close_table(Tab1)),
+
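+ %% Rewrite each {Key, Val} record as {{Key, Key}, Val, 43+Val} so the
+ %% table can be reopened with a two-integer SNMP key.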
+ Transform = fun(Tab, Key) ->
+ [{T,K,V}] = mnesia:read(Tab, Key, write),
+ mnesia:delete(T,K, write),
+ mnesia:write({T, {K,K}, V, 43+V})
+ end,
+
+ ?match({atomic, ok}, mnesia:transform_table(Tab1, ignore, [key,val,new])),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() ->
+ mnesia:write_lock_table(Tab1),
+ Keys = mnesia:select(Tab1, [{{'_','$1','_'}, [],
+ ['$1']}]),
+ [Transform(Tab1, Key) || Key <- Keys],
+ ok
+ end)),
+ ?match([{Tab1, {1, 1}, 1, 44}], mnesia:dirty_read(Tab1, {1, 1})),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key,{integer,integer}}])),
+
+ ?match({atomic, ok}, mnesia:snmp_close_table(Tab2)),
+ ?match({atomic, ok}, mnesia:transform_table(Tab2, ignore, [key,val,new])),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() ->
+ mnesia:write_lock_table(Tab2),
+ Keys = mnesia:select(Tab2, [{{'_','$1','_'}, [],
+ ['$1']}]),
+ [Transform(Tab2, Key) || Key <- Keys],
+ ok
+ end)),
+
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab2, [{key,{integer,integer}}])),
+
+ %% Should be aborted ????
+ ?match({atomic, ok}, mnesia:snmp_close_table(no_snmp_tab)),
+ ?match({aborted, _}, mnesia:snmp_close_table(ErrTab)),
+ ?verify_mnesia(Nodes, []).
+
+snmp_get_next_index(suite) -> [];
+snmp_get_next_index(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = local_snmp_table,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ Def1 =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{disc_copies, [Node1]}, {ram_copies, [Node2]}]
+ end,
+
+ Tab2 = ext_snmp_table,
+ Def2 = [{Storage, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab2, [{key, integer}])),
+ add_some_records(Tab1, Tab2, 1),
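+ %% The same checks are run directly, in a transaction, in sync_dirty
+ %% context and via mnesia:activity/4, both before and after a restart.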
+ Test =
+ fun() ->
+ %% Test local tables
+ {success, Res11} = ?match({ok, _}, mnesia:snmp_get_next_index(Tab1,[])),
+ {ok, Index11} = Res11,
+ {success, _Res12} =
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab1, Index11)),
+ ?match({'EXIT',_}, mnesia:snmp_get_next_index(Tab1, endOfTable)),
+
+ %% Test external table
+ {success, Res21} =
+ ?match({ok, _}, mnesia:snmp_get_next_index(Tab2, [])),
+ {ok, Index21} = Res21,
+ {success, _Res22} =
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab2, Index21)),
+
+ {ok, Row} = mnesia:snmp_get_row(Tab1, Index11),
+ ?match(ok, mnesia:dirty_delete(Tab1, hd(Index11))),
+
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab1,[])),
+
+ ok = mnesia:dirty_write(Row), %% Reset for the coming tests
+
+ %% Test of a non-existing table
+ %%?match(endOfTable, mnesia:snmp_get_next_index(ErrTab, [])),
+ ok
+ end,
+ ?match(ok, Test()),
+ ?match({atomic,ok}, mnesia:transaction(Test)),
+ ?match(ok, mnesia:sync_dirty(Test)),
+ ?match(ok, mnesia:activity(transaction,Test,mnesia)),
+
+ %%io:format("**********Before ~p~n", [mnesia_lib:val({Tab1,snmp})]),
+ %%io:format(" ~p ~n", [ets:tab2list(mnesia_lib:val({local_snmp_table,{index,snmp}}))]),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab1, Tab2])),
+ %%io:format("**********After ~p~n", [mnesia_lib:val({Tab1,snmp})]),
+ %%io:format(" ~p ~n", [ets:tab2list(mnesia_lib:val({local_snmp_table,{index,snmp}}))]),
+
+ ?match(ok, Test()),
+ ?match({atomic,ok}, mnesia:transaction(Test)),
+ ?match(ok, mnesia:sync_dirty(Test)),
+ ?match(ok, mnesia:activity(transaction,Test,mnesia)),
+
+ ?verify_mnesia(Nodes, []).
+
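+%% Write N records into each table (dirty writes into Tab1, a transaction
+%% for Tab2), sync the last record of each to all replicas, and return the
+%% sorted list of written records.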
+add_some_records(Tab1, Tab2, N) ->
+ Recs1 = [{Tab1, I, I} || I <- lists:reverse(lists:seq(1, N))],
+ Recs2 = [{Tab2, I, I} || I <- lists:reverse(lists:seq(20, 20+N-1))],
+ lists:foreach(fun(R) -> mnesia:dirty_write(R) end, Recs1),
+ Fun = fun(R) -> mnesia:write(R) end,
+ Trans = fun() -> lists:foreach(Fun, Recs2) end,
+ {atomic, ok} = mnesia:transaction(Trans),
+ %% Sync things, so everything gets everywhere!
+ {atomic, ok} = mnesia:sync_transaction(fun() -> mnesia:write(lists:last(Recs1)) end),
+ {atomic, ok} = mnesia:sync_transaction(fun() -> mnesia:write(lists:last(Recs2)) end),
+ ?sort(Recs1 ++ Recs2).
+
+snmp_get_row(suite) -> [];
+snmp_get_row(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = local_snmp_table,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ Def1 =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{disc_copies, [Node1]}, {ram_copies, [Node2]}]
+ end,
+ Tab2 = ext_snmp_table,
+ Def2 = [{Storage, [Node2]}],
+ Tab3 = snmp_table,
+ Def3 = [{Storage, [Node1]},
+ {attributes, [key, data1, data2]}],
+
+ Setup = fun() ->
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab2, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(
+ Tab3, [{key, {fix_string,integer}}])),
+ add_some_records(Tab1, Tab2, 1)
+ end,
+ Clear = fun() ->
+ ?match({atomic, ok}, mnesia:delete_table(Tab1)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab2)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab3))
+ end,
+ Test =
+ fun() ->
+ %% Test local tables
+ {success, Res11} =
+ ?match({ok, [1]}, mnesia:snmp_get_next_index(Tab1,[])),
+ {ok, Index11} = Res11,
+ ?match({ok, {Tab1,1,1}}, mnesia:snmp_get_row(Tab1, Index11)),
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab1, Index11)),
+ ?match({'EXIT',_}, mnesia:snmp_get_row(Tab1, endOfTable)),
+ ?match(undefined, mnesia:snmp_get_row(Tab1, [73])),
+
+ Add = fun() -> mnesia:write({Tab3, {"f_string", 3}, data1, data2}) end,
+ ?match({atomic, ok}, mnesia:transaction(Add)),
+ {success, {ok, Index31}} = ?match({ok, RowIndex31} when is_list(RowIndex31),
+ mnesia:snmp_get_next_index(Tab3,[])),
+ ?match({ok, Row31} when is_tuple(Row31),
+ mnesia:snmp_get_row(Tab3, Index31)),
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab3, Index31)),
+ Del = fun() -> mnesia:delete({Tab3,{"f_string",3}}) end,
+ ?match({atomic, ok}, mnesia:transaction(Del)),
+ ?match(undefined, mnesia:snmp_get_row(Tab3, "f_string" ++ [3])),
+ ?match(undefined, mnesia:snmp_get_row(Tab3, "f_string" ++ [73])),
+
+ %% Test external table
+ {success, Res21} = ?match({ok,[20]}, mnesia:snmp_get_next_index(Tab2, [])),
+ {ok, Index21} = Res21,
+ ?match({ok, Row2} when is_tuple(Row2), mnesia:snmp_get_row(Tab2, Index21)),
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab2, Index21)),
+ %% Test of non existing table
+ %% ?match(endOfTable, mnesia:snmp_get_next_index(ErrTab, [])),
+ ok
+ end,
+ Setup(),
+ ?match(ok, Test()),
+ Clear(), Setup(),
+ ?match({atomic,ok}, mnesia:transaction(Test)),
+ Clear(), Setup(),
+ ?match(ok, mnesia:sync_dirty(Test)),
+ Clear(), Setup(),
+ ?match(ok, mnesia:activity(transaction,Test,mnesia)),
+
+ Clear(), Setup(),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab1, Tab2])),
+ ?match(ok, Test()),
+ Clear(), Setup(),
+ ?match([], mnesia_test_lib:stop_mnesia(Nodes)),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab1, Tab2])),
+ ?match({atomic,ok}, mnesia:transaction(Test)),
+
+ ?verify_mnesia(Nodes, []).
+
+snmp_get_mnesia_key(suite) -> [];
+snmp_get_mnesia_key(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = local_snmp_table,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ Def1 =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{disc_copies, [Node1]}, {ram_copies, [Node2]}]
+ end,
+
+ Tab2 = ext_snmp_table,
+ Def2 = [{Storage, [Node2]}],
+
+ Tab3 = fix_string,
+ Setup = fun() ->
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def1)),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab1, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab2, [{key, integer}])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab3, [{key, {fix_string,integer}}])),
+
+ add_some_records(Tab1, Tab2, 1)
+ end,
+ Clear = fun() ->
+ ?match({atomic, ok}, mnesia:delete_table(Tab1)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab2)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab3))
+ end,
+ Test =
+ fun() ->
+ %% Test local tables
+ {success, Res11} =
+ ?match({ok, [1]}, mnesia:snmp_get_next_index(Tab1,[])),
+ {ok, Index11} = Res11,
+ ?match({ok, 1}, mnesia:snmp_get_mnesia_key(Tab1, Index11)),
+ %% Test external tables
+ {success, Res21} =
+ ?match({ok, [20]}, mnesia:snmp_get_next_index(Tab2, [])),
+ {ok, Index21} = Res21,
+ ?match({ok, 20}, mnesia:snmp_get_mnesia_key(Tab2, Index21)),
+ ?match(undefined, mnesia:snmp_get_mnesia_key(Tab2, [97])),
+ ?match({'EXIT', _}, mnesia:snmp_get_mnesia_key(Tab2, 97)),
+
+ ?match({atomic,ok}, mnesia:transaction(fun() -> mnesia:delete({Tab1,1}) end)),
+ ?match(undefined, mnesia:snmp_get_mnesia_key(Tab1, Index11)),
+
+ ?match({atomic,ok},mnesia:transaction(fun() -> mnesia:write({Tab1,73,7}) end)),
+ ?match({ok, 73}, mnesia:snmp_get_mnesia_key(Tab1, [73])),
+ ?match({atomic,ok}, mnesia:transaction(fun() -> mnesia:delete({Tab1,73}) end)),
+ ?match(undefined, mnesia:snmp_get_mnesia_key(Tab1, [73])),
+
+ ?match({atomic,ok},mnesia:transaction(fun() -> mnesia:write({Tab3,{"S",5},7}) end)),
+ ?match({ok,{"S",5}}, mnesia:snmp_get_mnesia_key(Tab3, [$S,5])),
+ ?match({atomic,ok},mnesia:transaction(fun() -> mnesia:delete({Tab3,{"S",5}}) end)),
+ ?match(undefined, mnesia:snmp_get_mnesia_key(Tab3, [$S,5])),
+
+ ok
+ end,
+ Setup(),
+ ?match(ok, Test()),
+ Clear(), Setup(),
+ ?match({atomic,ok}, mnesia:transaction(Test)),
+ Clear(), Setup(),
+ ?match(ok, mnesia:sync_dirty(Test)),
+ Clear(), Setup(),
+ ?match(ok, mnesia:activity(transaction,Test,mnesia)),
+ ?verify_mnesia(Nodes, []).
+
+snmp_update_counter(doc) ->
+ ["Verify that counters may be updated for tables with SNMP property"];
+snmp_update_counter(suite) -> [];
+snmp_update_counter(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = snmp_update_counter,
+ Def = [{attributes, [key, value]},
+ {snmp, [{key, integer}]},
+ {ram_copies, [Node1]}
+ ],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ Oid = {Tab, 1},
+ ?match([], mnesia:dirty_read(Oid)),
+ ?match(ok, mnesia:dirty_write({Tab, 1, 1})),
+ ?match([{Tab, _Key, 1}], mnesia:dirty_read(Oid)),
+ ?match(3, mnesia:dirty_update_counter(Oid, 2)),
+ ?match([{Tab, _Key, 3}], mnesia:dirty_read(Oid)),
+ ?verify_mnesia(Nodes, []).
+
+snmp_order(doc) ->
+ ["Verify that sort order is correct in transactions and dirty variants"];
+snmp_order(suite) -> [];
+snmp_order(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = snmp_order,
+ Def = [{attributes, [key, value]},
+ {snmp, [{key, {integer, integer, integer}}]},
+ {ram_copies, [Node1]}
+ ],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ Oid = {Tab, 1},
+ ?match([], mnesia:dirty_read(Oid)),
+ ?match({'EXIT', {aborted, _}}, mnesia:dirty_write({Tab, 1, 1})),
+ [mnesia:dirty_write({Tab, {A,B,2}, default}) ||
+ A <- lists:seq(1, 9, 2),
+ B <- lists:seq(2, 8, 2)],
+
+ Test1 = fun() ->
+ Keys0 = get_keys(Tab, []),
+ ?match(Keys0, lists:sort(Keys0)),
+ ?match([[1,2,2]|_], Keys0),
+ Keys1 = get_keys(Tab, [5]),
+ ?match(Keys1, lists:sort(Keys1)),
+ ?match([[5,2,2]|_], Keys1),
+ Keys2 = get_keys(Tab, [7, 4]),
+ ?match(Keys2, lists:sort(Keys2)),
+ ?match([[7,4,2]|_], Keys2),
+ ok
+ end,
+ ?match(ok, Test1()),
+ ?match({atomic, ok},mnesia:transaction(Test1)),
+ ?match(ok,mnesia:sync_dirty(Test1)),
+
+ Test2 = fun() ->
+ mnesia:write(Tab, {Tab,{0,0,2},updated}, write),
+ mnesia:write(Tab, {Tab,{5,3,2},updated}, write),
+ mnesia:write(Tab, {Tab,{10,10,2},updated}, write),
+ Keys0 = get_keys(Tab, []),
+ ?match([[0,0,2],[1,2,2]|_], Keys0),
+ ?match(Keys0, lists:sort(Keys0)),
+
+ Keys1 = get_keys(Tab, [5]),
+ ?match([[5,2,2],[5,3,2]|_], Keys1),
+ ?match(Keys1, lists:sort(Keys1)),
+
+ Keys2 = get_keys(Tab, [7,4]),
+ ?match([[7,4,2]|_], Keys2),
+ ?match(Keys2, lists:sort(Keys2)),
+ ?match([10,10,2], lists:last(Keys0)),
+ ?match([10,10,2], lists:last(Keys1)),
+ ?match([10,10,2], lists:last(Keys2)),
+
+ ?match([[10,10,2]], get_keys(Tab, [10])),
+ ok
+ end,
+
+ ?match({atomic, ok},mnesia:transaction(Test2)),
+
+ ?verify_mnesia(Nodes, []).
+
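+%% Collect all SNMP row indices of Tab following Key, in iteration order.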
+get_keys(Tab, Key) ->
+ case mnesia:snmp_get_next_index(Tab, Key) of
+ endOfTable -> [];
+ {ok, Next} ->
+ [Next|get_keys(Tab, Next)]
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+iteration(doc) ->
+ ["Verify that the iteration functions works as expected"];
+iteration(suite) ->
+ [foldl].
+
+
+foldl(suite) ->
+ [];
+foldl(doc) ->
+ [""];
+foldl(Config) when is_list(Config) ->
+ Nodes = [_N1, N2] = ?acquire_nodes(2, Config),
+ Tab1 = fold_local,
+ Tab2 = fold_remote,
+ Tab3 = fold_ordered,
+ ?match({atomic, ok}, mnesia:create_table(Tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, [{ram_copies, [N2]}, {type, bag}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, [{ram_copies, Nodes},
+ {type, ordered_set}])),
+
+ Tab1Els = [{Tab1, N, N} || N <- lists:seq(1, 10)],
+ Tab2Els = ?sort([{Tab2, 1, 2} | [{Tab2, N, N} || N <- lists:seq(1, 10)]]),
+ Tab3Els = [{Tab3, N, N} || N <- lists:seq(1, 10)],
+
+ [mnesia:sync_transaction(fun() -> mnesia:write(E) end) || E <- Tab1Els],
+ [mnesia:sync_transaction(fun() -> mnesia:write(E) end) || E <- Tab2Els],
+ [mnesia:sync_transaction(fun() -> mnesia:write(E) end) || E <- Tab3Els],
+
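+ %% Fold helpers: collect all records of a table, optionally with an
+ %% explicit lock kind; Exit aborts on the first record visited.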
+ Fold = fun(Tab) ->
+ lists:reverse(mnesia:foldl(fun(E, A) -> [E | A] end, [], Tab))
+ end,
+ Fold2 = fun(Tab, Lock) ->
+ lists:reverse(mnesia:foldl(fun(E, A) -> [E | A] end, [], Tab, Lock))
+ end,
+ Exit = fun(Tab) ->
+ lists:reverse(mnesia:foldl(fun(_E, _A) -> exit(testing) end, [], Tab))
+ end,
+ %% Errors
+ ?match({aborted, _}, mnesia:transaction(Fold, [error])),
+ ?match({aborted, _}, mnesia:transaction(fun(Tab) -> mnesia:foldl(badfun,[],Tab) end,
+ [Tab1])),
+ ?match({aborted, testing}, mnesia:transaction(Exit, [Tab1])),
+ ?match({aborted, _}, mnesia:transaction(Fold2, [Tab1, read_lock])),
+
+ %% Success
+ ?match({atomic, Tab1Els}, sort_res(mnesia:transaction(Fold, [Tab1]))),
+ ?match({atomic, Tab2Els}, sort_res(mnesia:transaction(Fold, [Tab2]))),
+ ?match({atomic, Tab3Els}, mnesia:transaction(Fold, [Tab3])),
+
+ ?match({atomic, Tab1Els}, sort_res(mnesia:transaction(Fold2, [Tab1, read]))),
+ ?match({atomic, Tab1Els}, sort_res(mnesia:transaction(Fold2, [Tab1, write]))),
+
+ ?match(Tab1Els, sort_res(mnesia:sync_dirty(Fold, [Tab1]))),
+ ?match(Tab2Els, sort_res(mnesia:async_dirty(Fold, [Tab2]))),
+
+ ?verify_mnesia(Nodes, []).
+
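+%% Sort fold results so tables without a defined iteration order can be
+%% compared against the sorted reference lists.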
+sort_res({atomic, List}) ->
+ {atomic, ?sort(List)};
+sort_res(Else) when is_list(Else) ->
+ ?sort(Else);
+sort_res(Else) ->
+ Else.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+debug_support(doc) ->
+ ["Check that the debug support has not decayed."];
+debug_support(suite) ->
+ [
+ info,
+ schema_0,
+ schema_1,
+ view_0,
+ view_1,
+ view_2,
+ lkill,
+ kill
+ ].
+
+info(suite) -> [];
+info(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ ?match(ok, mnesia:info()),
+ ?verify_mnesia(Nodes, []).
+
+schema_0(suite) -> [];
+schema_0(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ ?match(ok, mnesia:schema()),
+ ?verify_mnesia(Nodes, []).
+
+schema_1(suite) -> [];
+schema_1(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ Tab = schema_1,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [])),
+ ?match(ok, mnesia:schema(Tab)),
+ ?verify_mnesia(Nodes, []).
+
+view_0(suite) -> [];
+view_0(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ ?match(ok, mnesia_lib:view()),
+ ?verify_mnesia(Nodes, []).
+
+view_1(suite) -> [];
+view_1(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ BinCore = mnesia_lib:mkcore({crashinfo, "Just testing..."}),
+ CoreFile = lists:concat(["MnesiaCore.", node(), ".view_1.", ?MODULE]),
+ ?match(ok, file:write_file(CoreFile, BinCore)),
+ ?match(ok, mnesia_lib:view(CoreFile)),
+ ?match(ok, file:delete(CoreFile)),
+
+ ?match(stopped, mnesia:stop()),
+ Dir = mnesia:system_info(directory),
+ ?match(eof, mnesia_lib:view(filename:join(Dir, "LATEST.LOG"))),
+ ?match(ok, mnesia_lib:view(filename:join(Dir, "schema.DAT"))),
+ ?verify_mnesia([], Nodes).
+
+view_2(suite) -> [];
+view_2(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ BinCore = mnesia_lib:mkcore({crashinfo, "More testing..."}),
+ File = lists:concat([?MODULE, "view_2.", node()]),
+ ?match(ok, file:write_file(File, BinCore)),
+ ?match(ok, mnesia_lib:view(File, core)),
+ ?match(ok, file:delete(File)),
+
+ ?match(stopped, mnesia:stop()),
+ Dir = mnesia:system_info(directory),
+ ?match(ok, file:rename(filename:join(Dir, "LATEST.LOG"), File)),
+ ?match(eof, mnesia_lib:view(File, log)),
+ ?match(ok, file:delete(File)),
+
+ ?match(ok, file:rename(filename:join(Dir, "schema.DAT"), File)),
+ ?match(ok, mnesia_lib:view(File, dat)),
+ ?match(ok, file:delete(File)),
+ ?verify_mnesia([], Nodes).
+
+lkill(suite) -> [];
+lkill(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+
+ ?match(yes, rpc:call(Node1, mnesia, system_info, [is_running])),
+ ?match(yes, rpc:call(Node2, mnesia, system_info, [is_running])),
+ ?match(ok, rpc:call(Node2, mnesia, lkill, [])),
+ ?match(yes, rpc:call(Node1, mnesia, system_info, [is_running])),
+ ?match(no, rpc:call(Node2, mnesia, system_info, [is_running])),
+ ?verify_mnesia([Node1], [Node2]).
+
+kill(suite) -> [];
+kill(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+
+ ?match(yes, rpc:call(Node1, mnesia, system_info, [is_running])),
+ ?match(yes, rpc:call(Node2, mnesia, system_info, [is_running])),
+ ?match({_, []}, rpc:call(Node2, mnesia, kill, [])),
+ ?match(no, rpc:call(Node1, mnesia, system_info, [is_running])),
+ ?match(no, rpc:call(Node2, mnesia, system_info, [is_running])),
+ ?verify_mnesia([], [Node1, Node2]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+record_name(doc) ->
+ ["Verify that record names may be differ from the name of ",
+ "the hosting table. Check at least access, restore, "
+ "registry, subscriptions and traveres_backup"];
+record_name(suite) ->
+ [
+ record_name_dirty_access
+ ].
+
+record_name_dirty_access(suite) ->
+ [
+ record_name_dirty_access_ram,
+ record_name_dirty_access_disc,
+ record_name_dirty_access_disc_only
+ ].
+
+record_name_dirty_access_ram(suite) ->
+ [];
+record_name_dirty_access_ram(Config) when is_list(Config) ->
+ record_name_dirty_access(ram_copies, Config).
+
+record_name_dirty_access_disc(suite) ->
+ [];
+record_name_dirty_access_disc(Config) when is_list(Config) ->
+ record_name_dirty_access(disc_copies, Config).
+
+record_name_dirty_access_disc_only(suite) ->
+ [];
+record_name_dirty_access_disc_only(Config) when is_list(Config) ->
+ record_name_dirty_access(disc_only_copies, Config).
+
+record_name_dirty_access(Storage, Config) ->
+ [Node1, _Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ List = lists:concat([record_name_dirty_access_, Storage]),
+ Tab = list_to_atom(List),
+ RecName = some_record,
+ Attr = val,
+ TabDef = [{type, bag},
+ {record_name, RecName},
+ {index, [Attr]},
+ {Storage, Nodes}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, TabDef)),
+
+ ?match(RecName, mnesia:table_info(Tab, record_name)),
+
+ ?match(ok, mnesia:dirty_write(Tab, {RecName, 2, 20})),
+ ?match(ok, mnesia:dirty_write(Tab, {RecName, 2, 21})),
+ ?match(ok, mnesia:dirty_write(Tab, {RecName, 2, 22})),
+
+ %% Backup test
+ BupFile = List ++ ".BUP",
+ CpName = cpname,
+ CpArgs = [{name, CpName}, {min, [Tab]}, {ram_overrides_dump, true}],
+ ?match({ok, CpName, _}, mnesia:activate_checkpoint(CpArgs)),
+ ?match(ok, mnesia:backup_checkpoint(CpName, BupFile)),
+ ?match(ok, mnesia:deactivate_checkpoint(CpName)),
+
+ ?match(ok, mnesia:dirty_write(Tab, {RecName, 1, 10})),
+ ?match({ok, Node1}, mnesia:subscribe({table, Tab})),
+ ?match(ok, mnesia:dirty_write(Tab, {RecName, 3, 10})),
+
+ Twos =?sort( [{RecName, 2, 20}, {RecName, 2, 21}, {RecName, 2, 22}]),
+ ?match(Twos, ?sort(mnesia:dirty_read(Tab, 2))),
+
+ ?match(ok, mnesia:dirty_delete_object(Tab, {RecName, 2, 21})),
+
+ Tens = ?sort([{RecName, 1, 10}, {RecName, 3, 10}]),
+ TenPat = {RecName, '_', 10},
+ ?match(Tens, ?sort(mnesia:dirty_match_object(Tab, TenPat))),
+ ?match(Tens, ?sort(mnesia:dirty_select(Tab, [{TenPat, [], ['$_']}]))),
+
+ %% Subscription test
+ E = mnesia_table_event,
+ ?match_receive({E, {write, {Tab, 3, 10}, _}}),
+ ?match_receive({E, {delete_object, {Tab, 2, 21}, _}}),
+ ?match({ok, Node1}, mnesia:unsubscribe({table, Tab})),
+
+ ?match([], mnesia_test_lib:stop_mnesia([Node1])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [Tab])),
+
+ ?match(Tens, ?sort(mnesia:dirty_index_match_object(Tab, TenPat, Attr) )),
+ ?match(Tens, ?sort(mnesia:dirty_index_read(Tab, 10, Attr))),
+
+ ?match([1, 2, 3], ?sort(mnesia:dirty_all_keys(Tab))),
+
+ ?match({ok, Node1}, mnesia:subscribe({table, Tab})),
+ ?match(ok, mnesia:dirty_delete(Tab, 2)),
+ ?match([], mnesia:dirty_read(Tab, 2)),
+
+ ?match_receive({E, {delete, {Tab, 2}, _}}),
+ ?match([], mnesia_test_lib:flush()),
+ ?match({ok, Node1}, mnesia:unsubscribe({table, Tab})),
+
+ %% Restore test
+ ?match({atomic, [Tab]}, mnesia:restore(BupFile, [{recreate_tables, [Tab]}])),
+ ?match(RecName, mnesia:table_info(Tab, record_name)),
+
+ ?match(Twos, ?sort(mnesia:dirty_match_object(Tab, mnesia:table_info(Tab, wild_pattern)))),
+ ?match(Twos, ?sort(mnesia:dirty_select(Tab,
+ [{mnesia:table_info(Tab, wild_pattern),
+ [],['$_']}]))),
+
+ %% Traverse backup test
+
+ Fun = fun(Rec, {Good, Bad}) ->
+ ?verbose("BUP: ~p~n", [Rec]),
+ case Rec of
+ {T, K, V} when T == Tab ->
+ Good2 = Good ++ [{RecName, K, V}],
+ {[Rec], {?sort(Good2), Bad}};
+ {T, K} when T == Tab ->
+ Good2 = [G || G <- Good, element(2, G) /= K],
+ {[Rec], {?sort(Good2), Bad}};
+ _ when element(1, Rec) == schema ->
+ {[Rec], {Good, Bad}};
+ _ ->
+ Bad2 = Bad ++ [Rec],
+ {[Rec], {Good, ?sort(Bad2)}}
+ end
+ end,
+
+ ?match({ok, {Twos, []}}, mnesia:traverse_backup(BupFile, mnesia_backup,
+ dummy, read_only,
+ Fun, {[], []})),
+ ?match(ok, file:delete(BupFile)),
+
+ %% Update counter test
+
+ CounterTab = list_to_atom(lists:concat([Tab, "_counter"])),
+ CounterTabDef = [{record_name, some_counter}],
+ C = my_counter,
+ ?match({atomic, ok}, mnesia:create_table(CounterTab, CounterTabDef)),
+ ?match(some_counter, mnesia:table_info(CounterTab, record_name)),
+ ?match(0, mnesia:dirty_update_counter(CounterTab, gurka, -10)),
+ ?match(10, mnesia:dirty_update_counter(CounterTab, C, 10)),
+ ?match(11, mnesia:dirty_update_counter(CounterTab, C, 1)),
+ ?match(4711, mnesia:dirty_update_counter(CounterTab, C, 4700)),
+ ?match([{some_counter, C, 4711}], mnesia:dirty_read(CounterTab, C)),
+ ?match(0, mnesia:dirty_update_counter(CounterTab, C, -4747)),
+
+ %% Registry tests
+
+ RegTab = list_to_atom(lists:concat([Tab, "_registry"])),
+ RegTabDef = [{record_name, some_reg}],
+ ?match(ok, mnesia_registry:create_table(RegTab, RegTabDef)),
+ ?match(some_reg, mnesia:table_info(RegTab, record_name)),
+ {success, RegRecs} =
+ ?match([_ | _], mnesia_registry_test:dump_registry(node(), RegTab)),
+
+ R = ?sort(RegRecs),
+ ?match(R, ?sort(mnesia_registry_test:restore_registry(node(), RegTab))),
+
+ ?verify_mnesia(Nodes, []).
+
+sorted_ets(suite) ->
+ [];
+sorted_ets(Config) when is_list(Config) ->
+ [N1, N2, N3] = All = ?acquire_nodes(3, Config),
+
+ Tab = sorted_tab,
+ Def = case mnesia_test_lib:diskless(Config) of
+ true -> [{name, Tab}, {type, ordered_set}, {ram_copies, All}];
+ false -> [{name, Tab}, {type, ordered_set},
+ {ram_copies, [N1]},
+ {disc_copies,[N2, N3]}]
+ end,
+
+ ?match({atomic, ok}, mnesia:create_table(Def)),
+ ?match({aborted, _}, mnesia:create_table(fel, [{disc_only_copies, N1}])),
+
+ ?match([ok | _],
+ [mnesia:dirty_write({Tab, {dirty, N}, N}) || N <- lists:seq(1, 10)]),
+ ?match({atomic, _},
+ mnesia:sync_transaction(fun() ->
+ [mnesia:write({Tab, {trans, N}, N}) ||
+ N <- lists:seq(1, 10)]
+ end)),
+
+ List = mnesia:dirty_match_object({Tab, '_', '_'}),
+ ?match(List, ?sort(List)),
+ ?match(List, rpc:call(N2, mnesia, dirty_match_object, [{Tab, '_', '_'}])),
+ ?match(List, rpc:call(N3, mnesia, dirty_match_object, [{Tab, '_', '_'}])),
+
+ mnesia_test_lib:stop_mnesia(All),
+ mnesia_test_lib:start_mnesia(All, [sorted_tab]),
+
+ List = mnesia:dirty_match_object({Tab, '_', '_'}),
+ ?match(List, ?sort(List)),
+ ?match(List, rpc:call(N2, mnesia, dirty_match_object, [{Tab, '_', '_'}])),
+ ?match(List, rpc:call(N3, mnesia, dirty_match_object, [{Tab, '_', '_'}])),
+
+ ?match(List, rpc:call(N3, mnesia, dirty_select, [Tab, [{{Tab, '_', '_'},[],['$_']}]])),
+
+ TransMatch = fun() ->
+ mnesia:write({Tab, {trans, 0}, 0}),
+ mnesia:write({Tab, {trans, 11}, 11}),
+ mnesia:match_object({Tab, '_', '_'})
+ end,
+ TransSelect = fun() ->
+ mnesia:write({Tab, {trans, 0}, 0}),
+ mnesia:write({Tab, {trans, 11}, 11}),
+ mnesia:select(Tab, [{{Tab, '_', '_'},[],['$_']}])
+ end,
+
+ TList = mnesia:transaction(TransMatch),
+ STList = ?sort(TList),
+ ?match(STList, TList),
+ ?match(STList, rpc:call(N2, mnesia, transaction, [TransMatch])),
+ ?match(STList, rpc:call(N3, mnesia, transaction, [TransMatch])),
+
+ TSel = mnesia:transaction(TransSelect),
+ ?match(STList, TSel),
+ ?match(STList, rpc:call(N2, mnesia, transaction, [TransSelect])),
+ ?match(STList, rpc:call(N3, mnesia, transaction, [TransSelect])),
+
+ ?match({atomic, ok}, mnesia:create_table(rec, [{type, ordered_set}])),
+ [ok = mnesia:dirty_write(R) || R <- [{rec,1,1}, {rec,2,1}]],
+ ?match({atomic, ok}, mnesia:add_table_index(rec, 3)),
+ TestIt = fun() ->
+ ok = mnesia:write({rec,1,1}),
+ mnesia:index_read(rec, 1, 3)
+ end,
+ ?match({atomic, [{rec,1,1}, {rec,2,1}]}, mnesia:transaction(TestIt)).
+
+
diff --git a/lib/mnesia/test/mnesia_examples_test.erl b/lib/mnesia/test/mnesia_examples_test.erl
new file mode 100644
index 0000000000..d1b1409c9d
--- /dev/null
+++ b/lib/mnesia/test/mnesia_examples_test.erl
@@ -0,0 +1,160 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_examples_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(init(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema],
+ N, Config, ?FILE, ?LINE)).
+
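+%% Make sure the example module is loaded on all connected nodes, loading
+%% it from the mnesia examples directory if necessary.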
+opt_net_load(ExampleMod) ->
+ opt_net_load([node() | nodes()], ExampleMod, ok).
+
+opt_net_load([Node | Nodes], ExampleMod, Res) ->
+ case rpc:call(Node, ?MODULE, opt_load, [ExampleMod]) of
+ {module, ExampleMod} ->
+ opt_net_load(Nodes, ExampleMod, Res);
+ {error, Reason} ->
+ Error = {opt_net_load, ExampleMod, Node, Reason},
+ opt_net_load(Nodes, ExampleMod, {error, Error});
+ {badrpc, Reason} ->
+ Error = {opt_net_load, ExampleMod, Node, Reason},
+ opt_net_load(Nodes, ExampleMod, {error, Error})
+ end;
+opt_net_load([], _ExampleMod, Res) ->
+ Res.
+
+opt_load(Mod) ->
+ case code:is_loaded(Mod) of
+ {file, _} ->
+ {module, Mod};
+ false ->
+ Abs = filename:join([code:lib_dir(mnesia), examples, Mod]),
+ code:load_abs(Abs)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Run all examples mentioned in the documentation",
+ "Are really all examples covered?"];
+all(suite) ->
+ [
+ bup,
+ company,
+ meter,
+ tpcb
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bup(doc) -> ["Run the backup examples in bup.erl"];
+bup(suite) -> [];
+bup(Config) when is_list(Config) ->
+ Nodes = ?init(3, Config),
+ opt_net_load(bup),
+ ?match(ok, bup:test(Nodes)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+company(doc) ->
+ ["Run the company examples in company.erl and company_o.erl"].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+tpcb(doc) ->
+ ["Run the sample configurations of the stress tests in mnesia_tpcb.erl"];
+tpcb(suite) ->
+ [
+ replica_test,
+ sticky_replica_test,
+ dist_test,
+ conflict_test,
+ frag_test,
+ frag2_test,
+ remote_test,
+ remote_frag2_test
+ ].
+
+replica_test(suite) -> [];
+replica_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(replica_test, ram_copies))).
+
+sticky_replica_test(suite) -> [];
+sticky_replica_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(sticky_replica_test, ram_copies))).
+
+dist_test(suite) -> [];
+dist_test(Config) when is_list(Config) ->
+ ?init(3, [{tc_timeout, timer:minutes(10)} | Config]),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(dist_test, ram_copies))).
+
+conflict_test(suite) -> [];
+conflict_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(conflict_test, ram_copies))).
+
+frag_test(suite) -> [];
+frag_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(frag_test, ram_copies))).
+
+frag2_test(suite) -> [];
+frag2_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(frag2_test, ram_copies))).
+
+remote_test(suite) -> [];
+remote_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(remote_test, ram_copies))).
+
+remote_frag2_test(suite) -> [];
+remote_frag2_test(Config) when is_list(Config) ->
+ ?init(3, Config),
+ opt_net_load(mnesia_tpcb),
+ ?match({ok, _}, mnesia_tpcb:start(mnesia_tpcb:config(remote_frag2_test, ram_copies))).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+meter(doc) ->
+ ["Run the meter example in mnesia_meter.erl"];
+meter(suite) ->
+ [];
+meter(Config) when is_list(Config) ->
+ [N | _] = ?init(3, Config),
+ opt_net_load(mnesia_meter),
+ ?match(ok, mnesia_meter:go(ram_copies, [N])).
+
+
diff --git a/lib/mnesia/test/mnesia_frag_test.erl b/lib/mnesia/test/mnesia_frag_test.erl
new file mode 100644
index 0000000000..4add340254
--- /dev/null
+++ b/lib/mnesia/test/mnesia_frag_test.erl
@@ -0,0 +1,875 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_frag_test).
+-author('[email protected]').
+-include("mnesia_test_lib.hrl").
+
+-compile([export_all]).
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(match_dist(ExpectedRes, Expr),
+ case ?match(ExpectedRes, Expr) of
+
+ mnesia_test_lib:error(Format, Args,?FILE,?LINE)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+all(doc) ->
+ ["Verify the functionality of fragmented tables"];
+all(suite) ->
+ [
+ light,
+ medium
+ ].
+
+light(suite) ->
+ [
+ nice,
+ evil
+ ].
+
+medium(suite) ->
+ [
+ consistency
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+nice(suite) ->
+ [
+ nice_single,
+ nice_multi,
+ nice_access,
+ iter_access
+ ].
+
+nice_single(suite) -> [];
+nice_single(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ %% Create a table with 2 fragments and 12 records
+ Tab = nice_frag,
+ Props = [{n_fragments, 2}, {node_pool, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{frag_properties, Props}])),
+ Records = [{Tab, N, -N} || N <- lists:seq(1, 12)],
+ [frag_write(Tab, R) || R <- Records],
+ ?match([{Node1, 2}], frag_dist(Tab)),
+ ?match([8, 4], frag_rec_dist(Tab)),
+
+ %% Adding a new node to pool should not affect distribution
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_node, Node2})),
+ Dist = frag_dist(Tab),
+ ?match([{Node2, 0}, {Node1, 2}], Dist),
+ ?match([8, 4], frag_rec_dist(Tab)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist})),
+ Dist2 = frag_dist(Tab),
+ ?match([{Node2, 1}, {Node1, 2}], Dist2),
+ ?match([3, 4, 5], frag_rec_dist(Tab)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist2})),
+ Dist3 = frag_dist(Tab),
+ ?match([{Node1, 2}, {Node2, 2}], Dist3),
+ ?match([3, 2, 5, 2], frag_rec_dist(Tab)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist3})),
+ Dist4 = frag_dist(Tab),
+ ?match([{Node2, 2}, {Node1, 3}], Dist4),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+
+ %% Dropping a node in pool should not affect distribution
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {del_node, Node1})),
+ ?match([{Node2, 2}, {Node1, 3}], frag_dist(Tab)),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+
+ %% Dropping a fragment
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ Dist5 = frag_dist(Tab),
+ ?match([{Node2, 2}, {Node1, 2}], Dist5),
+ ?match([3, 2, 5, 2], frag_rec_dist(Tab)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist5})),
+ Dist6 = frag_dist(Tab),
+ ?match([{Node2, 3}, {Node1, 2}], Dist6),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+
+ %% Dropping all fragments but one
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([3, 2, 5, 2], frag_rec_dist(Tab)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([3, 4, 5], frag_rec_dist(Tab)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([8, 4], frag_rec_dist(Tab)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([{Node2, 0}, {Node1, 1}], frag_dist(Tab)),
+ ?match([12], frag_rec_dist(Tab)),
+
+ %% Defragmenting the table clears frag_properties
+ ?match(Len when Len > 0,
+ length(mnesia:table_info(Tab, frag_properties))),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, deactivate)),
+ ?match(0, length(mnesia:table_info(Tab, frag_properties))),
+
+ %% Making the table fragmented again
+ Props2 = [{n_fragments, 1}],
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {activate, Props2})),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, frag_dist(Tab)})),
+ Dist7 = frag_dist(Tab),
+ ?match([{Node1, 1}, {Node2, 1}], Dist7),
+ ?match([8, 4], frag_rec_dist(Tab)),
+
+ %% Deleting the fragmented table
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+ ?match(false, lists:member(Tab, mnesia:system_info(tables))),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+nice_multi(doc) ->
+ ["Extending the nice case with one more node, ",
+ "one more replica and a foreign key"];
+nice_multi(suite) -> [];
+nice_multi(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+
+ %% Create a table with 2 fragments and 8 records
+ Tab = frag_master,
+ Name = frag_rec,
+ Type = case mnesia_test_lib:diskless(Config) of
+ true -> n_ram_copies;
+ false -> n_disc_copies
+ end,
+ Props = [{n_fragments, 2},
+ {Type, 2},
+ {node_pool, [Node2, Node1]}],
+ Def = [{frag_properties, Props},
+ {attributes, [id, data]},
+ {record_name, Name}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ [frag_write(Tab, {Name, Id, -Id}) || Id <- lists:seq(1, 8)],
+ ?match([6, 2], frag_rec_dist(Tab)),
+ ?match([{Node2, 2}, {Node1, 2}], frag_dist(Tab)),
+
+ %% And connect another table to it, via a foreign key
+ TabF = frag_slave,
+ PropsF = [{foreign_key, {Tab, foreign_id}}],
+ DefF = [{frag_properties, PropsF},
+ {attributes, [id, foreign_id]}],
+
+ ?match({atomic, ok}, mnesia:create_table(TabF, DefF)),
+ [frag_write(TabF, {TabF, {Id}, Id}) || Id <- lists:seq(1, 16)],
+ ?match([10, 6], frag_rec_dist(TabF)),
+ ?match([{Node2, 2}, {Node1, 2}], frag_dist(TabF)),
+
+ %% Adding a new node to pool should not affect distribution
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_node, Node3})),
+ Dist = frag_dist(Tab),
+ ?match([{Node3, 0}, {Node2, 2}, {Node1, 2}], Dist),
+ ?match([6, 2], frag_rec_dist(Tab)),
+ DistF = frag_dist(TabF),
+ ?match([{Node3, 0}, {Node2, 2}, {Node1, 2}], DistF),
+ ?match([10, 6], frag_rec_dist(TabF)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist})),
+ Dist2 = frag_dist(Tab),
+ ?match([{Node3, 1},{Node1, 2},{Node2,3}], Dist2),
+ ?match([_, _, _], frag_rec_dist(Tab)),
+ DistF2 = frag_dist(TabF),
+ ?match([{Node3, 1},{Node1, 2},{Node2,3}], DistF2),
+ ?match([_, _, _], frag_rec_dist(TabF)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist2})),
+ Dist3 = frag_dist(Tab),
+ ?match([{Node3, 2},{Node2,3},{Node1, 3}], Dist3),
+ ?match([3, 0, 3, 2], frag_rec_dist(Tab)),
+ DistF3 = frag_dist(TabF),
+ ?match([{Node3, 2},{Node2,3},{Node1, 3}], DistF3),
+ ?match([3, 3, 7, 3], frag_rec_dist(TabF)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist3})),
+ Dist4 = frag_dist(Tab),
+ ?match([{Node1, 3}, {Node3, 3},{Node2, 4}], Dist4),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+ DistF4 = frag_dist(TabF),
+ ?match([{Node1, 3}, {Node3, 3},{Node2, 4}], DistF4),
+ ?match([_, _, _, _, _], frag_rec_dist(TabF)),
+
+ %% Dropping a node in pool should not affect distribution
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {del_node, Node1})),
+ ?match([{Node3, 3},{Node2, 4}, {Node1, 3}], frag_dist(Tab)),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+ ?match([{Node3, 3},{Node2, 4}, {Node1, 3}], frag_dist(TabF)),
+ ?match([_, _, _, _, _], frag_rec_dist(TabF)),
+
+ %% Dropping a fragment
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ Dist5 = frag_dist(Tab),
+ ?match([{Node3, 2},{Node2,3},{Node1, 3}], Dist5),
+ ?match([3, 0, 3, 2], frag_rec_dist(Tab)),
+ DistF5 = frag_dist(Tab),
+ ?match([{Node3, 2},{Node2,3},{Node1, 3}], DistF5),
+ ?match([3, 3, 7, 3], frag_rec_dist(TabF)),
+
+ %% Add new fragment hopefully on the new node
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist5})),
+ Dist6 = frag_dist(Tab),
+ ?match([{Node3, 3},{Node2, 4},{Node1, 3}], Dist6),
+ ?match([_, _, _, _, _], frag_rec_dist(Tab)),
+ DistF6 = frag_dist(TabF),
+ ?match([{Node3, 3},{Node2, 4},{Node1, 3}], DistF6),
+ ?match([_, _, _, _, _], frag_rec_dist(TabF)),
+
+ %% Dropping all fragments but one
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([3, 0, 3, 2], frag_rec_dist(Tab)),
+ ?match([3, 3, 7, 3], frag_rec_dist(TabF)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([_, _, _], frag_rec_dist(Tab)),
+ ?match([_, _, _], frag_rec_dist(TabF)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([6, 2], frag_rec_dist(Tab)),
+ ?match([10, 6], frag_rec_dist(TabF)),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, del_frag)),
+ ?match([{Node3, 0}, {Node2, 1}, {Node1, 1}], frag_dist(Tab)),
+ ?match([8], frag_rec_dist(Tab)),
+ ?match([{Node3, 0}, {Node2, 1}, {Node1, 1}], frag_dist(TabF)),
+ ?match([16], frag_rec_dist(TabF)),
+
+ %% Defragmenting the tables clears frag_properties
+ ?match(Len when Len > 0,
+ length(mnesia:table_info(TabF, frag_properties))),
+ ?match({atomic, ok}, mnesia:change_table_frag(TabF, deactivate)),
+ ?match(0, length(mnesia:table_info(TabF, frag_properties))),
+ ?match(Len when Len > 0,
+ length(mnesia:table_info(Tab, frag_properties))),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, deactivate)),
+ ?match(0, length(mnesia:table_info(Tab, frag_properties))),
+
+ %% Making the tables fragmented again
+ Props2 = [{n_fragments, 1}, {node_pool, [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {activate, Props2})),
+ ?match({atomic, ok}, mnesia:delete_table(TabF)),
+ ?match({atomic, ok}, mnesia:create_table(TabF, DefF)),
+ [frag_write(TabF, {TabF, {Id}, Id}) || Id <- lists:seq(1, 16)],
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, frag_dist(Tab)})),
+ ?match([{Node1, 2}, {Node2, 2}], frag_dist(Tab)),
+ ?match([6, 2], frag_rec_dist(Tab)),
+ ?match([{Node1, 2}, {Node2, 2}], frag_dist(TabF)),
+ ?match([10, 6], frag_rec_dist(TabF)),
+
+ %% Deleting the fragmented tables
+ ?match({atomic, ok}, mnesia:delete_table(TabF)),
+ ?match(false, lists:member(TabF, mnesia:system_info(tables))),
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+ ?match(false, lists:member(Tab, mnesia:system_info(tables))),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+nice_access(doc) ->
+ ["Cover entire callback interface"];
+nice_access(suite) -> [];
+nice_access(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config),
+
+ Tab = frag_access,
+ Pool = lists:sort(Nodes),
+ Props = [{n_fragments, 20},
+ {n_ram_copies, 2},
+ {node_pool, Pool}],
+ Def = [{frag_properties, Props},
+ {type, ordered_set},
+ {index, [val]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ [frag_write(Tab, {Tab, Id, Id}) || Id <- lists:seq(1, 400)],
+
+ %% And connect another table to it, via a foreign key
+ TabF = frag_access_slave,
+ PropsF = [{foreign_key, {Tab, val}}],
+ DefF = [{frag_properties, PropsF},
+ {index, [val]}],
+ ?match({atomic, ok}, mnesia:create_table(TabF, DefF)),
+ [frag_write(TabF, {TabF, Id, Id}) || Id <- lists:seq(1, 400)],
+
+ ?match(done, mnesia:activity(transaction, fun do_access/3, [Tab, Tab, Pool], mnesia_frag)),
+ ?match(done, mnesia:activity(transaction, fun do_access/3, [TabF, Tab, Pool], mnesia_frag)),
+
+ ?verify_mnesia(Nodes, []).
+
+do_access(Tab, Master, Pool) ->
+ ?match(20, mnesia:table_info(Tab, n_fragments)),
+ ?match(Pool, mnesia:table_info(Tab, node_pool)),
+ ?match(2, mnesia:table_info(Tab, n_ram_copies)),
+ ?match(0, mnesia:table_info(Tab, n_disc_copies)),
+ ?match(0, mnesia:table_info(Tab, n_disc_only_copies)),
+ ?match(20, length(mnesia:table_info(Tab, frag_names))),
+ ?match(20, length(mnesia:table_info(Tab, frag_size))),
+ ?match(20, length(mnesia:table_info(Tab, frag_memory))),
+ PoolSize = length(Pool),
+ ?match(PoolSize, length(mnesia:table_info(Tab, frag_dist))),
+ ?match(400, mnesia:table_info(Tab, size)),
+ ?match(I when is_integer(I), mnesia:table_info(Tab, memory)),
+ ?match(Tab, mnesia:table_info(Tab, base_table)),
+
+ Foreign =
+ if
+ Master == Tab ->
+ ?match(undefined, mnesia:table_info(Tab, foreign_key)),
+ ?match([_], mnesia:table_info(Tab, foreigners)),
+ ?match({'EXIT', {aborted, {combine_error, Tab, frag_properties, {foreign_key, undefined}}}},
+ mnesia:read({Tab, 5}, 5, read)),
+ fun({T, _K}) -> T end;
+ true ->
+ ?match({Master, 3}, mnesia:table_info(Tab, foreign_key)),
+ ?match([], mnesia:table_info(Tab, foreigners)),
+ fun({T, K}) -> {T, K} end
+ end,
+
+ Attr = val,
+ ?match(400, mnesia:table_info(Tab, size)),
+ Count = fun(_, N) -> N + 1 end,
+ ?match(400, mnesia:foldl(Count, 0, Tab)),
+ ?match(400, mnesia:foldr(Count, 0, Tab)),
+ ?match(ok, mnesia:write({Tab, [-1], 1})),
+ ?match(401, length(mnesia:match_object(Tab, {Tab, '_', '_'}, read))),
+ ?match(401, length(mnesia:select(Tab, [{{Tab, '_', '$1'}, [], ['$1']}], read))),
+
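+ %% Iterate over the whole table via the select continuation, 10 records
+ %% at a time, and verify that all 401 records are visited.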
+ First = mnesia:select(Tab, [{{Tab, '_', '$1'}, [], ['$1']}], 10, read),
+ TestCont = fun('$end_of_table', Total, _This) ->
+ Total;
+ ({Res,Cont1}, Total, This) ->
+ Cont = mnesia:select(Cont1),
+ This(Cont, length(Res) + Total, This)
+ end,
+ ?match(401, TestCont(First, 0, TestCont)),
+
+ %% OTP
+ [_, Frag2|_] = frag_names(Tab),
+ Frag2key = mnesia:dirty_first(Frag2),
+ ?match({[Frag2key],_},mnesia:select(Tab,[{{Tab,Frag2key,'$1'},[],['$1']}],100,read)),
+
+ ?match([{Tab, [-1], 1}], mnesia:read(Foreign({Tab, 1}), [-1], read)),
+ ?match(401, mnesia:foldl(Count, 0, Tab)),
+ ?match(401, mnesia:foldr(Count, 0, Tab)),
+ ?match(ok, mnesia:delete(Foreign({Tab, 2}), 2, write)),
+ ?match([], mnesia:read(Foreign({Tab, 2}), 2, read)),
+ ?match([{Tab, 3, 3}], mnesia:read(Foreign({Tab, 3}), 3, read)),
+ ?match(400, mnesia:foldl(Count, 0, Tab)),
+ ?match(400, mnesia:foldr(Count, 0, Tab)),
+ ?match(ok, mnesia:delete_object({Tab, 3, 3})),
+ ?match([], mnesia:read(Foreign({Tab, 3}), 3, read)),
+ One = lists:sort([{Tab, 1, 1}, {Tab, [-1], 1}]),
+ Pat = {Tab, '$1', 1},
+ ?match(One, lists:sort(mnesia:match_object(Tab, Pat, read))),
+ ?match([1,[-1]], lists:sort(mnesia:select(Tab, [{Pat, [], ['$1']}], read))),
+ ?match([[[-1]]], lists:sort(mnesia:select(Tab, [{Pat, [{is_list, '$1'}], [['$1']]}], read))),
+ ?match([[1, 100]], lists:sort(mnesia:select(Tab, [{Pat, [{is_integer, '$1'}], [['$1',100]]}], read))),
+ ?match([1,[-1]], lists:sort(mnesia:select(Tab, [{Pat, [{is_list, '$1'}], ['$1']},{Pat, [{is_integer, '$1'}], ['$1']}], read))),
+ ?match(One, lists:sort(mnesia:index_match_object(Tab, Pat, Attr, read) )),
+ ?match(One, lists:sort(mnesia:index_read(Tab, 1, Attr))),
+ Keys = mnesia:all_keys(Tab),
+ ?match([-1], lists:max(Keys)), %% OTP-3779
+ ?match(399, length(Keys)),
+ ?match(399, mnesia:foldl(Count, 0, Tab)),
+ ?match(399, mnesia:foldr(Count, 0, Tab)),
+
+ ?match(Pool, lists:sort(mnesia:lock({table, Tab}, write))),
+
+ done.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+iter_access(doc) ->
+ ["Cover table iteration via callback interface"];
+iter_access(suite) -> [];
+iter_access(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config),
+
+ Tab = frag_access,
+ Pool = lists:sort(Nodes),
+ Props = [{n_fragments, 20},
+ {n_ram_copies, 2},
+ {node_pool, Pool}],
+ Def = [{frag_properties, Props},
+ {type, ordered_set},
+ {index, [val]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ [frag_write(Tab, {Tab, Id, Id}) || Id <- lists:seq(1, 400)],
+
+ FragNames = frag_names(Tab),
+ RawRead =
+ fun(Frag) ->
+ Node = mnesia:table_info(Frag, where_to_read),
+ {Frag, rpc:call(Node, ets, tab2list, [Frag])}
+ end,
+
+ ?match(done, mnesia:activity(transaction, fun nice_iter_access/3, [Tab, FragNames, RawRead], mnesia_frag)),
+
+ FragNames = frag_names(Tab),
+ [First, Second | _] = FragNames,
+ [Last, LastButOne | _] = lists:reverse(FragNames),
+
+ ?match({atomic, ok}, mnesia:clear_table(First)),
+ ?match({atomic, ok}, mnesia:clear_table(Second)),
+ ?match({atomic, ok}, mnesia:clear_table(lists:nth(8, FragNames))),
+ ?match({atomic, ok}, mnesia:clear_table(lists:nth(9, FragNames))),
+ ?match({atomic, ok}, mnesia:clear_table(lists:nth(10, FragNames))),
+ ?match({atomic, ok}, mnesia:clear_table(lists:nth(11, FragNames))),
+ ?match({atomic, ok}, mnesia:clear_table(LastButOne)),
+ ?match({atomic, ok}, mnesia:clear_table(Last)),
+
+ ?match(done, mnesia:activity(transaction, fun evil_iter_access/3, [Tab, FragNames, RawRead], mnesia_frag)),
+ Size = fun(Table) -> mnesia:table_info(Table, size) end,
+ ?match(true, 0 < mnesia:activity(transaction, Size, [Tab], mnesia_frag)),
+ ?match({atomic, ok}, mnesia:activity(ets, fun() -> mnesia:clear_table(Tab) end, mnesia_frag)),
+ ?match(0, mnesia:activity(transaction, Size, [Tab], mnesia_frag)),
+
+ ?verify_mnesia(Nodes, []).
+
+nice_iter_access(Tab, FragNames, RawRead) ->
+ RawData = ?ignore(lists:map(RawRead, FragNames)),
+ Keys = [K || {_, Recs} <- RawData, {_, K, _} <- Recs],
+ ExpectedFirst = hd(Keys),
+ ?match(ExpectedFirst, mnesia:first(Tab)),
+ ExpectedLast = lists:last(Keys),
+ ?match(ExpectedLast, mnesia:last(Tab)),
+
+ ExpectedAllPrev = ['$end_of_table' | lists:reverse(tl(lists:reverse(Keys)))],
+ ?match(ExpectedAllPrev, lists:map(fun(K) -> mnesia:prev(Tab, K) end, Keys)),
+
+ ExpectedAllNext = tl(Keys) ++ ['$end_of_table'],
+ ?match(ExpectedAllNext, lists:map(fun(K) -> mnesia:next(Tab, K) end, Keys)),
+
+ done.
+
+evil_iter_access(Tab, FragNames, RawRead) ->
+ RawData = ?ignore(lists:map(RawRead, FragNames)),
+ Keys = [K || {_, Recs} <- RawData, {_, K, _} <- Recs],
+ ExpectedFirst = hd(Keys),
+ ?match(ExpectedFirst, mnesia:first(Tab)),
+ ExpectedLast = lists:last(Keys),
+ ?match(ExpectedLast, mnesia:last(Tab)),
+
+ ExpectedAllPrev = ['$end_of_table' | lists:reverse(tl(lists:reverse(Keys)))],
+ ?match(ExpectedAllPrev, lists:map(fun(K) -> mnesia:prev(Tab, K) end, Keys)),
+
+ ExpectedAllNext = tl(Keys) ++ ['$end_of_table'],
+ ?match(ExpectedAllNext, lists:map(fun(K) -> mnesia:next(Tab, K) end, Keys)),
+
+ done.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+consistency(doc) ->
+ ["Add and delete fragments during TPC-B"];
+consistency(suite) -> [];
+consistency(Config) when is_list(Config) ->
+ ?skip("Not yet implemented (NYI).~n", []),
+ Nodes = ?acquire_nodes(2, Config),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil(doc) ->
+ ["Evil coverage of fragmentation API."];
+evil(suite) ->
+ [
+ evil_create,
+ evil_delete,
+ evil_change,
+ evil_combine,
+ evil_loop,
+ evil_delete_db_node
+ ].
+
+evil_create(suite) -> [];
+evil_create(Config) when is_list(Config) ->
+ [Node1, _Node2] = Nodes = ?acquire_nodes(2, Config),
+
+ Create = fun(T, D, P) -> mnesia:create_table(T, [{frag_properties, P}| D]) end,
+
+ Tab = evil_create,
+ %% Props in general
+ ?match({aborted, {badarg, Tab, {frag_properties, no_list}}},
+ Create(Tab, [], no_list)),
+ ?match({aborted, {badarg,Tab , [no_tuple]}},
+ Create(Tab, [], [no_tuple])),
+ ?match({aborted,{badarg, Tab, bad_key}},
+ Create(Tab, [], [{bad_key, 7}])),
+
+ %% n_fragments
+ ?match({aborted,{badarg, Tab, [{n_fragments}]}},
+ Create(Tab, [], [{n_fragments}])),
+ ?match({aborted,{badarg, Tab, [{n_fragments, 1, 1}]}},
+ Create(Tab, [], [{n_fragments, 1, 1}])),
+ ?match({aborted, {bad_type,Tab, {n_fragments, a}}},
+ Create(Tab, [], [{n_fragments, a}])),
+ ?match({aborted, {bad_type, Tab, {n_fragments, 0}}},
+ Create(Tab, [], [{n_fragments, 0}])),
+
+ %% *_copies
+ ?match({aborted, {bad_type, Tab, {n_ram_copies, -1}}},
+ Create(Tab, [], [{n_ram_copies, -1}, {n_fragments, 1}])),
+ ?match({aborted, {bad_type, Tab, {n_disc_copies, -1}}},
+ Create(Tab, [], [{n_disc_copies, -1}, {n_fragments, 1}])),
+ ?match({aborted, {bad_type, Tab, {n_disc_only_copies, -1}}},
+ Create(Tab, [], [{n_disc_only_copies, -1}, {n_fragments, 1}])),
+
+ %% node_pool
+ ?match({aborted, {bad_type, Tab, {node_pool, 0}}},
+ Create(Tab, [], [{node_pool, 0}])),
+ ?match({aborted, {combine_error, Tab, "Too few nodes in node_pool"}},
+ Create(Tab, [], [{n_ram_copies, 2}, {node_pool, [Node1]}])),
+
+ %% foreign_key
+ ?match({aborted, {bad_type, Tab, {foreign_key, bad_key}}},
+ Create(Tab, [], [{foreign_key, bad_key}])),
+ ?match({aborted,{bad_type, Tab, {foreign_key, {bad_key}}}},
+ Create(Tab, [], [{foreign_key, {bad_key}}])),
+ ?match({aborted, {no_exists, {bad_tab, frag_properties}}},
+ Create(Tab, [], [{foreign_key, {bad_tab, val}}])),
+ ?match({aborted, {combine_error, Tab, {Tab, val}}},
+ Create(Tab, [], [{foreign_key, {Tab, val}}])),
+ ?match({atomic, ok},
+ Create(Tab, [], [{n_fragments, 1}])),
+
+ ?match({aborted, {already_exists, Tab}},
+ Create(Tab, [], [{n_fragments, 1}])),
+
+ Tab2 = evil_create2,
+ ?match({aborted, {bad_type, no_attr}},
+ Create(Tab2, [], [{foreign_key, {Tab, no_attr}}])),
+ ?match({aborted, {combine_error, Tab2, _, _, _}},
+ Create(Tab2, [], [{foreign_key, {Tab, val}},
+ {node_pool, [Node1]}])),
+ ?match({aborted, {combine_error, Tab2, _, _, _}},
+ Create(Tab2, [], [{foreign_key, {Tab, val}},
+ {n_fragments, 2}])),
+ ?match({atomic, ok},
+ Create(Tab2, [{attributes, [a, b, c]}], [{foreign_key, {Tab, c}}])),
+ Tab3 = evil_create3,
+ ?match({aborted, {combine_error, Tab3, _, _, _}},
+ Create(Tab3, [{attributes, [a, b]}], [{foreign_key, {Tab2, b}}])),
+ ?match({atomic, ok},
+ Create(Tab3, [{attributes, [a, b]}], [{foreign_key, {Tab, b}}])),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil_delete(suite) -> [];
+evil_delete(Config) when is_list(Config) ->
+ ?skip("Not yet implemented (NYI).~n", []),
+ Nodes = ?acquire_nodes(2, Config),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil_change(suite) -> [];
+evil_change(Config) when is_list(Config) ->
+ [N1,N2,_N3] = Nodes = ?acquire_nodes(3, Config),
+ Create = fun(T, D, P) -> mnesia:create_table(T, [{frag_properties, P}| D]) end,
+ Props1 = [{n_fragments, 2}, {node_pool, [N1]}],
+ Tab1 = evil_change_ram,
+ ?match({atomic, ok}, Create(Tab1, [], Props1)),
+
+ ?match({atomic,ok}, mnesia:change_table_frag(Tab1, {add_frag, Nodes})),
+ Dist10 = frag_dist(Tab1),
+ ?match([{N1,3}], Dist10),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab1, {add_node, N2})),
+ Dist11 = frag_dist(Tab1),
+ ?match([{N2,0},{N1,3}], Dist11),
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match({aborted,_}, mnesia:change_table_frag(Tab1, {add_frag, [N2,N1]})),
+ ?verbose("~p~n",[frag_dist(Tab1)]),
+ mnesia_test_lib:start_mnesia([N2]),
+
+ Tab2 = evil_change_disc,
+ ?match({atomic,ok}, Create(Tab2,[],[{n_disc_copies,1},{n_fragments,1},{node_pool,[N1,N2]}])),
+ ?verbose("~p~n", [frag_dist(Tab2)]),
+ ?match({atomic,ok}, mnesia:change_table_frag(Tab2, {add_frag, [N1,N2]})),
+ _Dist20 = frag_dist(Tab2),
+ mnesia_test_lib:kill_mnesia([N2]),
+ ?match({atomic,ok}, mnesia:change_table_frag(Tab2, {add_frag, [N1,N2]})),
+ ?match({aborted,_}, mnesia:change_table_frag(Tab2, {add_frag, [N2,N1]})),
+
+ mnesia_test_lib:start_mnesia([N2]),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil_combine(doc) -> ["Bug in mnesia_4.1.5 and earlier"];
+evil_combine(suite) -> [];
+evil_combine(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ ?match({atomic, ok},mnesia:create_table(tab1, [{disc_copies, [Node1]},
+ {frag_properties, [{n_fragments, 2},
+ {node_pool, [Node1]},
+ {n_disc_copies, 1}]}])),
+ ?match({atomic, ok},mnesia:create_table(tab2, [{disc_copies, [Node1]}])),
+ mnesia:wait_for_tables([tab1, tab2], infinity),
+
+ Add2 = fun() ->
+ mnesia:transaction(fun() ->
+ mnesia:write({tab2,1,1})
+ end)
+ end,
+ Fun = fun() ->
+ Add2(),
+ mnesia:write({tab1,9,10})
+ end,
+ ?match(ok, mnesia:activity({transaction, 1}, Fun, [], mnesia_frag)),
+
+ Read = fun(T, K) ->
+ mnesia:read(T, K, read)
+ end,
+
+ ?match([{tab1,9,10}],mnesia:activity(async_dirty, Read, [tab1, 9], mnesia_frag)),
+ ?match([{tab2,1,1}],mnesia:activity(async_dirty, Read, [tab2, 1], mnesia_frag)),
+
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil_loop(doc) -> ["Test select/[14]"];
+evil_loop(suite) -> [];
+evil_loop(Config) when is_list(Config) ->
+ [Node1,_Node2] = ?acquire_nodes(2, Config),
+ Tab1 = ss_oset,
+ Tab2 = ss_set,
+ Tab3 = ss_bag,
+ Tabs = [Tab1, Tab2, Tab3],
+ RecName = ss,
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab1},
+ {ram_copies, [Node1]},
+ {record_name, RecName},
+ {type, ordered_set}])),
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab2},
+ {record_name, RecName},
+ {ram_copies, [Node1]},
+ {type, set}])),
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab3},
+ {record_name, RecName},
+ {ram_copies, [Node1]},
+ {type, bag}])),
+ Keys = [-3, -2] ++ lists:seq(1, 5, 2) ++ lists:seq(6, 10),
+ Recs = [{RecName, K, K} || K <- Keys],
+ [mnesia:dirty_write(Tab1, R) || R <- Recs],
+ [mnesia:dirty_write(Tab2, R) || R <- Recs],
+ [mnesia:dirty_write(Tab3, R) || R <- Recs],
+
+ Activate =
+ fun(Tab) ->
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {activate, []})),
+ Dist = frag_dist(Tab),
+ ?match({atomic, ok}, mnesia:change_table_frag(Tab, {add_frag, Dist}))
+ end,
+
+ Activate(Tab1),
+ Activate(Tab2),
+ Activate(Tab3),
+
+ Match = fun(Tab) -> mnesia:match_object(Tab, {'_', '_', '_'}, write) end,
+ Select = fun(Tab) -> mnesia:select(Tab, [{'_', [], ['$_']}]) end,
+ Trans = fun(Fun, Args) -> mnesia:activity(transaction, Fun, Args, mnesia_frag) end,
+ LoopHelp = fun('$end_of_table',_) ->
+ [];
+ ({Res,Cont},Fun) ->
+ Sel = mnesia:select(Cont),
+ Res ++ Fun(Sel, Fun)
+ end,
+ SelLoop = fun(Table) ->
+ Sel = mnesia:select(Table, [{'_', [], ['$_']}], 1, read),
+ LoopHelp(Sel, LoopHelp)
+ end,
+
+ R1 = {RecName, 2, 2},
+ R2 = {RecName, 4, 4},
+ R3 = {RecName, 2, 3},
+ R4 = {RecName, 3, 1},
+ R5 = {RecName, 104, 104},
+ W1 = fun(Tab,Search) ->
+ mnesia:write(Tab,R1,write),
+ mnesia:write(Tab,R2,write),
+ Search(Tab)
+ end,
+ S1 = lists:sort([R1, R2| Recs]),
+ ?match(S1, sort_res(Trans(W1, [Tab1, Select]))),
+ ?match(S1, sort_res(Trans(W1, [Tab1, Match]))),
+ ?match(S1, sort_res(Trans(W1, [Tab1, SelLoop]))),
+ ?match(S1, sort_res(Trans(W1, [Tab2, Select]))),
+ ?match(S1, sort_res(Trans(W1, [Tab2, SelLoop]))),
+ ?match(S1, sort_res(Trans(W1, [Tab2, Match]))),
+ ?match(S1, sort_res(Trans(W1, [Tab3, Select]))),
+ ?match(S1, sort_res(Trans(W1, [Tab3, SelLoop]))),
+ ?match(S1, sort_res(Trans(W1, [Tab3, Match]))),
+ [mnesia:dirty_delete_object(Frag, R) || R <- [R1, R2],
+ Tab <- Tabs,
+ Frag <- frag_names(Tab)],
+
+ W2 = fun(Tab, Search) ->
+ mnesia:write(Tab, R3, write),
+ mnesia:write(Tab, R1, write),
+ Search(Tab)
+ end,
+ S2 = lists:sort([R1 | Recs]),
+ S2Bag = lists:sort([R1, R3 | Recs]),
+ io:format("S2 = ~p\n", [S2]),
+ ?match(S2, sort_res(Trans(W2, [Tab1, Select]))),
+ ?match(S2, sort_res(Trans(W2, [Tab1, SelLoop]))),
+ ?match(S2, sort_res(Trans(W2, [Tab1, Match]))),
+ ?match(S2, sort_res(Trans(W2, [Tab2, Select]))),
+ ?match(S2, sort_res(Trans(W2, [Tab2, SelLoop]))),
+ ?match(S2, sort_res(Trans(W2, [Tab2, Match]))),
+ io:format("S2Bag = ~p\n", [S2Bag]),
+ ?match(S2Bag, sort_res(Trans(W2, [Tab3, Select]))),
+ ?match(S2Bag, sort_res(Trans(W2, [Tab3, SelLoop]))),
+ ?match(S2Bag, sort_res(Trans(W2, [Tab3, Match]))),
+
+ W3 = fun(Tab,Search) ->
+ mnesia:write(Tab, R4, write),
+ mnesia:delete(Tab, element(2, R1), write),
+ Search(Tab)
+ end,
+ S3Bag = lists:sort([R4 | lists:delete(R1, Recs)]),
+ S3 = lists:delete({RecName, 3, 3}, S3Bag),
+ ?match(S3, sort_res(Trans(W3, [Tab1, Select]))),
+ ?match(S3, sort_res(Trans(W3, [Tab1, SelLoop]))),
+ ?match(S3, sort_res(Trans(W3, [Tab1, Match]))),
+ ?match(S3, sort_res(Trans(W3, [Tab2, SelLoop]))),
+ ?match(S3, sort_res(Trans(W3, [Tab2, Select]))),
+ ?match(S3, sort_res(Trans(W3, [Tab2, Match]))),
+ ?match(S3Bag, sort_res(Trans(W3, [Tab3, Select]))),
+ ?match(S3Bag, sort_res(Trans(W3, [Tab3, SelLoop]))),
+ ?match(S3Bag, sort_res(Trans(W3, [Tab3, Match]))),
+
+ W4 = fun(Tab,Search) ->
+ mnesia:delete(Tab, -1, write),
+ mnesia:delete(Tab, 4 , write),
+ mnesia:delete(Tab, 17, write),
+ mnesia:delete_object(Tab, {RecName, -1, x}, write),
+ mnesia:delete_object(Tab, {RecName, 4, x}, write),
+ mnesia:delete_object(Tab, {RecName, 42, x}, write),
+ mnesia:delete_object(Tab, R2, write),
+ mnesia:write(Tab, R5, write),
+ Search(Tab)
+ end,
+ S4Bag = lists:sort([R5 | S3Bag]),
+ S4 = lists:sort([R5 | S3]),
+ ?match(S4, sort_res(Trans(W4, [Tab1, Select]))),
+ ?match(S4, sort_res(Trans(W4, [Tab1, SelLoop]))),
+ ?match(S4, sort_res(Trans(W4, [Tab1, Match]))),
+ ?match(S4, sort_res(Trans(W4, [Tab2, Select]))),
+ ?match(S4, sort_res(Trans(W4, [Tab2, SelLoop]))),
+ ?match(S4, sort_res(Trans(W4, [Tab2, Match]))),
+ ?match(S4Bag, sort_res(Trans(W4, [Tab3, Select]))),
+ ?match(S4Bag, sort_res(Trans(W4, [Tab3, SelLoop]))),
+ ?match(S4Bag, sort_res(Trans(W4, [Tab3, Match]))),
+ [mnesia:dirty_delete_object(Tab, R) || R <- [{RecName, 3, 3}, R5], Tab <- Tabs],
+
+ %% hmmm anything more??
+
+ ?verify_mnesia([Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+evil_delete_db_node(doc) ->
+    ["Delete db_node with a replicated table with a foreign key"];
+evil_delete_db_node(suite) -> [];
+evil_delete_db_node(Config) when is_list(Config) ->
+ Nodes = lists:sort(?acquire_nodes(2, Config)),
+ Local = node(),
+ Remote = hd(Nodes -- [Local]),
+
+ Type = case mnesia_test_lib:diskless(Config) of
+ true -> n_ram_copies;
+ false -> n_disc_copies
+ end,
+ Tab = frag_master,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{frag_properties, [{Type, 2}, {node_pool, Nodes}]}])),
+ ExtraTab = frag_foreigner,
+ ?match({atomic, ok}, mnesia:create_table(ExtraTab, [{frag_properties, [{foreign_key, {Tab, key}}, {node_pool, Nodes}]}])),
+
+ GetPool = fun(T) ->
+ case lists:keysearch(node_pool, 1, mnesia:table_info (T, frag_properties)) of
+ {value, {node_pool, N}} -> lists:sort(N);
+ false -> []
+ end
+ end,
+ ?match(Nodes, GetPool(Tab)),
+ ?match(Nodes, GetPool(ExtraTab)),
+
+
+ ?match(stopped, rpc:call(Remote, mnesia, stop, [])),
+ ?match({atomic, ok}, mnesia:del_table_copy(schema, Remote)),
+
+ ?match([Local], GetPool(Tab)),
+ ?match([Local], GetPool(ExtraTab)),
+
+ ?verify_mnesia([Local], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Misc convenient helpers
+
+frag_write(Tab, Rec) ->
+ Fun = fun() -> mnesia:write(Tab, Rec, write) end,
+ mnesia:activity(sync_dirty, Fun, mnesia_frag).
+
+frag_dist(Tab) ->
+ Fun = fun() -> mnesia:table_info(Tab, frag_dist) end,
+ mnesia:activity(sync_dirty, Fun, mnesia_frag).
+
+frag_names(Tab) ->
+ Fun = fun() -> mnesia:table_info(Tab, frag_names) end,
+ mnesia:activity(sync_dirty, Fun, mnesia_frag).
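+
+%% Note on naming (an assumption about mnesia_frag's convention, not
+%% something this suite asserts): the first fragment of a table Tab is
+%% named Tab itself and the rest are Tab_frag2 .. Tab_fragN, so for the
+%% 20-fragment frag_access table created in iter_access/1 above,
+%% frag_names(frag_access) should return roughly
+%% [frag_access, frag_access_frag2, ..., frag_access_frag20].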
+
+frag_rec_dist(Tab) ->
+ Fun = fun() -> mnesia:table_info(Tab, frag_size) end,
+ [Size || {_, Size} <- mnesia:activity(sync_dirty, Fun, mnesia_frag)].
+
+table_size(Tab) ->
+ Node = mnesia:table_info(Tab, where_to_read),
+ rpc:call(Node, mnesia, table_info, [Tab, size]).
+
+sort_res(List) when is_list(List) ->
+ lists:sort(List);
+sort_res(Else) ->
+ Else.
+
+rev_res(List) when is_list(List) ->
+ lists:reverse(List);
+rev_res(Else) ->
+ Else.
diff --git a/lib/mnesia/test/mnesia_inconsistent_database_test.erl b/lib/mnesia/test/mnesia_inconsistent_database_test.erl
new file mode 100644
index 0000000000..b19cd8e01b
--- /dev/null
+++ b/lib/mnesia/test/mnesia_inconsistent_database_test.erl
@@ -0,0 +1,74 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_inconsistent_database_test).
+-author('[email protected]').
+
+-behaviour(gen_event).
+
+%%-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+%% gen_event callback interface
+-export([init/1, handle_event/2, handle_call/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+
+init(_Args) ->
+ ?verbose("~p installed as event_module~n", [?MODULE]),
+ {ok, []}.
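+
+%% A minimal sketch of how a gen_event module like this one is typically
+%% hooked into Mnesia (an assumption about the surrounding test harness;
+%% this module does not register itself). Mnesia picks the handler up from
+%% its 'event_module' configuration parameter at start:
+%%
+%%   application:set_env(mnesia, event_module, ?MODULE),
+%%   mnesia:start().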
+
+handle_event(Msg, State) ->
+ handle_any_event(Msg, State).
+
+handle_info(Msg, State) ->
+ handle_any_event(Msg, State).
+
+
+handle_call(Msg, State) ->
+ handle_any_event(Msg, State).
+
+
+%% The main...
+
+handle_any_event({mnesia_system_event, Event}, State)
+ when element(1, Event) == inconsistent_database ->
+ ?error("Got event: ~p~n", [Event]),
+ {ok, State};
+handle_any_event(Msg, State) ->
+ ?verbose("Got event: ~p~n", [Msg]),
+ {ok, State}.
+
+%%-----------------------------------------------------------------
+%% terminate(Reason, State) ->
+%% AnyVal
+%%-----------------------------------------------------------------
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%----------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Upgrade process when its code is to be changed
+%% Returns: {ok, NewState}
+%%----------------------------------------------------------------------
+code_change(_OldVsn, _State, _Extra) ->
+ exit(not_supported).
+
diff --git a/lib/mnesia/test/mnesia_install_test.erl b/lib/mnesia/test/mnesia_install_test.erl
new file mode 100644
index 0000000000..42a2a19f37
--- /dev/null
+++ b/lib/mnesia/test/mnesia_install_test.erl
@@ -0,0 +1,342 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_install_test).
+-author('[email protected]').
+
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Run some small but demanding test cases in order to verify",
+ "that the basic functionality in Mnesia still works.",
+ "",
+ "Try some very simple things to begin with and increase the",
+ "difficulty stepwise. This test suite should be run before",
+ "all the others if you expect to find bugs.",
+ "",
+ "The function mnesia_install_test:silly() does not use the whole",
+     "infrastructure of the test suite. Invoke it on a single node to",
+     "begin with. If that works, proceed with pong = net_adm:ping(SomeOtherNode)",
+     "and rerun silly() in order to perform some distributed tests,",
+     "as sketched in the comment below the suite clause."];
+all(suite) ->
+ [
+ silly_durability,
+ silly_move,
+ silly_upgrade
+ %,stress
+ ].
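+
+%% A minimal shell sketch of the manual procedure described in all(doc)
+%% above (SomeOtherNode is a placeholder for a real, reachable node name):
+%%
+%%   1> mnesia_install_test:silly().
+%%   2> pong = net_adm:ping(SomeOtherNode).
+%%   3> mnesia_install_test:silly().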
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Stepwise testing of more and more advanced features
+silly() ->
+ Nodes = [node()] ++ nodes(),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ Config = [{nodes, Nodes}],
+ mnesia_test_lib:eval_test_case(?MODULE, silly2, Config).
+
+silly2(Config) when is_list(Config) ->
+ [Node1 | _] = Nodes = ?acquire_nodes(3, Config),
+ mnesia_test_lib:kill_mnesia(Nodes),
+ ?ignore([mnesia:delete_schema([N]) || N <- Nodes]),
+ ?match(ok, mnesia:create_schema([Node1])),
+ ?match(ok, rpc:call(Node1, mnesia, start, [])),
+ ?match(ok, rpc:call(Node1, mnesia, wait_for_tables,
+ [[schema], infinity])),
+ Res = silly_durability(Config),
+ StressFun = fun(F) -> apply(?MODULE, F, [Config]) end,
+ R =
+ case length(Nodes) of
+ L when L > 1 ->
+ Node2 = lists:nth(2, Nodes),
+ AddDb = [schema, Node2, ram_copies],
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia, add_table_copy, AddDb)),
+ Args = [[{extra_db_nodes, [Node1]}]],
+ ?match(ok, rpc:call(Node2, mnesia, start, Args)),
+ ChangeDb = [schema, Node2, disc_copies],
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia, change_table_copy_type,
+ ChangeDb)),
+ ?match([], mnesia_test_lib:sync_tables([Node1, Node2],
+ [schema])),
+ MoveRes = silly_move(Config),
+ UpgradeRes = silly_upgrade(Config),
+ StressRes = [StressFun(F) || F <- stress(suite)],
+ ?verify_mnesia([Node2], []),
+ [Res, MoveRes, UpgradeRes] ++ StressRes;
+ _ ->
+ StressRes = [StressFun(F) || F <- stress(suite)],
+ ?warning("Too few nodes. Perform net_adm:ping(OtherNode) "
+ "and rerun!!!~n", []),
+ [Res | StressRes]
+ end,
+ ?verify_mnesia([Node1], []),
+ R.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+silly_durability(doc) ->
+ ["Simple test of durability"];
+silly_durability(suite) -> [];
+silly_durability(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = silly,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+
+ ?match({atomic, ok}, rpc:call(Node1, mnesia,
+ create_table, [Tab, [{Storage, [Node1]}]])),
+
+ Read = fun() -> mnesia:read({Tab, a}) end,
+ Write = fun() -> mnesia:write({Tab, a, b}) end,
+
+ ?match({atomic, []},
+ rpc:call(Node1, mnesia, transaction, [Read])),
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia, transaction, [Write])),
+ ?match({atomic, [{Tab, a, b}]},
+ rpc:call(Node1, mnesia, transaction, [Read])),
+
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ ?match(ok, rpc:call(Node1, mnesia, start, [])),
+ case mnesia_test_lib:diskless(Config) of
+ true ->
+ skip;
+ false ->
+ ?match(ok, rpc:call(Node1, mnesia, wait_for_tables, [[Tab], infinity])),
+ ?match({atomic, [{Tab, a, b}]},
+ rpc:call(Node1, mnesia, transaction, [Read]))
+ end,
+ ?verify_mnesia([Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+silly_move(doc) ->
+ ["Simple test of movement of a replica from one node to another"];
+silly_move(suite) -> [];
+silly_move(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+ Tab = silly_move,
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia,
+ create_table, [Tab, [{ram_copies, [Node2]}]])),
+ ?match([], mnesia_test_lib:sync_tables([Node1, Node2], [Tab])),
+
+ Read = fun() -> mnesia:read({Tab, a}) end,
+ Write = fun() -> mnesia:write({Tab, a, b}) end,
+
+ ?match({atomic, []},
+ rpc:call(Node1, mnesia, transaction, [Read])),
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia, transaction, [Write])),
+ ?match({atomic, [{Tab, a, b}]},
+ rpc:call(Node1, mnesia, transaction, [Read])),
+
+ case mnesia_test_lib:diskless(Config) of
+ true -> skip;
+ false ->
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia,
+ change_table_copy_type, [Tab, Node2, disc_only_copies])),
+ ?match([], mnesia_test_lib:sync_tables([Node1, Node2], [Tab]))
+ end,
+ ?match({atomic, [{Tab, a, b}]}, rpc:call(Node1, mnesia, transaction, [Read])),
+
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia,
+ move_table_copy, [Tab, Node2, Node1])),
+ ?match([], mnesia_test_lib:sync_tables([Node1, Node2], [Tab])),
+ ?match({atomic, [{Tab, a, b}]},
+ rpc:call(Node1, mnesia, transaction, [Read])),
+ ?verify_mnesia([Node1], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+silly_upgrade(doc) ->
+ ["Simple test of a schema upgrade and restore from backup"];
+silly_upgrade(suite) -> [];
+silly_upgrade(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Name = silly_upgrade,
+ Tab1 = silly_upgrade1,
+ Tab2 = silly_upgrade2,
+ Bup = "silly_upgrade.BUP",
+ Bup2 = "silly_upgrade_part.BUP",
+ ?match({atomic, ok}, mnesia:create_table(Tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, [{disc_only_copies, Nodes}])),
+
+ CpState = add_some_records(Tab1, Tab2, []),
+ ?match(match, verify_state(Tab1, Tab2, CpState)),
+ file:delete(Bup),
+ ?match(ok, mnesia:backup(Bup)),
+ Args = [{name, Name}, {ram_overrides_dump, true},
+ {min, [Tab1, schema]}, {max, [Tab2]}],
+ ?match({ok, Name, _}, mnesia:activate_checkpoint(Args)),
+
+ IgnoreState = add_more_records(Tab1, Tab2, CpState),
+ ?match(match, verify_state(Tab1, Tab2, IgnoreState)),
+ ?match({mismatch, _, _}, verify_state(Tab1, Tab2, CpState)),
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab2, Node1)),
+ file:delete(Bup2),
+ ?match(ok, mnesia:backup_checkpoint(Name, Bup2)),
+
+ UpgradeState = transform_some_records(Tab1, Tab2, IgnoreState),
+ ?match({mismatch, _, _}, verify_state(Tab1, Tab2, CpState)),
+ ?match({mismatch, _, _}, verify_state(Tab1, Tab2, IgnoreState)),
+ ?match(match, verify_state(Tab1, Tab2, UpgradeState)),
+
+ ?match(ok, mnesia:deactivate_checkpoint(Name)),
+ ?match(match, verify_state(Tab1, Tab2, UpgradeState)),
+
+ ?match(ok, mnesia:install_fallback(Bup2)),
+ file:delete(Bup2),
+ %% Will generate intentional crash, fatal error
+ ?match([], mnesia_test_lib:stop_mnesia([Node2])),
+ wait_till_dead([Node1, Node2]),
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2], [Tab1, Tab2])),
+ ?match(match, verify_state(Tab1, Tab2, CpState)),
+
+ ?match(ok, mnesia:install_fallback(Bup)),
+ file:delete(Bup),
+ %% Will generate intentional crash, fatal error
+ ?match([], mnesia_test_lib:stop_mnesia([Node1, Node2])),
+ wait_till_dead([Node1, Node2]),
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2], [Tab1, Tab2])),
+ CpState2 = [X || X <- CpState, element(1, X) /= Tab1],
+ ?match(match, verify_state(Tab1, Tab2, CpState2)),
+ ?verify_mnesia(Nodes, []).
+
+wait_till_dead([]) -> ok;
+wait_till_dead([N|Ns]) ->
+ Apps = rpc:call(N, application, which_applications, []),
+ case lists:keymember(mnesia, 1, Apps) of
+ true ->
+ timer:sleep(10),
+ wait_till_dead([N|Ns]);
+ false ->
+ wait_till_dead(Ns)
+ end.
+
+add_some_records(Tab1, Tab2, Old) ->
+ Recs1 = [{Tab1, I, I} || I <- lists:seq(1, 30)],
+ Recs2 = [{Tab2, I, I} || I <- lists:seq(20, 40)],
+ lists:foreach(fun(R) -> mnesia:dirty_write(R) end, Recs1),
+ Fun = fun(R) -> mnesia:write(R) end,
+ Trans = fun() -> lists:foreach(Fun, Recs2) end,
+ ?match({atomic, _}, mnesia:transaction(Trans)),
+ lists:sort(Old ++ Recs1 ++ Recs2).
+
+add_more_records(Tab1, Tab2, Old) ->
+ Change1 = [{T, K, V+100} || {T, K, V} <- Old, K==23],
+ Change2 = [{T, K, V+100} || {T, K, V} <- Old, K==24],
+ Del = [{T, K} || {T, K, _V} <- Old, K>=25],
+ New = [{Tab1, 50, 50}, {Tab2, 50, 50}],
+ lists:foreach(fun(R) -> mnesia:dirty_write(R) end, Change1),
+ lists:foreach(fun(R) -> mnesia:dirty_delete(R) end, Del),
+ Fun = fun(R) -> mnesia:write(R) end,
+ Trans = fun() -> lists:foreach(Fun, Change2 ++ New) end,
+ ?match({atomic, ok}, mnesia:transaction(Trans)),
+ Recs = [{T, K, V} || {T, K, V} <- Old, K<23] ++ Change1 ++ Change2 ++ New,
+ lists:sort(Recs).
+
+
+verify_state(Tab1, Tab2, Exp) ->
+ Fun = fun() ->
+ Act1 = [mnesia:read({Tab1, K}) || K <- mnesia:all_keys(Tab1)],
+ Act2 = [mnesia:read({Tab2, K}) || K <- mnesia:all_keys(Tab2)],
+ Act = lists:append(Act1) ++ lists:append(Act2),
+ {ok, Act -- Exp, Exp -- Act}
+ end,
+ case mnesia:transaction(Fun) of
+ {atomic, {ok, [], []}} -> match;
+ {atomic, {ok, More, Less}} -> {mismatch, More, Less};
+ {aborted, Reason} -> {error, Reason}
+ end.
+
+transform_some_records(Tab1, _Tab2, Old) ->
+ Fun = fun(Rec) ->
+ list_to_tuple(tuple_to_list(Rec) ++ [4711])
+ end,
+ ?match({atomic, ok},
+ mnesia:transform_table(Tab1, Fun, [key, val, extra])),
+ Filter = fun(Rec) when element(1, Rec) == Tab1 -> {true, Fun(Rec)};
+ (_) -> true
+ end,
+ lists:sort(lists:zf(Filter, Old)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+stress(doc) ->
+ ["Stress the system a little"];
+stress(suite) ->
+ [
+ conflict,
+ dist
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dist(doc) ->
+    ["Avoid lock conflicts in order to maximize throughput",
+ "Ten drivers per node, tables replicated to all nodes, lots of branches"];
+dist(suite) -> [];
+dist(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, 10 * 60000}]),
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ ?match({ok, _}, mnesia_tpcb:start(dist_args(Nodes, Storage))).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+conflict(doc) ->
+ ["Provoke a lot of lock conflicts.",
+ "Ten drivers per node, tables replicated to all nodes, single branch"];
+conflict(suite) -> [];
+conflict(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, 10 * 60000}]),
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ ?match({ok, _}, mnesia_tpcb:start(conflict_args(Nodes, Storage))).
+
+conflict_args(Nodes, ReplicaType) ->
+ [{db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 10},
+ {n_branches, 1},
+ {n_accounts_per_branch, 10},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(5)},
+ {report_interval, timer:seconds(10)},
+ {use_running_mnesia, true},
+ {reuse_history_id, true}].
+
+dist_args(Nodes, ReplicaType) ->
+ [{db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 10},
+ {n_branches, length(Nodes) * 100},
+ {n_accounts_per_branch, 10},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(5)},
+ {report_interval, timer:seconds(10)},
+ {use_running_mnesia, true},
+ {reuse_history_id, true}].
+
diff --git a/lib/mnesia/test/mnesia_isolation_test.erl b/lib/mnesia/test/mnesia_isolation_test.erl
new file mode 100644
index 0000000000..4fc6e8fe58
--- /dev/null
+++ b/lib/mnesia/test/mnesia_isolation_test.erl
@@ -0,0 +1,2419 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_isolation_test).
+-author('[email protected]').
+
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Verify the isolation property.",
+ "Operations of concurrent transactions must yield results which",
+ "are indistinguishable from the results which would be obtained by",
+ "forcing each transaction to be serially executed to completion in",
+ "some order. This means that repeated reads of the same records",
+ "within any committed transaction must have returned identical",
+     "data when run concurrently with any mix of arbitrary transactions.",
+     "Updates in one transaction must not be visible in any other",
+     "transaction before the transaction has been committed",
+     "(see the comment sketch after the suite clause below)."];
+all(suite) ->
+ [
+ locking,
+ visibility
+ ].
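+
+%% A comment sketch of the property stated in all(doc) above (iso_tab is a
+%% purely illustrative table name, not one created by this suite): an
+%% uncommitted update must stay invisible to every other transaction.
+%%
+%%   mnesia:transaction(fun() ->
+%%           ok = mnesia:write({iso_tab, k, new}),
+%%           %% a concurrent transaction reading {iso_tab, k} at this point
+%%           %% still sees the old record (or none), never the new one
+%%           ok
+%%   end).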
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+locking(doc) ->
+ ["Verify locking semantics for various configurations",
+ " NoLock = lock_funs(no_lock, any_granularity)",
+ " SharedLock = lock_funs(shared_lock, any_granularity)",
+ " ExclusiveLock = lock_funs(exclusive_lock, any_granularity)",
+ " AnyLock = lock_funs(any_lock, any_granularity)"];
+locking(suite) ->
+ [no_conflict,
+ simple_queue_conflict,
+ advanced_queue_conflict,
+ simple_deadlock_conflict,
+ advanced_deadlock_conflict,
+ lock_burst,
+ sticky_locks,
+ unbound_locking,
+ admin_conflict,
+%% removed_resources,
+ nasty
+ ].
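+
+%% Convention used by the locking cases below (inferred from usage; the
+%% helpers live in mnesia_test_lib): ?start_activities spawns one worker
+%% per listed node, a fun sent as Worker ! Fun is evaluated inside that
+%% worker's transaction and its result comes back as {Worker, Result},
+%% and sending end_trans makes the worker commit and reply
+%% {Worker, {atomic, end_trans}}.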
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+no_conflict(suite) -> [];
+no_conflict(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = no_conflict,
+ create_conflict_table(Tab, [Node1]),
+ Fun = fun(OtherOid, Lock1, Lock2) ->
+ %% Start two transactions
+ {success, [B, A]} = ?start_activities([Node1, Node1]),
+ ?start_transactions([B, A]),
+
+ A ! fun() -> Lock1(one_oid(Tab)), ok end,
+ ?match_receive({A, ok}),
+ B ! fun() -> Lock2(OtherOid), ok end,
+ ?match_receive({B, ok}),
+ A ! fun() -> mnesia:abort(ok) end,
+ ?match_receive({A, {aborted, ok}}),
+ B ! fun() -> mnesia:abort(ok) end,
+ ?match_receive({B, {aborted, ok}})
+ end,
+ NoLocks = lock_funs(no_lock, any_granularity),
+ SharedLocks = lock_funs(shared_lock, any_granularity),
+ AnyLocks = lock_funs(any_lock, any_granularity),
+ OneOneFun = fun(Lock1, Lock2) -> Fun(one_oid(Tab), Lock1, Lock2) end,
+ fun_loop(OneOneFun, NoLocks, AnyLocks),
+ fun_loop(OneOneFun, AnyLocks, NoLocks),
+ fun_loop(OneOneFun, SharedLocks, SharedLocks),
+
+ %% Lock different objects
+ OneOtherFun = fun(Lock1, Lock2) -> Fun(other_oid(Tab), Lock1, Lock2) end,
+ OneSharedLocks = lock_funs(shared_lock, one),
+ OneExclusiveLocks = lock_funs(exclusive_lock, one),
+ fun_loop(OneOtherFun, OneSharedLocks, OneExclusiveLocks),
+ fun_loop(OneOtherFun, OneExclusiveLocks, OneSharedLocks),
+ fun_loop(OneOtherFun, OneExclusiveLocks, OneExclusiveLocks),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+simple_queue_conflict(suite) -> [];
+simple_queue_conflict(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = simple_queue_conflict,
+ create_conflict_table(Tab, [Node1]),
+ Fun = fun(OneLock, OtherLock) ->
+ %% Start two transactions
+ {success, [B, A]} = ?start_activities([Node1, Node1]),
+ ?start_transactions([B, A]),
+
+ A ! fun() -> OneLock(one_oid(Tab)), ok end,
+ ?match_receive({A, ok}),
+ B ! fun() -> OtherLock(one_oid(Tab)), ok end,
+ wait_for_lock(B, [Node1], 20), % Max 10 sec
+ A ! end_trans,
+ ?match_multi_receive([{A, {atomic, end_trans}}, {B, ok}]),
+ B ! fun() -> mnesia:abort(ok) end,
+ ?match_receive({B, {aborted, ok}})
+ end,
+ OneSharedLocks = lock_funs(shared_lock, one),
+ AllSharedLocks = lock_funs(shared_lock, all),
+ OneExclusiveLocks = lock_funs(exclusive_lock, one),
+ AllExclusiveLocks = lock_funs(exclusive_lock, all),
+ fun_loop(Fun, OneExclusiveLocks, OneExclusiveLocks),
+ fun_loop(Fun, AllExclusiveLocks, AllExclusiveLocks),
+ fun_loop(Fun, OneExclusiveLocks, AllExclusiveLocks),
+ fun_loop(Fun, AllExclusiveLocks, OneExclusiveLocks),
+ fun_loop(Fun, OneSharedLocks, AllExclusiveLocks),
+ fun_loop(Fun, AllSharedLocks, OneExclusiveLocks),
+ ok.
+
+wait_for_lock(Pid, _Nodes, 0) ->
+ Queue = mnesia:system_info(lock_queue),
+ ?error("Timeout while waiting for lock on Pid ~p in queue ~p~n", [Pid, Queue]);
+wait_for_lock(Pid, Nodes, N) ->
+ rpc:multicall(Nodes, sys, get_status, [mnesia_locker]),
+ List = [rpc:call(Node, mnesia, system_info, [lock_queue]) || Node <- Nodes],
+ Q = lists:append(List),
+ check_q(Pid, Q, Nodes, N).
+
+check_q(Pid, [{_Oid, _Op, Pid, _Tid, _WFT} | _Tail], _Nodes, _Count) -> ok;
+check_q(Pid, [_ | Tail], Nodes, Count) -> check_q(Pid, Tail, Nodes, Count);
+check_q(Pid, [], Nodes, Count) ->
+    timer:sleep(500),
+    wait_for_lock(Pid, Nodes, Count - 1).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+advanced_queue_conflict(suite) -> [];
+advanced_queue_conflict(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = advanced_queue_conflict,
+ create_conflict_table(Tab, [Node1]),
+ OneRec = {Tab, 3, 3},
+ OneOid = {Tab, 3},
+ OtherRec = {Tab, 4, 4},
+ OtherOid = {Tab, 4},
+
+ %% Start four transactions
+ {success, [D, C, B, A]} = ?start_activities(lists:duplicate(4, Node1)),
+ ?start_transactions([D, C, B, A]),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+
+ %% Acquire some locks
+ A ! fun() -> mnesia:write(OneRec) end,
+ ?match_receive({A, ok}),
+ A ! fun() -> mnesia:read(OneOid) end,
+ ?match_receive({A, [OneRec]}),
+
+ B ! fun() -> mnesia:write(OtherRec) end,
+ ?match_receive({B, ok}),
+ B ! fun() -> mnesia:read(OneOid) end,
+ ?match_receive(timeout),
+
+ C ! fun() -> mnesia:read(OtherOid) end,
+ ?match_receive(timeout),
+ D ! fun() -> mnesia:wread(OtherOid) end,
+ ?match_receive(timeout),
+
+ %% and release them in a certain order
+ A ! end_trans,
+ ?match_multi_receive([{A, {atomic, end_trans}}, {B, [OneRec]}]),
+ B ! end_trans,
+ ?match_multi_receive([{B, {atomic, end_trans}}, {C, [OtherRec]}]),
+ C ! end_trans,
+ ?match_multi_receive([{C, {atomic, end_trans}}, {D, [OtherRec]}]),
+ D ! end_trans,
+ ?match_receive({D, {atomic, end_trans}}),
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+simple_deadlock_conflict(suite) -> [];
+simple_deadlock_conflict(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = simple_deadlock_conflict,
+ create_conflict_table(Tab, [Node1]),
+ Rec = {Tab, 4, 4},
+ Oid = {Tab, 4},
+
+ %% Start two transactions
+ {success, [B, A]} = ?start_activities(lists:duplicate(2, Node1)),
+ mnesia_test_lib:start_transactions([B, A], 0), % A is newest
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+
+ B ! fun() -> mnesia:write(Rec) end,
+ ?match_receive({B, ok}),
+ A ! fun() -> mnesia:read(Oid) end,
+ ?match_receive({A, {aborted, nomore}}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+advanced_deadlock_conflict(suite) -> [];
+advanced_deadlock_conflict(Config) when is_list(Config) ->
+ [Node1, Node2] = ?acquire_nodes(2, Config),
+ Tab = advanced_deadlock_conflict,
+ create_conflict_table(Tab, [Node2]),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ Rec = {Tab, 4, 4},
+ Oid = {Tab, 4},
+
+ %% Start two transactions
+ {success, [B, A]} = ?start_activities([Node1, Node2]),
+ mnesia_test_lib:start_sync_transactions([B, A], 0), % A is newest
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+
+ B ! fun() -> mnesia:write(Rec) end,
+ ?match_receive({B, ok}),
+ A ! fun() -> mnesia:read(Oid) end,
+ ?match_receive({A, {aborted, nomore}}),
+ B ! end_trans,
+ ?match_receive({B, {atomic, end_trans}}),
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+one_oid(Tab) -> {Tab, 1}.
+other_oid(Tab) -> {Tab, 2}.
+
+create_conflict_table(Tab, Nodes) ->
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, Nodes},
+ {attributes, [key, val]},
+ {index, [val]}
+ ])),
+ ?match([], mnesia_test_lib:sync_tables(Nodes, [Tab])),
+ init_conflict_table(Tab).
+
+init_conflict_table(Tab) ->
+ Recs = mnesia:dirty_match_object({Tab, '_', '_'}),
+ lists:foreach(fun(R) -> mnesia:dirty_delete_object(R) end, Recs),
+ Keys = [one_oid(Tab), other_oid(Tab)],
+ [mnesia:dirty_write({T, K, K}) || {T, K} <- Keys].
+
+%% Apply Fun for each X and Y
+fun_loop(Fun, Xs, Ys) ->
+ lists:foreach(fun(X) -> lists:foreach(fun(Y) -> do_fun(Fun, X, Y) end, Ys) end, Xs).
+
+do_fun(Fun, X, Y) ->
+ Pid = spawn_link(?MODULE, do_fun, [self(), Fun, X, Y]),
+ receive
+ {done_fun, Pid} -> done_fun
+ end.
+
+do_fun(Monitor, Fun, X, Y) ->
+    ?log("do_fun ~p~n", [[Fun, X, Y]]),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ Fun(X, Y),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ unlink(Monitor),
+ Monitor ! {done_fun, self()},
+ exit(done_fun).
+
+%% Returns a list of fun's
+lock_funs(no_lock, one) ->
+ [
+ fun(Oid) -> mnesia:dirty_read(Oid) end,
+ fun({Tab, Key}) -> mnesia:dirty_write({Tab, Key, Key}) end,
+ fun({Tab, Key}) -> mnesia:dirty_write({Tab, Key, Key}),
+ mnesia:dirty_update_counter({Tab, Key}, 0) end,
+ fun(Oid) -> mnesia:dirty_delete(Oid) end,
+ fun({Tab, Key}) -> mnesia:dirty_delete_object({Tab, Key, Key}) end,
+ fun({Tab, Key}) -> mnesia:dirty_match_object({Tab, Key, Key}) end,
+ fun({Tab, Key}) -> mnesia:dirty_index_match_object({Tab, Key, Key}, val) end,
+ fun({Tab, Key}) -> mnesia:dirty_index_read(Tab, Key, val) end,
+ fun({Tab, Key}) -> mnesia:dirty_index_match_object({Tab, '_', Key}, val) end
+ ];
+lock_funs(no_lock, all) ->
+ [
+ fun({Tab, _}) -> mnesia:dirty_match_object({Tab, '_', '_'}) end,
+ fun({Tab, _}) -> slot_iter(Tab) end,
+ fun({Tab, _}) -> key_iter(Tab) end
+ ];
+lock_funs(shared_lock, one) ->
+
+ [
+ fun(Oid) -> mnesia:read(Oid) end,
+ fun({Tab, Key}) ->
+ init_conflict_table(Tab),
+ mnesia:dirty_delete(other_oid(Tab)),
+ mnesia:match_object({Tab, Key, Key}) end
+ ];
+lock_funs(shared_lock, all) ->
+ [
+ fun({Tab, _}) -> mnesia:read_lock_table(Tab) end,
+ fun({Tab, Key}) -> mnesia:match_object({Tab, '_', Key}) end,
+ fun({Tab, _}) -> mnesia:match_object({Tab, '_', '_'}) end,
+ fun({Tab, _}) -> mnesia:all_keys(Tab) end,
+ fun({Tab, Key}) -> mnesia:index_match_object({Tab, '_', Key}, val) end,
+ fun({Tab, Key}) -> mnesia:index_read(Tab, Key, val) end
+ ];
+lock_funs(exclusive_lock, one) ->
+ [
+ fun(Oid) -> mnesia:wread(Oid) end,
+ fun({Tab, Key}) -> mnesia:write({Tab, Key, Key}) end,
+ fun(Oid) -> mnesia:delete(Oid) end,
+ fun({Tab, Key}) -> mnesia:delete_object({Tab, Key, Key}) end,
+ fun({Tab, Key}) -> mnesia:s_write({Tab, Key, Key}) end,
+ fun(Oid) -> mnesia:s_delete(Oid) end,
+ fun({Tab, Key}) -> mnesia:s_delete_object({Tab, Key, Key}) end
+ ];
+lock_funs(exclusive_lock, all) ->
+ [
+ fun({Tab, _}) -> mnesia:write_lock_table(Tab) end
+ ];
+lock_funs(Compatibility, any_granularity) ->
+ lists:append([lock_funs(Compatibility, Granularity) ||
+ Granularity <- [one, all]]);
+lock_funs(any_lock, Granularity) ->
+ lists:append([lock_funs(Compatibility, Granularity) ||
+ Compatibility <- [no_lock, shared_lock, exclusive_lock]]).
+
+slot_iter(Tab) ->
+ slot_iter(Tab, mnesia:dirty_slot(Tab, 0), 1).
+slot_iter(_Tab, '$end_of_table', _) ->
+ [];
+slot_iter(Tab, Recs, Slot) ->
+ Recs ++ slot_iter(Tab, mnesia:dirty_slot(Tab, Slot), Slot+1).
+
+key_iter(Tab) ->
+ key_iter(Tab, mnesia:dirty_first(Tab)).
+key_iter(_Tab, '$end_of_table') ->
+ [];
+key_iter(Tab, Key) ->
+ [Key | key_iter(Tab, mnesia:dirty_next(Tab, Key))].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+lock_burst(suite) -> [];
+lock_burst(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = burst,
+ ?match({atomic, ok}, mnesia:create_table(Tab,
+ [{attributes, [a, b]},
+ {ram_copies, [Node1]}])),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ?match(ok, burst_em(Tab, 1000)),
+ ?match([{burst,1,1000}], mnesia:dirty_read(Tab,1)),
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+burst_em(Tab, N) ->
+ spawn_link(?MODULE, burst_counter, [self(), Tab, N]),
+ receive
+ burst_counter_done -> ok
+ end.
+
+burst_counter(Monitor, Tab, N) when N > 0 ->
+ ?match(ok, burst_gen(Tab, N, self())),
+ Monitor ! burst_receiver(N).
+
+burst_receiver(0) ->
+ burst_counter_done;
+burst_receiver(N) ->
+ receive
+ burst_incr_done ->
+ burst_receiver(N-1)
+ end.
+
+burst_gen(_, 0, _) ->
+ ok;
+burst_gen(Tab, N, Father) when is_integer(N), N > 0 ->
+ spawn_link(?MODULE, burst_incr, [Tab, Father]),
+ burst_gen(Tab, N-1, Father).
+
+burst_incr(Tab, Father) ->
+ Fun = fun() ->
+ Val =
+ case mnesia:read({Tab, 1}) of
+ [{Tab, 1, V}] -> V;
+ [] -> 0
+ end,
+ mnesia:write({Tab, 1, Val+1})
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun)),
+ Father ! burst_incr_done.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+sticky_locks(doc) ->
+ ["Simple Tests of sticky locks"];
+
+sticky_locks(suite) ->
+ [
+ basic_sticky_functionality
+     %% Needs to be expanded a little bit further
+ ].
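+
+%% Background note (Mnesia behaviour assumed here, not asserted by the
+%% test itself): a sticky lock taken via mnesia:s_write/1 stays on the
+%% node that acquired it after commit, so later transactions on that node
+%% can lock locally; get_sticky/0 below peeks into mnesia_locker's sticky
+%% lock table to see where the lock currently resides.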
+
+basic_sticky_functionality(suite) -> [];
+basic_sticky_functionality(Config) when is_list(Config) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = basic_table,
+ Storage = mnesia_test_lib:storage_type(disc_copies, Config),
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Storage, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(sync, [{ram_copies, Nodes}])),
+ Trans1 = fun() ->
+ ?match(ok, mnesia:s_write({Tab, 1, 2})),
+ ?match([{Tab, 1, 2}], mnesia:read({Tab, 1})),
+ ?match(timeout, receive M -> M after 500 -> timeout end),
+ ?match(ok, mnesia:s_write({Tab, 2, 2})),
+ ?match(ok, mnesia:write({Tab, 42, 4711}))
+ end,
+ Trans2 = fun() ->
+ ?match([{Tab, 1, 2}], mnesia:read({Tab, 1})),
+ ?match(timeout, receive M -> M after 500 -> timeout end),
+ ?match(ok, mnesia:write({Tab, 1, 4711})),
+ ?match(ok, mnesia:s_write({Tab, 2, 4})),
+ ?match(ok, mnesia:delete({Tab, 42}))
+ end,
+ rpc:call(N1, mnesia, transaction, [Trans1]),
+ ?match([{Tab,N1}], rpc:call(N1, ?MODULE, get_sticky, [])),
+ ?match([{Tab,N1}], rpc:call(N2, ?MODULE, get_sticky, [])),
+
+ rpc:call(N2, mnesia, transaction, [Trans2]),
+ ?match([], rpc:call(N1, ?MODULE, get_sticky, [])),
+ ?match([], rpc:call(N2, ?MODULE, get_sticky, [])),
+
+ Slock = fun() -> mnesia:read({sync,sync}),get_sticky() end,
+ ?match({atomic, [{Tab,1, 4711}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match({atomic, [{Tab,2, 4}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 2}) end)),
+ ?match({atomic, [{Tab,N1}]}, rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 3}),Slock() end])),
+ ?match([{Tab,N1}], rpc:call(N2, ?MODULE, get_sticky, [])),
+
+ ?match({atomic,[]}, rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 4}),Slock() end])),
+
+ ?match([], rpc:call(N1, ?MODULE, get_sticky, [])),
+ ?match([], rpc:call(N2, ?MODULE, get_sticky, [])),
+
+ ?match({atomic,[{Tab,N2}]}, rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 4}),Slock() end])),
+
+ ?match({atomic,[]}, rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 5}),Slock() end])),
+ ?match({atomic,[{Tab,N1}]}, rpc:call(N1, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 5}),Slock() end])),
+ ?match({atomic,[]}, rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 6}),Slock() end])),
+ ?match({atomic,[{Tab,N2}]}, rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 7}),Slock() end])),
+
+ ?match([{Tab,N2}], get_sticky()),
+ ?match({atomic, [{Tab,1, 7}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match([{Tab,N2}], get_sticky()),
+ ?match({atomic, [{Tab,2, 4}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 2}) end)),
+ ?match([{Tab,N2}], get_sticky()),
+ ?match({atomic,[{Tab,N2}]}, rpc:call(N2, mnesia, transaction,
+ [fun() -> mnesia:s_write({Tab, 1, 6}),Slock() end])),
+ ?match([{Tab,N2}], get_sticky()),
+ ?match({atomic, [{Tab,1, 6}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match([{Tab,N2}], get_sticky()),
+ ?match({atomic, [{Tab,2, 4}]}, mnesia:transaction(fun() -> mnesia:read({Tab, 2}) end)),
+ ?match([{Tab,N2}], get_sticky()),
+ ?verify_mnesia(Nodes, []).
+
+get_sticky() ->
+ mnesia_locker ! {get_table, self(), mnesia_sticky_locks},
+ receive {mnesia_sticky_locks, Locks} -> Locks end.
+
+get_held() ->
+    mnesia_locker ! {get_table, self(), mnesia_held_locks},
+    receive {mnesia_held_locks, Locks} -> Locks end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+unbound_locking(suite) ->
+ [unbound1, unbound2];
+
+unbound_locking(doc) ->
+ ["Check that mnesia handles unbound key variables, GPRS bug."
+ "Ticket id: OTP-3342"].
+
+unbound1(suite) -> [];
+unbound1(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+
+ ?match({atomic, ok}, mnesia:create_table(ul, [])),
+
+ Tester = self(),
+ Write = fun() ->
+ mnesia:write({ul, {key, {17,42}}, val}),
+ ?log("~p Got write lock waiting...~n", [self()]),
+ Tester ! continue,
+ receive
+ continue ->
+ ok
+ end,
+ ?log("..continuing~n", []),
+ ok
+ end,
+
+ {success, [A]} = ?start_activities([Node1]),
+ ?start_transactions([A]),
+ A ! Write,
+
+ receive continue -> ok end,
+
+ Match = fun() ->
+ case catch mnesia:match_object({ul, {key, {'_', '$0'}}, '_'}) of
+ {'EXIT', What} -> %% Cyclic first time
+ ?log("Cyclic Restarting~n", []),
+ A ! continue,
+ A ! end_trans,
+ exit(What);
+ Res ->
+ ?log("Got match log ~p...~n", [Res]),
+ Res
+ end
+ end,
+ ?match({atomic, [{ul,{key,{17,42}},val}]}, mnesia:transaction(Match)),
+
+ ?match_receive({A, ok}),
+ ?match_receive({A, {atomic, end_trans}}),
+ ok.
+
+unbound2(suite) -> [];
+unbound2(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+
+ ?match({atomic, ok}, mnesia:create_table(ul, [])),
+
+ {success, [B, A]} = ?start_activities([Node1, Node1]),
+
+ Me = self(),
+
+ Write = fun() ->
+ mnesia:write({ul, {key, {17,42}}, val}),
+ ?log("~p Got write lock waiting... Tid ~p ~n",
+ [self(), get(mnesia_activity_state)]),
+ Me ! ok_lock,
+ receive
+ continue ->
+ ok
+ end,
+ ?log("..continuing~n", []),
+ ok
+ end,
+
+ Match = fun() ->
+ receive
+ continueB ->
+ ?log("~p, moving on TID ~p~n",
+ [self(), get(mnesia_activity_state)]),
+ Me ! {self(), continuing}
+ end,
+ case catch mnesia:match_object({ul, {key, {'_', '$0'}},
+ '_'}) of
+ {'EXIT', What} -> %% Cyclic first time
+ ?log("Cyclic Restarting ~p ~n", [What]),
+ {should_not_happen,What};
+ Res ->
+ ?log("Got match log ~p...~n", [Res]),
+ Res
+ end
+ end,
+
+ B ! fun() -> mnesia:transaction(Match) end,
+ timer:sleep(100), %% Let B be started first..
+ A ! fun() -> mnesia:transaction(Write) end,
+
+ receive ok_lock -> ok end,
+
+ B ! continueB,
+ ?match_receive({B, continuing}),
+
+ %% B should now be in lock queue.
+ A ! continue,
+ ?match_receive({A, {atomic, ok}}),
+ ?match_receive({B, {atomic, [{ul,{key,{17,42}},val}]}}),
+ ok.
+
+receiver() ->
+ receive
+ {_Pid, begin_trans} ->
+ receiver();
+ Else ->
+ Else
+ after
+ 10000 ->
+ timeout
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+admin_conflict(doc) ->
+ ["Provoke lock conflicts with schema transactions and checkpoints."];
+admin_conflict(suite) ->
+ [
+ create_table,
+ delete_table,
+ move_table_copy,
+ add_table_index,
+ del_table_index,
+ transform_table,
+ snmp_open_table,
+ snmp_close_table,
+ change_table_copy_type,
+ change_table_access,
+ add_table_copy,
+ del_table_copy,
+ dump_tables,
+ extra_admin_tests
+ ].
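+
+%% The admin-conflict cases below share one pattern: worker A takes a lock,
+%% then op/4 runs the given schema operation in a separate process and
+%% reports the result back to the test case. A minimal sketch of the helper
+%% assumed here (its real definition lives further down in this module,
+%% outside this excerpt):
+%%
+%%   op(Father, Mod, Fun, Args) ->
+%%       Res = apply(Mod, Fun, Args),
+%%       Father ! {self(), Res}.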
+
+create_table(suite) -> [];
+create_table(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = c_t_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ DiskMaybe = mnesia_test_lib:storage_type(disc_copies, Config),
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, create_table,
+ [test_tab1, [{DiskMaybe, [ThisNode]}]]]),
+ ?match_multi_receive([{Pid, {atomic, ok}},
+ {'EXIT', Pid, normal}]), %% No Locks! op should be exec.
+
+ Pid2 = spawn_link(?MODULE, op, [self(), mnesia, create_table,
+ [test_tab2, [{ram_copies, [Node2]}]]]),
+
+ ?match_multi_receive([{Pid2, {atomic, ok}},
+ {'EXIT', Pid2, normal}]), %% No Locks! op should be exec.
+
+ A ! end_trans,
+ ?match_receive({A,{atomic,end_trans}}),
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+delete_table(suite) -> [];
+delete_table(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = d_t_tab,
+ Def = [{ram_copies, [ThisNode, Node2]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:read({Tab, 1}) end,
+ ?match_receive({A, [{Tab, 1, 1, 0}]}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, delete_table,
+ [Tab]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+move_table_copy(suite) -> [];
+move_table_copy(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = m_t_c_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 2, 3}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, move_table_copy,
+ [Tab, ThisNode, Node2]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ timer:sleep(500), %% Don't know how to sync this !!!
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ sys:get_status(whereis(mnesia_tm)), % Explicit sync, release locks is async
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+add_table_index(suite) -> [];
+add_table_index(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config ++ [{tc_timeout, 60000}]),
+ Tab = a_t_i_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia,
+ add_table_index, [Tab, attr1]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+del_table_index(suite) -> [];
+del_table_index(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config),
+ Tab = d_t_i_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, attr1)),
+
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 51, 51, attr2}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, del_table_index,
+ [Tab, attr1]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+ %% Locks released! op should be exec.
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+transform_table(suite) -> [];
+transform_table(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = t_t_tab,
+ Def = [{ram_copies, [ThisNode, Node2]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:read({Tab, 1}) end,
+ ?match_receive({A, [{Tab, 1, 1, 0}]}), %% A is executed
+
+    Transform = fun({Table, Key, Attr1, Attr2}) -> % Need to do a transform
+ {Table, Key, {Attr1, Attr2}} end,
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, transform_table,
+ [Tab, Transform, [key, attr1]]]),
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+snmp_open_table(suite) -> [];
+snmp_open_table(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config),
+ Tab = s_o_t_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, 100}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, snmp_open_table,
+ [Tab, [{key, integer}]]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+    %% Locks released! op should be executed. Can take a while (that's the timeout)
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+snmp_close_table(suite) -> [];
+snmp_close_table(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config),
+ Tab = s_c_t_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab, [{key, integer}])),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, 100}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, snmp_close_table, [Tab]]),
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+    %% Locks released! op should be executed. Can take a while (that's the timeout)
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+change_table_copy_type(suite) -> [];
+change_table_copy_type(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config),
+ Tab = c_t_c_t_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, change_table_copy_type,
+ [Tab, ThisNode, disc_copies]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+change_table_access(suite) -> [];
+change_table_access(Config) when is_list(Config) ->
+ [ThisNode, _Node2] = ?acquire_nodes(2, Config),
+ Tab = c_t_a_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, change_table_access_mode,
+ [Tab, read_only]]),
+
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+add_table_copy(suite) -> [];
+add_table_copy(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = a_t_c_tab,
+ Def = [{ram_copies, [ThisNode]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, add_table_copy,
+ [Tab, Node2, ram_copies]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+del_table_copy(suite) -> [];
+del_table_copy(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = d_t_c_tab,
+ Def = [{ram_copies, [ThisNode, Node2]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+ A ! fun() -> mnesia:write({Tab, 1, 2, 5}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, del_table_copy,
+ [Tab, ThisNode]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A, {atomic,end_trans}}),
+
+ ?match_receive({Pid, {atomic, ok}}),
+ ?match_receive({'EXIT', Pid, normal}),
+
+ timer:sleep(500), %% Don't know how to sync this !!!
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ sys:get_status(whereis(mnesia_tm)), % Explicit sync, release locks is async
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
+dump_tables(suite) -> [];
+dump_tables(Config) when is_list(Config) ->
+ [ThisNode, Node2] = ?acquire_nodes(2, Config),
+ Tab = dump_t_tab,
+ Def = [{ram_copies, [ThisNode, Node2]}, {attributes, [key, attr1, attr2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 50),
+ {success, [A]} = ?start_activities([ThisNode]),
+ mnesia_test_lib:start_sync_transactions([A], 0),
+ A ! fun() -> mnesia:write({Tab, 1, 1, updated}) end,
+ ?match_receive({A, ok}), %% A is executed
+
+ Pid = spawn_link(?MODULE, op, [self(), mnesia, dump_tables,
+ [[Tab]]]),
+
+ ?match_receive(timeout), %% op waits for locks occupied by A
+
+ A ! end_trans, %% Kill A, locks should be released
+ ?match_receive({A,{atomic,end_trans}}),
+
+ receive
+ Msg -> ?match({Pid, {atomic, ok}}, Msg)
+ after
+ timer:seconds(20) -> ?error("Operation timed out", [])
+ end,
+
+ sys:get_status(whereis(mnesia_locker)), % Explicit sync, release locks is async
+ ?match([], mnesia:system_info(held_locks)),
+ ?match([], mnesia:system_info(lock_queue)),
+ ok.
+
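+%% Helper used by the schema operation tests above: applies Mod:Fun(Args) in a
+%% separate process and reports the result back to the parent as
+%% {self(), Result}, so the parent can detect when the blocked operation
+%% finally gets its locks and completes.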
+op(Father, Mod, Fun, Args) ->
+ Res = apply(Mod, Fun, Args),
+ Father ! {self(), Res}.
+
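+%% Populate Tab with N records of the form {Tab, Key, Key, 0} using
+%% sync_dirty writes.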
+insert(_Tab, 0) -> ok;
+insert(Tab, N) when N > 0 ->
+ ok = mnesia:sync_dirty(fun() -> mnesia:write({Tab, N, N, 0}) end),
+ insert(Tab, N-1).
+
+extra_admin_tests(suite) ->
+ [del_table_copy_1,
+ del_table_copy_2,
+ del_table_copy_3,
+ add_table_copy_1,
+ add_table_copy_2,
+ add_table_copy_3,
+ add_table_copy_4,
+ move_table_copy_1,
+ move_table_copy_2,
+ move_table_copy_3,
+ move_table_copy_4].
+
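+%% Background load generator: the worker owns the record with the given Key
+%% and keeps incrementing its counter in a transaction until it receives
+%% {Pid, quit}, at which point it reports its final count back to Pid.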
+update_own(Tab, Key, Acc) ->
+ Update =
+ fun() ->
+ Res = mnesia:read({Tab, Key}),
+ case Res of
+ [{Tab, Key, Extra, Acc}] ->
+ mnesia:write({Tab,Key,Extra, Acc+1});
+ Val ->
+ {read, Val, {acc, Acc}}
+ end
+ end,
+ receive
+ {Pid, quit} -> Pid ! {self(), Acc}
+ after
+ 0 ->
+ case mnesia:transaction(Update) of
+ {atomic, ok} ->
+ update_own(Tab, Key, Acc+1);
+ Else ->
+ ?error("Trans failed on ~p with ~p~n"
+ "Info w2read ~p w2write ~p w2commit ~p storage ~p ~n",
+ [node(),
+ Else,
+ mnesia:table_info(Tab, where_to_read),
+ mnesia:table_info(Tab, where_to_write),
+ mnesia:table_info(Tab, where_to_commit),
+ mnesia:table_info(Tab, storage_type)])
+ end
+ end.
+
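+%% Like update_own/3, but all workers update the shared record with key 0;
+%% each worker bumps its own slot of the Extra tuple as well as the common
+%% counter, so the contributions can be verified separately afterwards.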
+update_shared(Tab, Me, Acc) ->
+ Update =
+ fun() ->
+ W2R = mnesia:table_info(Tab, where_to_read),
+ Res = mnesia:read({Tab, 0}),
+ case Res of
+ [{Tab, Key, Extra, Val}] when element(Me, Extra) == Acc ->
+ Extra1 = setelement(Me, Extra, Acc+1),
+ Term = {Tab, Key, Extra1, Val+1},
+ ok = mnesia:write(Term),
+% ?log("At ~p: ~p w2r ~p w2w ~p ~n",
+% [node(), Term,
+% mnesia:table_info(Tab, where_to_read),
+ W2W = mnesia:table_info(Tab, where_to_write),
+ W2C = mnesia:table_info(Tab, where_to_commit),
+%% mnesia:table_info(Tab, storage_type)
+% ]),
+ {_Mod, Tid, Ts} = get(mnesia_activity_state),
+ io:format("~p ~p~n", [Tid, ets:tab2list(element(2,Ts))]),
+ {ok,Term,{W2R,W2W,W2C}};
+ Val ->
+ Info = [{acc, Acc}, {me, Me},
+ {tid, element(2, mnesia:get_activity_id())},
+ {locks, mnesia:system_info(held_locks)}],
+ {read, Val, Info}
+ end
+ end,
+ receive
+ {Pid, quit} -> Pid ! {self(), Acc}
+ after
+ 0 ->
+ case mnesia:transaction(Update) of
+ {atomic, {ok,Term,W2}} ->
+ io:format("~p:~p:(~p,~p) ~w@~w~n", [erlang:now(),node(),Me,Acc,Term,W2]),
+ update_shared(Tab, Me, Acc+1);
+ Else ->
+ ?error("Trans failed on ~p with ~p~n"
+ "Info w2read ~p w2write ~p w2commit ~p storage ~p ~n",
+ [node(),
+ Else,
+ mnesia:table_info(Tab, where_to_read),
+ mnesia:table_info(Tab, where_to_write),
+ mnesia:table_info(Tab, where_to_commit),
+ mnesia:table_info(Tab, storage_type)
+ ])
+ end
+ end.
+
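+%% Create the schema_ops table, fill it, and start three "own" and three
+%% "shared" updater processes (one of each per node). The returned pids are
+%% later handed to verify_results/1 once the schema operation has finished.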
+init_admin(Def, N1, N2, N3) ->
+ Tab = schema_ops,
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert(Tab, 1002),
+
+ Pid1 = spawn_link(N1, ?MODULE, update_own, [Tab, 1, 0]),
+ Pid2 = spawn_link(N2, ?MODULE, update_own, [Tab, 2, 0]),
+ Pid3 = spawn_link(N3, ?MODULE, update_own, [Tab, 3, 0]),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 0, {0,0,0}, 0}) end)),
+
+ Pid4 = spawn_link(N1, ?MODULE, update_shared, [Tab, 1, 0]),
+ Pid5 = spawn_link(N2, ?MODULE, update_shared, [Tab, 2, 0]),
+ Pid6 = spawn_link(N3, ?MODULE, update_shared, [Tab, 3, 0]),
+
+ {Pid1, Pid2, Pid3, Pid4, Pid5, Pid6}.
+
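+%% Stop all six updaters, collect their final counters, and verify via dirty
+%% reads on every node that each replica agrees with the reported counts.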
+verify_results({P1, P2, P3, P4, P5, P6}) ->
+ Tab = schema_ops, N1 = node(P1), N2 = node(P2), N3 = node(P3),
+
+ try
+ P1 ! {self(), quit},
+ R1 = receive {P1, Res1} -> Res1 after 9000 -> throw({timeout,P1}) end,
+ P2 ! {self(), quit},
+ R2 = receive {P2, Res2} -> Res2 after 9000 -> throw({timeout,P2}) end,
+ P3 ! {self(), quit},
+ R3 = receive {P3, Res3} -> Res3 after 9000 -> throw({timeout,P3}) end,
+
+ P4 ! {self(), quit},
+ R4 = receive {P4, Res4} -> Res4 after 9000 -> throw({timeout,P4}) end,
+ P5 ! {self(), quit},
+ R5 = receive {P5, Res5} -> Res5 after 9000 -> throw({timeout,P5}) end,
+ P6 ! {self(), quit},
+ R6 = receive {P6, Res6} -> Res6 after 9000 -> throw({timeout,P6}) end,
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write_lock_table(Tab) end)),
+ ?log("Should be ~p~n", [R1]),
+ ?match([{_, _, _, R1}], rpc:call(N1, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{_, _, _, R1}], rpc:call(N2, mnesia, dirty_read, [{Tab, 1}])),
+ ?match([{_, _, _, R1}], rpc:call(N3, mnesia, dirty_read, [{Tab, 1}])),
+ ?log("Should be ~p~n", [R2]),
+ ?match([{_, _, _, R2}], rpc:call(N1, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{_, _, _, R2}], rpc:call(N2, mnesia, dirty_read, [{Tab, 2}])),
+ ?match([{_, _, _, R2}], rpc:call(N3, mnesia, dirty_read, [{Tab, 2}])),
+ ?log("Should be ~p~n", [R3]),
+ ?match([{_, _, _, R3}], rpc:call(N1, mnesia, dirty_read, [{Tab, 3}])),
+ ?match([{_, _, _, R3}], rpc:call(N2, mnesia, dirty_read, [{Tab, 3}])),
+ ?match([{_, _, _, R3}], rpc:call(N3, mnesia, dirty_read, [{Tab, 3}])),
+
+ Res = R4+R5+R6,
+ ?log("Should be {~p+~p+~p}= ~p~n", [R4, R5, R6, Res]),
+ ?match([{_, _, {R4,R5,R6}, Res}], rpc:call(N1, mnesia, dirty_read, [{Tab, 0}])),
+ ?match([{_, _, {R4,R5,R6}, Res}], rpc:call(N2, mnesia, dirty_read, [{Tab, 0}])),
+ ?match([{_, _, {R4,R5,R6}, Res}], rpc:call(N3, mnesia, dirty_read, [{Tab, 0}]))
+ catch throw:{timeout, Pid} ->
+ mnesia_lib:dist_coredump(),
+ ?error("Timeout ~p ~n", [Pid])
+ end.
+
+
+get_info(Tab) ->
+ Info = mnesia:table_info(Tab, all),
+ mnesia_lib:verbose("~p~n", [Info]).
+
+del_table_copy_1(suite) -> [];
+del_table_copy_1(Config) when is_list(Config) ->
+ [_Node1, Node2, _Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node2, Node2, Nodes). %Called on same Node as deleted
+del_table_copy_2(suite) -> [];
+del_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, _Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node1, Node2, Nodes). %Called from other Node
+del_table_copy_3(suite) -> [];
+del_table_copy_3(Config) when is_list(Config) ->
+ [_Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ del_table(Node3, Node2, Nodes). %Called from Node w.o. table
+
+%%% The actual test
+del_table(CallFrom, DelNode, [Node1, Node2, Node3]) ->
+ Def = [{ram_copies, [Node1]}, {disc_copies, [Node2]},
+ {attributes, [key, attr1, attr2]}],
+ Tab = schema_ops,
+ Pids = init_admin(Def, Node1, Node2, Node3),
+
+ ?log("Call from ~p delete table from ~p ~n", [CallFrom, DelNode]),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+
+ ?match({atomic, ok},
+ rpc:call(CallFrom, mnesia, del_table_copy, [Tab, DelNode])),
+
+ verify_results(Pids),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+
+add_table_copy_1(suite) -> [];
+add_table_copy_1(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_only_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node1, Node3, Nodes, Def).
+add_table_copy_2(suite) -> [];
+add_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_only_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node2, Node3, Nodes, Def).
+add_table_copy_3(suite) -> [];
+add_table_copy_3(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_only_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node3, Node3, Nodes, Def).
+add_table_copy_4(suite) -> [];
+add_table_copy_4(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_only_copies, [Node1]},
+ {attributes, [key, attr1, attr2]}],
+ add_table(Node2, Node3, Nodes, Def).
+%%% The actual test
+add_table(CallFrom, AddNode, [Node1, Node2, Node3], Def) ->
+ Pids = init_admin(Def, Node1, Node2, Node3),
+ Tab = schema_ops,
+ ?log("Call from ~p add table to ~p ~n", [CallFrom, AddNode]),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+ ?match({atomic, ok}, rpc:call(CallFrom, mnesia, add_table_copy,
+ [Tab, AddNode, ram_copies])),
+ verify_results(Pids),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+move_table_copy_1(suite) -> [];
+move_table_copy_1(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node1, Node1, Node3, Nodes, Def).
+move_table_copy_2(suite) -> [];
+move_table_copy_2(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node2, Node1, Node3, Nodes, Def).
+move_table_copy_3(suite) -> [];
+move_table_copy_3(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_copies, [Node1, Node2]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node3, Node1, Node3, Nodes, Def).
+move_table_copy_4(suite) -> [];
+move_table_copy_4(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Def = [{disc_copies, [Node1]},
+ {attributes, [key, attr1, attr2]}],
+ move_table(Node2, Node1, Node3, Nodes, Def).
+%%% The actual test
+move_table(CallFrom, FromNode, ToNode, [Node1, Node2, Node3], Def) ->
+ Pids = init_admin(Def, Node1, Node2, Node3),
+ Tab = schema_ops,
+ ?log("Call from ~p move table from ~p to ~p ~n", [CallFrom, FromNode, ToNode]),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+ ?match({atomic, ok}, rpc:call(CallFrom, mnesia, move_table_copy,
+ [Tab, FromNode, ToNode])),
+ verify_results(Pids),
+ rpc:multicall([Node1, Node2, Node3], ?MODULE, get_info, [Tab]),
+ ?verify_mnesia([Node1, Node2, Node3], []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+visibility(doc) ->
+ ["Verify the visibility semantics for various configurations"];
+visibility(suite) ->
+ [
+ dirty_updates_visible_direct,
+ dirty_reads_regardless_of_trans,
+ trans_update_invisibible_outside_trans,
+ trans_update_visible_inside_trans,
+ write_shadows,
+ delete_shadows,
+%% delete_shadows2,
+ write_delete_shadows_bag,
+ write_delete_shadows_bag2,
+ iteration,
+ shadow_search,
+ snmp_shadows
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+dirty_updates_visible_direct(doc) ->
+ ["One process can immediately see dirty updates of another"];
+dirty_updates_visible_direct(suite) -> [];
+dirty_updates_visible_direct(Config) when is_list(Config) ->
+ dirty_visibility(outside_trans, Config).
+
+dirty_reads_regardless_of_trans(doc) ->
+ ["Dirty reads are not affected by transaction context"];
+dirty_reads_regardless_of_trans(suite) -> [];
+dirty_reads_regardless_of_trans(Config) when is_list(Config) ->
+ dirty_visibility(inside_trans, Config).
+
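+%% Run the same sequence of dirty operations performed by the helper process
+%% A either outside any transaction or after A has started a transaction;
+%% the dirty updates must be immediately visible to the test process in both
+%% modes.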
+dirty_visibility(Mode, Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = list_to_atom(lists:concat([dirty_visibility, '_', Mode])),
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab}, {ram_copies, [Node1]}])),
+ ValPos = 3,
+
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ %% Start two processes
+ {success, [A]} = ?start_activities([Node1]),
+
+ case Mode of
+ inside_trans ->
+ ?start_transactions([A]),
+ A ! fun() ->
+ mnesia:write({Tab, a, 11}),
+ mnesia:write({Tab, b, 22}),
+ mnesia:write({Tab, c, 1}),
+ mnesia:write({Tab, d, 2}),
+ mnesia:write({Tab, e, 3}),
+ lists:sort(mnesia:all_keys(Tab))
+ end,
+ ?match_receive({A, [a, b, c, d, e]});
+ outside_trans ->
+ ignore
+ end,
+
+ RecA = {Tab, a, 1},
+ PatA = {Tab, '$1', 1},
+ RecB = {Tab, b, 3},
+ PatB = {Tab, '$1', 3},
+ RecB2 = {Tab, b, 2},
+ PatB2 = {Tab, '$1', 2},
+ ?match([], mnesia:dirty_read({Tab, a})),
+ ?match([], mnesia:dirty_read({Tab, b})),
+ ?match([], mnesia:dirty_match_object(PatA)),
+ ?match([], mnesia:dirty_match_object(PatB)),
+ ?match([], mnesia:dirty_match_object(PatB2)),
+ ?match([], mnesia:dirty_index_read(Tab, 1, ValPos)),
+ ?match([], mnesia:dirty_index_read(Tab, 3, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatA, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatB, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatB2, ValPos)),
+ ?match('$end_of_table', mnesia:dirty_first(Tab)),
+
+ %% dirty_write
+ A ! fun() -> mnesia:dirty_write(RecA) end,
+ ?match_receive({A, ok}),
+ ?match([RecA], mnesia:dirty_read({Tab, a})),
+ ?match([RecA], mnesia:dirty_match_object(PatA)),
+ ?match(a, mnesia:dirty_first(Tab)),
+ ?match([RecA], mnesia:dirty_index_read(Tab, 1, ValPos)),
+ ?match([RecA], mnesia:dirty_index_match_object(PatA, ValPos)),
+ ?match('$end_of_table', mnesia:dirty_next(Tab, a)),
+
+ %% dirty_create
+ A ! fun() -> mnesia:dirty_write(RecB) end,
+ ?match_receive({A, ok}),
+ ?match([RecB], mnesia:dirty_read({Tab, b})),
+ ?match([RecB], mnesia:dirty_match_object(PatB)),
+ ?match([RecB], mnesia:dirty_index_read(Tab, 3, ValPos)),
+ ?match([RecB], mnesia:dirty_index_match_object(PatB, ValPos)),
+ ?match('$end_of_table',
+ mnesia:dirty_next(Tab, mnesia:dirty_next(Tab, mnesia:dirty_first(Tab)))),
+
+ %% dirty_update_counter
+ A ! fun() -> mnesia:dirty_update_counter({Tab, b}, -1) end,
+ ?match_receive({A, _}),
+ ?match([RecB2], mnesia:dirty_read({Tab, b})),
+ ?match([], mnesia:dirty_match_object(PatB)),
+ ?match([RecB2], mnesia:dirty_match_object(PatB2)),
+ ?match([RecB2], mnesia:dirty_index_read(Tab, 2, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatB, ValPos)),
+ ?match([RecB2], mnesia:dirty_index_match_object(PatB2, ValPos)),
+ ?match('$end_of_table',
+ mnesia:dirty_next(Tab, mnesia:dirty_next(Tab, mnesia:dirty_first(Tab)))),
+
+ %% dirty_delete
+ A ! fun() -> mnesia:dirty_delete({Tab, b}) end,
+ ?match_receive({A, ok}),
+ ?match([], mnesia:dirty_read({Tab, b})),
+ ?match([], mnesia:dirty_match_object(PatB2)),
+ ?match([], mnesia:dirty_index_read(Tab, 3, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatB2, ValPos)),
+ ?match(a, mnesia:dirty_first(Tab)),
+ ?match('$end_of_table', mnesia:dirty_next(Tab, a)),
+
+ %% dirty_delete_object
+ ?match([RecA], mnesia:dirty_match_object(PatA)),
+ A ! fun() -> mnesia:dirty_delete_object(RecA) end,
+ ?match_receive({A, ok}),
+ ?match([], mnesia:dirty_read({Tab, a})),
+ ?match([], mnesia:dirty_match_object(PatA)),
+ ?match([], mnesia:dirty_index_read(Tab, 1, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatA, ValPos)),
+ ?match('$end_of_table', mnesia:dirty_first(Tab)),
+
+ case Mode of
+ inside_trans ->
+ A ! end_trans,
+ ?match_receive({A, {atomic, end_trans}});
+ outside_trans ->
+ ignore
+ end,
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+trans_update_invisibible_outside_trans(doc) ->
+ ["Updates in a transaction are invisible outside the transaction"];
+trans_update_invisibible_outside_trans(suite) -> [];
+trans_update_invisibible_outside_trans(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = trans_update_invisibible_outside_trans,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ ValPos = 3,
+ RecA = {Tab, a, 1},
+ PatA = {Tab, '$1', 1},
+ RecB = {Tab, b, 3},
+ PatB = {Tab, '$1', 3},
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Verify =
+ fun() ->
+ ?match([], mnesia:dirty_read({Tab, a})),
+ ?match([], mnesia:dirty_read({Tab, b})),
+ ?match([], mnesia:dirty_match_object(PatA)),
+ ?match([], mnesia:dirty_match_object(PatB)),
+ ?match([], mnesia:dirty_index_read(Tab, 1, ValPos)),
+ ?match([], mnesia:dirty_index_read(Tab, 3, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatA, ValPos)),
+ ?match([], mnesia:dirty_index_match_object(PatB, ValPos)),
+ ?match('$end_of_table', mnesia:dirty_first(Tab))
+ end,
+
+ Fun = fun() ->
+ ?match(ok, mnesia:write(RecA)),
+ Verify(),
+
+ ?match(ok, mnesia:write(RecB)),
+ Verify(),
+
+ ?match(ok, mnesia:delete({Tab, b})),
+ Verify(),
+
+ ?match([RecA], mnesia:match_object(PatA)),
+ Verify(),
+
+ ?match(ok, mnesia:delete_object(RecA)),
+ Verify(),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun)),
+ Verify(),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+trans_update_visible_inside_trans(doc) ->
+ ["Updates in a transaction are visible in the same transaction"];
+trans_update_visible_inside_trans(suite) -> [];
+trans_update_visible_inside_trans(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = trans_update_visible_inside_trans,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]}])),
+ ValPos = 3,
+ RecA = {Tab, a, 1},
+ PatA = {Tab, '$1', 1},
+ RecB = {Tab, b, 3},
+ PatB = {Tab, '$1', 3},
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Fun = fun() ->
+ %% write
+ ?match(ok, mnesia:write(RecA)),
+ ?match([RecA], mnesia:read({Tab, a})),
+ ?match([RecA], mnesia:wread({Tab, a})),
+ ?match([RecA], mnesia:match_object(PatA)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA], mnesia:index_match_object(PatA, ValPos)),
+ ?match([RecA], mnesia:index_read(Tab, 1, ValPos)),
+
+ %% create
+ ?match(ok, mnesia:write(RecB)),
+ ?match([RecB], mnesia:read({Tab, b})),
+ ?match([RecB], mnesia:wread({Tab, b})),
+ ?match([RecB], mnesia:match_object(PatB)),
+ ?match([RecB], mnesia:index_match_object(PatB, ValPos)),
+ ?match([RecB], mnesia:index_read(Tab, 3, ValPos)),
+
+ %% delete
+ ?match(ok, mnesia:delete({Tab, b})),
+ ?match([], mnesia:read({Tab, b})),
+ ?match([], mnesia:wread({Tab, b})),
+ ?match([], mnesia:match_object(PatB)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatB, ValPos)),
+ ?match([], mnesia:index_read(Tab, 2, ValPos)),
+ ?match([], mnesia:index_read(Tab, 3, ValPos)),
+
+ %% delete_object
+ ?match(ok, mnesia:delete_object(RecA)),
+ ?match([], mnesia:read({Tab, a})),
+ ?match([], mnesia:wread({Tab, a})),
+ ?match([], mnesia:match_object(PatA)),
+ ?match([], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatA, ValPos)),
+ ?match([], mnesia:index_read(Tab, 2, ValPos)),
+ ?match([], mnesia:index_read(Tab, 3, ValPos)),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+write_shadows(doc) ->
+ ["Tests whether the shadow shows the correct object when",
+ "writing to the table"];
+write_shadows(suite) -> [];
+write_shadows(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = write_shadows,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]},
+ {type, set}])),
+ ValPos = 3,
+ RecA1 = {Tab, a, 1},
+ PatA1 = {Tab, '$1', 1},
+ RecA2 = {Tab, a, 2},
+ PatA2 = {Tab, '$1', 2},
+
+
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Fun1 = fun() ->
+ ?match(ok, mnesia:write(RecA1)),
+ ok
+ end,
+
+ ?match({atomic, ok}, mnesia:transaction(Fun1)),
+
+ Fun2 = fun() ->
+		   %% write shadow old write - is the confirmed value visible
+ %% in the shadow ?
+ ?match([RecA1], mnesia:read({Tab, a})),
+ ?match([RecA1], mnesia:wread({Tab, a})),
+ ?match([RecA1], mnesia:match_object(PatA1)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA1], mnesia:index_match_object(PatA1, ValPos)),
+ ?match([RecA1], mnesia:index_read(Tab, 1, ValPos)),
+
+		   %% write shadow new write - is a new value visible instead
+ %% of the old value ?
+ ?match(ok, mnesia:write(RecA2)),
+
+ ?match([RecA2], mnesia:read({Tab, a})),
+ ?match([RecA2], mnesia:wread({Tab, a})),
+		   ?match([RecA2], mnesia:match_object(PatA2)), %% delete shadow old but not new write - is the new value visible
+
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA2], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([RecA2], mnesia:index_read(Tab, 2, ValPos)),
+ ok
+
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun2)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+delete_shadows(doc) ->
+ ["Test whether the shadow shows the correct object when deleting objects"];
+delete_shadows(suite) -> [];
+delete_shadows(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = delete_shadows,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]},
+ {type, set}])),
+ ValPos = 3,
+ OidA = {Tab, a},
+ RecA1 = {Tab, a, 1},
+ PatA1 = {Tab, '$1', 1},
+ RecA2 = {Tab, a, 2},
+ PatA2 = {Tab, '$1', 2},
+
+
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Fun1 = fun() ->
+ ?match(ok, mnesia:write(RecA1)),
+ ok
+ end,
+
+ ?match({atomic, ok}, mnesia:transaction(Fun1)),
+
+ Fun2 = fun() ->
+
+
+ %% delete shadow old write - is the confirmed value invisible
+ %% when deleted in the transaction ?
+ ?match(ok, mnesia:delete(OidA)),
+
+ ?match([], mnesia:read({Tab, a})),
+ ?match([], mnesia:wread({Tab, a})),
+ ?match([], mnesia:match_object(PatA1)),
+ ?match([], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatA1, ValPos)),
+ ?match([], mnesia:index_read(Tab, 1, ValPos)),
+
+		   %% delete shadow old but not new write - is the new value visible
+ %% when the old one was deleted ?
+ ?match(ok, mnesia:write(RecA2)),
+
+ ?match([RecA2], mnesia:read({Tab, a})),
+ ?match([RecA2], mnesia:wread({Tab, a})),
+ ?match([RecA2], mnesia:match_object(PatA2)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA2], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([RecA2], mnesia:index_read(Tab, 2, ValPos)),
+
+		   %% delete shadow old and new write - is the new value invisible
+ %% when deleted ?
+ ?match(ok, mnesia:delete(OidA)),
+
+ ?match([], mnesia:read({Tab, a})),
+ ?match([], mnesia:wread({Tab, a})),
+ ?match([], mnesia:match_object(PatA2)),
+ ?match([], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([], mnesia:index_read(Tab, 2, ValPos)),
+ ok
+
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun2)),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+write_delete_shadows_bag(doc) ->
+    ["Test the visibility of written and deleted objects in a bag type table"];
+write_delete_shadows_bag(suite) -> [];
+write_delete_shadows_bag(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = write_delete_shadows_bag,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]},
+ {type, bag}])),
+ ValPos = 3,
+ OidA = {Tab, a},
+
+ RecA1 = {Tab, a, 1},
+ PatA1 = {Tab, '$1', 1},
+
+ RecA2 = {Tab, a, 2},
+ PatA2 = {Tab, '$1', 2},
+
+ RecA3 = {Tab, a, 3},
+ PatA3 = {Tab, '$1', 3},
+
+ PatA = {Tab, a, '_'},
+
+
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Fun1 = fun() ->
+ ?match(ok, mnesia:write(RecA1)),
+ ?match(ok, mnesia:write(RecA2)),
+ ok
+ end,
+
+ ?match({atomic, ok}, mnesia:transaction(Fun1)),
+
+ Fun2 = fun() ->
+ %% delete shadow old write - is the confirmed value invisible
+ %% when deleted in the transaction ?
+ ?match(ok, mnesia:delete_object(RecA1)),
+
+ ?match([RecA2], mnesia:read({Tab, a})),
+ ?match([RecA2], mnesia:wread({Tab, a})),
+ ?match([RecA2], mnesia:match_object(PatA2)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA2], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([RecA2], mnesia:index_read(Tab, 2, ValPos)),
+
+ ?match(ok, mnesia:delete(OidA)),
+
+ ?match([], mnesia:read({Tab, a})),
+ ?match([], mnesia:wread({Tab, a})),
+ ?match([], mnesia:match_object(PatA1)),
+ ?match([], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatA1, ValPos)),
+ ?match([], mnesia:index_read(Tab, 1, ValPos)),
+
+		   %% delete shadow old but not new write - are both new values visible
+ %% when the old one was deleted ?
+ ?match(ok, mnesia:write(RecA2)),
+ ?match(ok, mnesia:write(RecA3)),
+
+
+ ?match([RecA2, RecA3], lists:sort(mnesia:read({Tab, a}))),
+ ?match([RecA2, RecA3], lists:sort(mnesia:wread({Tab, a}))),
+ ?match([RecA2], mnesia:match_object(PatA2)),
+ ?match([a], mnesia:all_keys(Tab)),
+ ?match([RecA2, RecA3], lists:sort(mnesia:match_object(PatA))),
+ ?match([RecA2], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([RecA3], mnesia:index_match_object(PatA3, ValPos)),
+ ?match([RecA2], mnesia:index_read(Tab, 2, ValPos)),
+
+		   %% delete shadow old and new write - is the new value invisible
+ %% when deleted ?
+ ?match(ok, mnesia:delete(OidA)),
+
+ ?match([], mnesia:read({Tab, a})),
+ ?match([], mnesia:wread({Tab, a})),
+ ?match([], mnesia:match_object(PatA2)),
+ ?match([], mnesia:all_keys(Tab)),
+ ?match([], mnesia:index_match_object(PatA2, ValPos)),
+ ?match([], mnesia:index_read(Tab, 2, ValPos)),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun2)),
+ ok.
+
+write_delete_shadows_bag2(doc) ->
+    ["Test the visibility of written and deleted objects in a bag type table "
+     "and verify the results"];
+write_delete_shadows_bag2(suite) -> [];
+write_delete_shadows_bag2(Config) when is_list(Config) ->
+
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab = w_d_s_b,
+
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab},
+ {ram_copies, [Node1]},
+ {type, bag}])),
+ Del = fun() ->
+ R1 = mnesia:read({Tab, 1}),
+ mnesia:delete({Tab, 1}),
+ R2 = mnesia:read({Tab, 1}),
+ mnesia:write({Tab, 1, 1}),
+ mnesia:write({Tab, 1, 2}),
+ R3 = mnesia:read({Tab, 1}),
+ {R1, R2, R3}
+ end,
+ DelObj = fun() ->
+ R1 = mnesia:read({Tab, 2}),
+ mnesia:delete_object({Tab, 2, 2}),
+ R2 = mnesia:read({Tab, 2}),
+ mnesia:write({Tab, 2, 1}),
+ mnesia:write({Tab, 2, 2}),
+ R3 = mnesia:read({Tab, 2}),
+ {R1, R2, R3}
+ end,
+ Both1 = [{Tab, 1, 1}, {Tab, 1, 2}],
+ Both2 = [{Tab, 2, 1}, {Tab, 2, 2}],
+ ?match({atomic, {[], [], Both1}}, mnesia:transaction(Del)),
+ ?match({atomic, {Both1, [], Both1}}, mnesia:transaction(Del)),
+ ?match({atomic, Both1}, mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match({atomic, {[], [], Both2}}, mnesia:transaction(DelObj)),
+ ?match({atomic, {Both2, [{Tab, 2, 1}], Both2}}, mnesia:transaction(DelObj)),
+ ?match({atomic, Both2}, mnesia:transaction(fun() -> mnesia:read({Tab, 2}) end)),
+ ?verify_mnesia([Node1], []).
+
+shadow_search(doc) ->
+    ["Verifies that ordered_set tables are ordered, and that the order is kept "
+     "even when the table is shadowed by transaction updates"];
+shadow_search(suite) -> [];
+shadow_search(Config) when is_list(Config) ->
+ [Node1] = ?acquire_nodes(1, Config),
+ Tab1 = ss_oset,
+ Tab2 = ss_set,
+ Tab3 = ss_bag,
+ Tabs = [Tab1,Tab2,Tab3],
+ RecName = ss,
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab1},
+ {ram_copies, [Node1]},
+ {record_name, RecName},
+ {type, ordered_set}])),
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab2},
+ {record_name, RecName},
+ {ram_copies, [Node1]},
+ {type, set}])),
+ ?match({atomic, ok}, mnesia:create_table([{name, Tab3},
+ {record_name, RecName},
+ {ram_copies, [Node1]},
+ {type, bag}])),
+ Recs = [{RecName, K, K} || K <- [1,3,5]],
+ [mnesia:dirty_write(Tab1, R) || R <- Recs],
+ [mnesia:dirty_write(Tab2, R) || R <- Recs],
+ [mnesia:dirty_write(Tab3, R) || R <- Recs],
+
+ Match = fun(Tab) -> mnesia:match_object(Tab, {'_','_','_'}, write) end,
+ Select = fun(Tab) -> mnesia:select(Tab, [{'_', [], ['$_']}]) end,
+% Trans = fun(Fun,Args) -> mnesia:transaction(Fun,Args) end,
+ LoopHelp = fun('$end_of_table',_) -> [];
+ ({Res,Cont},Fun) ->
+ Sel = mnesia:select(Cont),
+ Res ++ Fun(Sel, Fun)
+ end,
+ SelLoop = fun(Table) ->
+ Sel = mnesia:select(Table, [{'_', [], ['$_']}], 1, read),
+ LoopHelp(Sel,LoopHelp)
+ end,
+
+ R1 = {RecName, 2, 2}, R2 = {RecName, 4, 4},
+ R3 = {RecName, 2, 3}, R4 = {RecName, 3, 1},
+ R5 = {RecName, 104, 104},
+ W1 = fun(Tab,Search) -> mnesia:write(Tab,R1,write),
+ mnesia:write(Tab,R2,write),
+ Search(Tab)
+ end,
+ S1 = lists:sort([R1,R2|Recs]),
+ ?match({atomic,S1}, mnesia:transaction(W1, [Tab1,Select])),
+ ?match({atomic,S1}, mnesia:transaction(W1, [Tab1,Match])),
+ ?match({atomic,S1}, mnesia:transaction(W1, [Tab1,SelLoop])),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab2,Select]))),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab2,SelLoop]))),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab2,Match]))),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab3,Select]))),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab3,SelLoop]))),
+ ?match({atomic,S1}, sort_res(mnesia:transaction(W1, [Tab3,Match]))),
+ [mnesia:dirty_delete_object(Tab,R) || R <- [R1,R2], Tab <- Tabs],
+
+ W2 = fun(Tab,Search) ->
+ mnesia:write(Tab,R3,write),
+ mnesia:write(Tab,R1,write),
+ Search(Tab)
+ end,
+ S2 = lists:sort([R1|Recs]),
+ S2Bag = lists:sort([R1,R3|Recs]),
+ ?match({atomic,S2}, mnesia:transaction(W2, [Tab1,Select])),
+ ?match({atomic,S2}, mnesia:transaction(W2, [Tab1,SelLoop])),
+ ?match({atomic,S2}, mnesia:transaction(W2, [Tab1,Match])),
+ ?match({atomic,S2}, sort_res(mnesia:transaction(W2, [Tab2,Select]))),
+ ?match({atomic,S2}, sort_res(mnesia:transaction(W2, [Tab2,SelLoop]))),
+ ?match({atomic,S2}, sort_res(mnesia:transaction(W2, [Tab2,Match]))),
+ ?match({atomic,S2Bag}, sort_res(mnesia:transaction(W2, [Tab3,Select]))),
+ ?match({atomic,S2Bag}, sort_res(mnesia:transaction(W2, [Tab3,SelLoop]))),
+ ?match({atomic,S2Bag}, sort_res(mnesia:transaction(W2, [Tab3,Match]))),
+%% [mnesia:dirty_delete_object(Tab,R) || R <- [R1,R3], Tab <- Tabs],
+
+ W3 = fun(Tab,Search) ->
+ mnesia:write(Tab,R4,write),
+ mnesia:delete(Tab,element(2,R1),write),
+ Search(Tab)
+ end,
+ S3Bag = lists:sort([R4|lists:delete(R1,Recs)]),
+ S3 = lists:delete({RecName,3,3},S3Bag),
+ ?match({atomic,S3}, mnesia:transaction(W3, [Tab1,Select])),
+ ?match({atomic,S3}, mnesia:transaction(W3, [Tab1,SelLoop])),
+ ?match({atomic,S3}, mnesia:transaction(W3, [Tab1,Match])),
+ ?match({atomic,S3}, sort_res(mnesia:transaction(W3, [Tab2,SelLoop]))),
+ ?match({atomic,S3}, sort_res(mnesia:transaction(W3, [Tab2,Select]))),
+ ?match({atomic,S3}, sort_res(mnesia:transaction(W3, [Tab2,Match]))),
+ ?match({atomic,S3Bag}, sort_res(mnesia:transaction(W3, [Tab3,Select]))),
+ ?match({atomic,S3Bag}, sort_res(mnesia:transaction(W3, [Tab3,SelLoop]))),
+ ?match({atomic,S3Bag}, sort_res(mnesia:transaction(W3, [Tab3,Match]))),
+
+ W4 = fun(Tab,Search) ->
+ mnesia:delete(Tab,-1,write),
+ mnesia:delete(Tab,4 ,write),
+ mnesia:delete(Tab,17,write),
+ mnesia:delete_object(Tab,{RecName, -1, x},write),
+ mnesia:delete_object(Tab,{RecName, 4, x},write),
+ mnesia:delete_object(Tab,{RecName, 42, x},write),
+ mnesia:delete_object(Tab,R2,write),
+ mnesia:write(Tab, R5, write),
+ Search(Tab)
+ end,
+ S4Bag = lists:sort([R5|S3Bag]),
+ S4 = lists:sort([R5|S3]),
+ ?match({atomic,S4}, mnesia:transaction(W4, [Tab1,Select])),
+ ?match({atomic,S4}, mnesia:transaction(W4, [Tab1,SelLoop])),
+ ?match({atomic,S4}, mnesia:transaction(W4, [Tab1,Match])),
+ ?match({atomic,S4}, sort_res(mnesia:transaction(W4, [Tab2,Select]))),
+ ?match({atomic,S4}, sort_res(mnesia:transaction(W4, [Tab2,SelLoop]))),
+ ?match({atomic,S4}, sort_res(mnesia:transaction(W4, [Tab2,Match]))),
+ ?match({atomic,S4Bag}, sort_res(mnesia:transaction(W4, [Tab3,Select]))),
+ ?match({atomic,S4Bag}, sort_res(mnesia:transaction(W4, [Tab3,SelLoop]))),
+ ?match({atomic,S4Bag}, sort_res(mnesia:transaction(W4, [Tab3,Match]))),
+ [mnesia:dirty_delete_object(Tab,R) || R <- [{RecName,3,3},R5], Tab <- Tabs],
+
+ %% hmmm anything more??
+
+ ?verify_mnesia([Node1], []).
+
+removed_resources(suite) ->
+ [rr_kill_copy];
+removed_resources(doc) ->
+    ["Verify that locking behaves correctly when resources are removed"].
+
+rr_kill_copy(suite) -> [];
+rr_kill_copy(Config) when is_list(Config) ->
+ Ns = ?acquire_nodes(3,Config ++ [{tc_timeout, 60000}]),
+ DeleteMe = fun(_Tab,Where2read) ->
+ ?match([], mnesia_test_lib:kill_mnesia([Where2read]))
+ end,
+ Del = removed_resources(Ns, DeleteMe),
+ ?verify_mnesia(Ns -- [Del], []).
+
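+%% Start a transaction that reads from the where_to_read replica, remove that
+%% replica via DeleteRes, let a competing transaction commit on the remaining
+%% node, and verify that the first transaction restarts and commits so both
+%% updates end up in the record (1 + 10 + 100 = 111).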
+removed_resources([_N1,N2,N3], DeleteRes) ->
+ Tab = del_res,
+ ?match({atomic, ok}, mnesia:create_table(Tab,[{ram_copies, [N2,N3]}])),
+
+ Init = fun() -> [mnesia:write({Tab,Key,Key}) || Key <- lists:seq(0,99)] end,
+ ?match([], [Bad || Bad <- mnesia:sync_dirty(Init), Bad /= ok]),
+
+ Where2Read = mnesia:table_info(Tab, where_to_read),
+ [Keep] = [N2,N3] -- [Where2Read],
+ Tester = self(),
+
+ Conflict = fun() ->
+ %% Read a value..
+ [{Tab,1,Val}] = mnesia:read({Tab,1}),
+ case get(restart) of
+ undefined ->
+ Tester ! {pid_1, self()},
+			    %% Wait for sync, the read value has been
+ %% updated and this function should be restarted.
+ receive {Tester,sync} -> ok end,
+ put(restart, restarted);
+ restarted ->
+ ok
+ end,
+ mnesia:write({Tab,1,Val+10})
+ end,
+ Lucky = fun() ->
+ [{Tab,1,Val}] = mnesia:read({Tab,1}),
+ mnesia:write({Tab,1,Val+100})
+ end,
+
+ CPid = spawn_link(fun() -> Tester ! {self(), mnesia:transaction(Conflict)} end),
+
+ %% sync first transaction
+ receive {pid_1, CPid} -> synced end,
+
+ DeleteRes(Tab, Where2Read),
+
+ ?match(Keep, mnesia:table_info(Tab, where_to_read)),
+
+    %% Run the other/Lucky transaction; this should work since
+    %% it won't grab a lock on the conflicting transaction's Where2Read node.
+
+ LPid = spawn_link(Keep, fun() -> Tester ! {self(),mnesia:transaction(Lucky)} end),
+ ?match_receive({LPid,{atomic,ok}}),
+
+ %% Continue Transaction no 1
+ CPid ! {self(), sync},
+
+ ?match(ok, receive {CPid,{atomic,ok}} -> ok after 2000 -> process_info(self()) end),
+
+ ?match({atomic,[{del_res,1,111}]}, mnesia:transaction(fun() -> mnesia:read({Tab,1}) end)),
+ Where2Read.
+
+nasty(suite) -> [];
+
+nasty(doc) ->
+    ["Tries to exercise a rather nasty locking scenario where we have had "
+     "bugs; the testcase tries a combination of locks in the locker queue"];
+
+%% This testcase no longer works as originally intended: it was meant to show
+%% errors when table locks were allowed to be placed in the queue even though
+%% locks with lower Tids already existed in the queue. This is no longer
+%% allowed and the testcase has been updated.
+
+nasty(Config) ->
+ ?acquire_nodes(1, Config),
+ Tab = nasty,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [])),
+ Coord = self(),
+ Write = fun(Key) ->
+ mnesia:write({Tab, Key, write}),
+ Coord ! {write, Key, self(), mnesia:get_activity_id()},
+ receive
+ continue ->
+ ok
+ end,
+ Coord ! {done, {write, Key}, self()}
+ end,
+
+ Update = fun(Key) ->
+ Coord ! {update, Key, self(), mnesia:get_activity_id()},
+ receive
+ continue ->
+ ok
+ end,
+ mnesia:read({Tab, Key}),
+ mnesia:write({Tab, Key, update}),
+ receive
+ continue ->
+ ok
+ end,
+
+ Coord ! {done, {update, Key}, self()}
+ end,
+
+ TabLock = fun() ->
+ Coord ! {tablock, Tab, self(), mnesia:get_activity_id()},
+ receive
+ continue ->
+ ok
+ end,
+ mnesia:lock({table, Tab}, write),
+ Coord ! {done, {tablock, Tab}, self()}
+ end,
+
+ Up = spawn_link(mnesia, transaction, [Update, [0]]),
+ ?match_receive({update, 0, Up, _Tid}),
+ TL = spawn_link(mnesia, transaction, [TabLock]),
+ ?match_receive({tablock, Tab, _Tl, _Tid}),
+ W0 = spawn_link(mnesia, transaction, [Write, [0]]),
+ ?match_receive({write, 0, W0, _Tid}),
+ W1 = spawn_link(mnesia, transaction, [Write, [1]]),
+ ?match_receive({write, 1, W1, _Tid}),
+
+ %% Nothing should be in msg queue!
+ ?match(timeout, receive A -> A after 1000 -> timeout end),
+ Up ! continue, %% Should be queued
+ ?match(timeout, receive A -> A after 1000 -> timeout end),
+ TL ! continue, %% Should be restarted
+% ?match({tablock, _, _, _}, receive A -> A after 1000 -> timeout end),
+ ?match(timeout, receive A -> A after 1000 -> timeout end),
+
+ LQ1 = mnesia_locker:get_lock_queue(),
+ ?match({2, _}, {length(LQ1), LQ1}),
+ W0 ! continue, % Up should be in queue
+ ?match_receive({done, {write, 0}, W0}),
+ ?match_receive({'EXIT', W0, normal}),
+
+ TL ! continue, % Should stay in queue W1
+ ?match(timeout, receive A -> A after 1000 -> timeout end),
+ Up ! continue, % Should stay in queue (TL got higher tid)
+ ?match(timeout, receive A -> A after 1000 -> timeout end),
+
+ LQ2 = mnesia_locker:get_lock_queue(),
+ ?match({2, _}, {length(LQ2), LQ2}),
+
+ W1 ! continue,
+ ?match_receive({done, {write, 1}, W1}),
+ get_exit(W1),
+ get_exit(TL),
+ ?match_receive({done, {tablock,Tab}, TL}),
+ get_exit(Up),
+ ?match_receive({done, {update, 0}, Up}),
+
+ ok.
+
+get_exit(Pid) ->
+ receive
+ {'EXIT', Pid, normal} ->
+ ok
+ after 10000 ->
+ ?error("Timeout EXIT ~p~n", [Pid])
+ end.
+
+iteration(doc) ->
+    ["Verify that the updates before/during iteration are visible "
+ "and that the order is preserved for ordered_set tables"];
+iteration(suite) ->
+ [foldl,first_next].
+
+foldl(doc) ->
+ [""];
+foldl(suite) ->
+ [];
+foldl(Config) when is_list(Config) ->
+ Nodes = [_,N2] = ?acquire_nodes(2, Config),
+ Tab1 = foldl_local,
+ Tab2 = foldl_remote,
+ Tab3 = foldl_ordered,
+ Tab11 = foldr_local,
+ Tab21 = foldr_remote,
+ Tab31 = foldr_ordered,
+ ?match({atomic, ok}, mnesia:create_table(Tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, [{ram_copies, [N2]}, {type, bag}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, [{ram_copies, Nodes},
+ {type, ordered_set}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab11, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab21, [{ram_copies, [N2]}, {type, bag}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab31, [{ram_copies, Nodes},
+ {type, ordered_set}])),
+
+
+ Tab1Els = [{Tab1, N, N} || N <- lists:seq(1, 10)],
+ Tab2Els = [{Tab2, 1, 2} | [{Tab2, N, N} || N <- lists:seq(1, 10)]],
+ Tab3Els = [{Tab3, N, N} || N <- lists:seq(1, 10)],
+ Tab11Els = [{Tab11, N, N} || N <- lists:seq(1, 10)],
+ Tab21Els = [{Tab21, 1, 2} | [{Tab21, N, N} || N <- lists:seq(1, 10)]],
+ Tab31Els = [{Tab31, N, N} || N <- lists:seq(1, 10)],
+
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab1Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab2Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab3Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab11Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab21Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab31Els],
+
+ Get = fun(E, A) -> [E | A] end,
+
+ %% Before
+ AddB = fun(Tab, Func) ->
+ mnesia:write({Tab, 0, 0}),
+ mnesia:write({Tab, 1, 0}),
+ mnesia:write({Tab, 11, 0}),
+ mnesia:Func(Get, [], Tab)
+ end,
+ AddT1 = [{Tab1, 0, 0}, {Tab1, 1, 0}] ++ tl(Tab1Els) ++ [{Tab1, 11, 0}],
+ AddT2 = lists:sort([{Tab2, 0, 0}, {Tab2, 1, 0}] ++ Tab2Els ++ [{Tab2, 11, 0}]),
+ AddT3 = [{Tab3, 0, 0}, {Tab3, 1, 0}] ++ tl(Tab3Els) ++ [{Tab3, 11, 0}],
+ AddT11 = [{Tab11, 0, 0}, {Tab11, 1, 0}] ++ tl(Tab11Els) ++ [{Tab11, 11, 0}],
+ AddT21 = lists:sort([{Tab21, 0, 0}, {Tab21, 1, 0}] ++ Tab21Els ++ [{Tab21, 11, 0}]),
+ AddT31 = [{Tab31, 0, 0}, {Tab31, 1, 0}] ++ tl(Tab31Els) ++ [{Tab31, 11, 0}],
+
+ ?match({atomic, AddT1}, sort_res(mnesia:transaction(AddB, [Tab1, foldl]))),
+ ?match({atomic, AddT2}, sort_res(mnesia:transaction(AddB, [Tab2, foldl]))),
+ ?match({atomic, AddT3}, rev_res(mnesia:transaction(AddB, [Tab3, foldl]))),
+ ?match({atomic, AddT11}, sort_res(mnesia:transaction(AddB, [Tab11, foldr]))),
+ ?match({atomic, AddT21}, sort_res(mnesia:transaction(AddB, [Tab21, foldr]))),
+ ?match({atomic, AddT31}, mnesia:transaction(AddB, [Tab31, foldr])),
+
+ ?match({atomic, ok}, mnesia:create_table(copy, [{ram_copies, [N2]},
+ {record_name, Tab1}])),
+ CopyRec = fun(NewRec, Acc) ->
+ %% OTP-5495
+ W = fun() -> mnesia:write(copy, NewRec, write), [NewRec| Acc] end,
+ {atomic,Res} = sort_res(mnesia:transaction(W)),
+ Res
+ end,
+ Copy = fun() ->
+ AddT1 = mnesia:foldl(CopyRec, [], Tab1),
+ AddT1 = sort_res(mnesia:foldl(Get, [], copy))
+ end,
+ ?match({atomic, AddT1}, sort_res(mnesia:transaction(Copy))),
+
+ Del = fun(E, A) -> mnesia:delete_object(E), [E|A] end,
+ DelD = fun(Tab) ->
+ mnesia:write({Tab, 12, 12}),
+ mnesia:delete({Tab, 0}),
+ mnesia:foldr(Del, [], Tab),
+ mnesia:foldl(Get, [], Tab)
+ end,
+ ?match({atomic, []}, sort_res(mnesia:transaction(DelD, [Tab1]))),
+ ?match({atomic, []}, sort_res(mnesia:transaction(DelD, [Tab2]))),
+ ?match({atomic, []}, rev_res(mnesia:transaction(DelD, [Tab3]))),
+
+ ListWrite = fun(Tab) -> %% OTP-3893
+ mnesia:write({Tab, [12], 12}),
+ mnesia:foldr(Get, [], Tab)
+ end,
+ ?match({atomic, [{Tab1, [12], 12}]}, sort_res(mnesia:transaction(ListWrite, [Tab1]))),
+ ?match({atomic, [{Tab2, [12], 12}]}, sort_res(mnesia:transaction(ListWrite, [Tab2]))),
+ ?match({atomic, [{Tab3, [12], 12}]}, rev_res(mnesia:transaction(ListWrite, [Tab3]))),
+
+ ?verify_mnesia(Nodes, []).
+
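+%% Fold results for set and bag tables come back in unspecified order, and
+%% ordered_set folds accumulate in reverse, so the expectations above
+%% normalise with sort_res/1 or rev_res/1 before matching.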
+sort_res({atomic, List}) when is_list(List) ->
+ {atomic, lists:sort(List)};
+sort_res(Else) when is_list(Else) ->
+ lists:sort(Else);
+sort_res(Else) ->
+ Else.
+
+rev_res({atomic, List}) ->
+ {atomic, lists:reverse(List)};
+rev_res(Else) ->
+ Else.
+
+
+first_next(doc) -> [""];
+first_next(suite) -> [];
+first_next(Config) when is_list(Config) ->
+ Nodes = [_,N2] = ?acquire_nodes(2, Config),
+ Tab1 = local,
+ Tab2 = remote,
+ Tab3 = ordered,
+ Tab4 = bag,
+ Tabs = [Tab1,Tab2,Tab3,Tab4],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, [{ram_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, [{ram_copies, Nodes},
+ {type, ordered_set}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab4, [{ram_copies, Nodes},
+ {type, bag}])),
+
+ %% Some Helpers
+ Trans = fun(Fun) -> mnesia:transaction(Fun) end,
+ Continue = fun(first) -> next;
+ (last) -> prev
+ end,
+ LoopHelp = fun('$end_of_table',_,_,_Fun) -> [];
+ (Key,Tab,Op,Fun) ->
+ Next = mnesia:Op(Tab,Key),
+ [Next |Fun(Next,Tab,Op,Fun)]
+ end,
+ Loop = fun(Tab,Start) ->
+ First = mnesia:Start(Tab),
+ Res = [First|LoopHelp(First,Tab,Continue(Start),LoopHelp)],
+ case mnesia:table_info(Tab, type) of
+ ordered_set when Start == first -> Res;
+ ordered_set ->
+ {L1,L2} = lists:split(length(Res)-1,Res),
+ lists:reverse(L1) ++ L2;
+ _ -> lists:sort(Res)
+ end
+ end,
+
+ %% Verify empty tables
+ [?match({atomic, ['$end_of_table']},
+ Trans(fun() -> Loop(Tab,first) end))
+ || Tab <- Tabs],
+ [?match({atomic, ['$end_of_table']},
+ Trans(fun() -> Loop(Tab,last) end))
+ || Tab <- Tabs],
+ %% Verify that trans write is visible inside trans
+ [?match({atomic, [0,10,'$end_of_table']},
+ Trans(fun() ->
+ mnesia:write({Tab,0,0}),
+ mnesia:write({Tab,10,10}),
+ Loop(Tab,first) end))
+ || Tab <- Tabs],
+ [?match({atomic, ['$end_of_table']},
+ Trans(fun() ->
+ mnesia:delete({Tab,0}),
+ mnesia:delete({Tab,10}),
+ Loop(Tab,first) end))
+ || Tab <- Tabs],
+
+ [?match({atomic, [0,10,'$end_of_table']},
+ Trans(fun() ->
+ mnesia:write({Tab,0,0}),
+ mnesia:write({Tab,10,10}),
+ Loop(Tab,last) end))
+ || Tab <- Tabs],
+ [?match({atomic, ['$end_of_table']},
+ Trans(fun() ->
+ mnesia:delete({Tab,0}),
+ mnesia:delete({Tab,10}),
+ Loop(Tab,last) end))
+ || Tab <- Tabs],
+
+ Tab1Els = [{Tab1, N, N} || N <- lists:seq(1, 5)],
+ Tab2Els = [{Tab2, N, N} || N <- lists:seq(1, 5)],
+ Tab3Els = [{Tab3, N, N} || N <- lists:seq(1, 5)],
+ Tab4Els = [{Tab4, 1, 2} | [{Tab4, N, N} || N <- lists:seq(1, 5)]],
+
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab1Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab2Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab3Els],
+ [mnesia:sync_dirty(fun() -> mnesia:write(E) end) || E <- Tab4Els],
+ Keys = lists:sort(mnesia:dirty_all_keys(Tab1)),
+ R1 = Keys++ ['$end_of_table'],
+ [?match({atomic, R1}, Trans(fun() -> Loop(Tab,first) end))
+ || Tab <- Tabs],
+
+ [?match({atomic, R1}, Trans(fun() -> Loop(Tab,last) end))
+ || Tab <- Tabs],
+ R2 = R1 -- [3],
+
+ [?match({atomic, R2}, Trans(fun() -> mnesia:delete({Tab,3}),Loop(Tab,first) end))
+ || Tab <- Tabs],
+ [?match({atomic, R1}, Trans(fun() -> mnesia:write({Tab,3,3}),Loop(Tab,first) end))
+ || Tab <- Tabs],
+ [?match({atomic, R2}, Trans(fun() -> mnesia:delete({Tab,3}),Loop(Tab,last) end))
+ || Tab <- Tabs],
+ [?match({atomic, R1}, Trans(fun() -> mnesia:write({Tab,3,3}),Loop(Tab,last) end))
+ || Tab <- Tabs],
+ [?match({atomic, R1}, Trans(fun() -> mnesia:write({Tab,4,19}),Loop(Tab,first) end))
+ || Tab <- Tabs],
+ [?match({atomic, R1}, Trans(fun() -> mnesia:write({Tab,4,4}),Loop(Tab,last) end))
+ || Tab <- Tabs],
+
+ ?verify_mnesia(Nodes, []).
+
+
+snmp_shadows(doc) -> [""];
+snmp_shadows(suite) -> [];
+snmp_shadows(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ Tab = snmp_shadows,
+ io:format("With fixstring~n", []),
+ ?match({atomic, ok}, mnesia:create_table(Tab,[{snmp,[{key,{fix_string,integer}}]}])),
+ snmp_shadows_test(Tab),
+ ?match({atomic, ok}, mnesia:delete_table(Tab)),
+ io:format("Without fixstring~n", []),
+ ?match({atomic, ok}, mnesia:create_table(Tab,[{snmp,[{key,{string,integer}}]}])),
+ snmp_shadows_test(Tab),
+ ?verify_mnesia(Nodes, []).
+
+snmp_shadows_test(Tab) ->
+ [mnesia:dirty_write({Tab, {"string", N}, {N, init}}) || N <- lists:seq(2,8,2)],
+
+ CheckOrder = fun(A={_,_,{_,_,State}}, Prev) ->
+ ?match({true, A, Prev}, {Prev < A, A, Prev}),
+ {State,A}
+ end,
+ R1 = mnesia:sync_dirty(fun() -> loop_snmp(Tab, []) end),
+ lists:mapfoldl(CheckOrder, {[],foo,foo}, R1),
+ R2 = mnesia:transaction(fun() -> loop_snmp(Tab, []) end),
+ ?match({atomic, R1}, R2),
+
+ Shadow = fun() ->
+ ok = mnesia:write({Tab, {"string",1}, {1,update}}),
+ ok = mnesia:write({Tab, {"string",4}, {4,update}}),
+ ok = mnesia:write({Tab, {"string",6}, {6,update}}),
+ ok = mnesia:delete({Tab, {"string",6}}),
+ ok = mnesia:write({Tab, {"string",9}, {9,update}}),
+ ok = mnesia:write({Tab, {"string",3}, {3,update}}),
+ ok = mnesia:write({Tab, {"string",5}, {5,update}}),
+ [Row5] = mnesia:read({Tab, {"string",5}}),
+ ok = mnesia:delete_object(Row5),
+ loop_snmp(Tab, [])
+ end,
+ R3 = mnesia:sync_dirty(Shadow),
+ {L3,_} = lists:mapfoldl(CheckOrder, {[],foo,foo}, R3),
+ ?match([{1,update},{2,init},{3,update},{4,update},{8,init},{9,update}], L3),
+ ?match({atomic, ok}, mnesia:clear_table(Tab)),
+
+ [mnesia:dirty_write({Tab, {"string", N}, {N, init}}) || N <- lists:seq(2,8,2)],
+ {atomic, R3} = mnesia:transaction(Shadow),
+ {L4,_} = lists:mapfoldl(CheckOrder, {[],foo,foo}, R3),
+ ?match([{1,update},{2,init},{3,update},{4,update},{8,init},{9,update}], L4),
+ ok.
+
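+%% Walk the table in SNMP index order: for each key returned by
+%% snmp_get_next_index/2, check that snmp_get_row/2, snmp_get_mnesia_key/2
+%% and an ordinary read agree, and collect the {SnmpKey, MnesiaKey, Row}
+%% triples.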
+loop_snmp(Tab,Prev) ->
+ case mnesia:snmp_get_next_index(Tab,Prev) of
+ {ok, SKey} ->
+ {{ok,Row},_} = {mnesia:snmp_get_row(Tab, SKey),{?LINE,Prev,SKey}},
+ {{ok,MKey},_} = {mnesia:snmp_get_mnesia_key(Tab,SKey),{?LINE,Prev,SKey}},
+ ?match({[Row],Row,SKey,MKey}, {mnesia:read({Tab,MKey}),Row,SKey,MKey}),
+ [{SKey, MKey, Row} | loop_snmp(Tab, SKey)];
+ endOfTable ->
+ []
+ end.
diff --git a/lib/mnesia/test/mnesia_measure_test.erl b/lib/mnesia/test/mnesia_measure_test.erl
new file mode 100644
index 0000000000..fbf804dbec
--- /dev/null
+++ b/lib/mnesia/test/mnesia_measure_test.erl
@@ -0,0 +1,203 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_measure_test).
+-author('[email protected]').
+-compile([export_all]).
+
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(init(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema],
+ N, Config, ?FILE, ?LINE)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+all(doc) ->
+ ["Measure various aspects of Mnesia",
+ "Verify that Mnesia has predictable response times,",
+ "that the transaction system has fair algoritms,",
+ "resource consumption, scalabilitym system limits etc.",
+ "Perform some benchmarks."];
+all(suite) ->
+ [
+ prediction,
+ consumption,
+ scalability,
+ benchmarks
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+prediction(doc) ->
+ ["The system must have predictable response times.",
+ "The maintenance of the system should not impact on the",
+ "availability. Make sure that the response times does not vary too",
+ "much from the undisturbed normal usage.",
+ "Verify that deadlocks never occurs."];
+prediction(suite) ->
+ [
+ reader_disturbed_by_node_down,
+ writer_disturbed_by_node_down,
+ reader_disturbed_by_node_up,
+ writer_disturbed_by_node_up,
+ reader_disturbed_by_schema_ops,
+ writer_disturbed_by_schema_ops,
+ reader_disturbed_by_checkpoint,
+ writer_disturbed_by_checkpoint,
+ reader_disturbed_by_dump_log,
+ writer_disturbed_by_dump_log,
+ reader_disturbed_by_backup,
+ writer_disturbed_by_backup,
+ reader_disturbed_by_restore,
+ writer_disturbed_by_restore,
+ fairness
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+fairness(doc) ->
+ ["Verify that the transaction system behaves fair, even under intense",
+ "stress. Combine different access patterns (transaction profiles)",
+ "in order to verify that concurrent applications gets a fair share",
+ "of the database resource. Verify that starvation never may occur."];
+fairness(suite) ->
+ [
+ reader_competing_with_reader,
+ reader_competing_with_writer,
+ writer_competing_with_reader,
+ writer_competing_with_writer
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+consumption(doc) ->
+ ["Measure the resource consumption and publish the outcome. Make",
+ "sure that resources are released after failures."];
+consumption(suite) ->
+ [
+ measure_resource_consumption,
+ determine_resource_leakage
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+scalability(doc) ->
+ ["Try out where the system limits are. We must at least meet the",
+ "documented system limits.",
+ "Redo the performance meters for various configurations and load,",
+ "especially near system limits."];
+scalability(suite) ->
+ [
+ determine_system_limits,
+ performance_at_min_config,
+ performance_at_max_config,
+ performance_at_full_load,
+ resource_consumption_at_min_config,
+ resource_consumption_at_max_config,
+ resource_consumption_at_full_load
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+benchmarks(doc) ->
+ ["Measure typical database operations and publish them. Try to",
+ "verify that new releases of Mnesia always outperforms old",
+ "releases, or at least that the meters does not get worse."];
+benchmarks(suite) ->
+ [
+ meter,
+ cost,
+ dbn_meters,
+ measure_all_api_functions,
+ tpcb,
+ mnemosyne_vs_mnesia_kernel
+ ].
+
+dbn_meters(suite) -> [];
+dbn_meters(Config) when is_list(Config) ->
+ _Nodes = ?init(3, Config),
+ ?match(ok, mnesia_dbn_meters:start()),
+ ok.
+
+tpcb(suite) ->
+ [
+ ram_tpcb,
+ disc_tpcb,
+ disc_only_tpcb
+ ].
+
+tpcb(ReplicaType, Config) ->
+ HarakiriDelay = {tc_timeout, timer:minutes(20)},
+ Nodes = ?acquire_nodes(2, Config ++ [HarakiriDelay]),
+ Args = [{n_branches, 2},
+ {n_drivers_per_node, 1},
+ {replica_nodes, Nodes},
+ {driver_nodes, [hd(Nodes)]},
+ {use_running_mnesia, true},
+ {use_sticky_locks, true},
+ {replica_type, ReplicaType}],
+ ?match({ok, _}, mnesia_tpcb:start(Args)),
+ ?verify_mnesia(Nodes, []).
+
+ram_tpcb(suite) -> [];
+ram_tpcb(Config) when is_list(Config) ->
+ tpcb(ram_copies, Config).
+
+disc_tpcb(suite) -> [];
+disc_tpcb(Config) when is_list(Config) ->
+ tpcb(disc_copies, Config).
+
+disc_only_tpcb(suite) -> [];
+disc_only_tpcb(Config) when is_list(Config) ->
+ tpcb(disc_only_copies, Config).
+
+meter(suite) ->
+ [
+ ram_meter,
+ disc_meter,
+ disc_only_meter
+ ].
+
+ram_meter(suite) -> [];
+ram_meter(Config) when is_list(Config) ->
+ HarakiriDelay = [{tc_timeout, timer:minutes(20)}],
+ Nodes = ?init(3, Config ++ HarakiriDelay),
+ ?match(ok, mnesia_meter:go(ram_copies, Nodes)).
+
+disc_meter(suite) -> [];
+disc_meter(Config) when is_list(Config) ->
+ HarakiriDelay = [{tc_timeout, timer:minutes(20)}],
+ Nodes = ?init(3, Config ++ HarakiriDelay),
+ ?match(ok, mnesia_meter:go(disc_copies, Nodes)).
+
+disc_only_meter(suite) -> [];
+disc_only_meter(Config) when is_list(Config) ->
+ HarakiriDelay = [{tc_timeout, timer:minutes(20)}],
+ Nodes = ?init(3, Config ++ HarakiriDelay),
+ ?match(ok, mnesia_meter:go(disc_only_copies, Nodes)).
+
+cost(suite) -> [];
+cost(Config) when is_list(Config) ->
+ Nodes = ?init(3, Config),
+ ?match(ok, mnesia_cost:go(Nodes)),
+ file:delete("MNESIA_COST").
diff --git a/lib/mnesia/test/mnesia_meter.erl b/lib/mnesia/test/mnesia_meter.erl
new file mode 100644
index 0000000000..68094c4431
--- /dev/null
+++ b/lib/mnesia/test/mnesia_meter.erl
@@ -0,0 +1,465 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Getting started:
+%%
+%% 1 Start one or more distributed Erlang nodes
+%% 2a Connect the nodes, e.g. with net_adm:ping/1
+%% 3a Run mnesia_meter:go()
+%% 3b Run mnesia_meter:go(ReplicaType)
+%% 3c Run mnesia_meter:go(ReplicaType, Nodes)
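+%%
+%% A minimal, illustrative session (a sketch; the node names and the chosen
+%% replica type below are made-up examples, not part of this module):
+%%
+%%   (a@host)1> net_adm:ping('b@host').
+%%   pong
+%%   (a@host)2> mnesia_meter:go(disc_copies, [node() | nodes()]).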
+
+-module(mnesia_meter).
+-author('[email protected]').
+-export([
+ go/0,
+ go/1,
+ go/2,
+ repeat_meter/2
+ ]).
+
+-record(person, {name, %% atomic, unique key
+ data, %% compound structure
+ married_to, %% name of partner or undefined
+ children}). %% list of children
+
+-record(meter, {desc, init, meter, micros}).
+
+-record(result, {desc, list}).
+
+-define(TIMES, 1000).
+
+go() ->
+ go(ram_copies).
+
+go(ReplicaType) ->
+ go(ReplicaType, [node() | nodes()]).
+
+go(ReplicaType, Nodes) ->
+ {ok, FunOverhead} = tc(fun(_) -> {atomic, ok} end, ?TIMES),
+ Size = size(term_to_binary(#person{})),
+ io:format("A fun apply costs ~p micro seconds. Record size is ~p bytes.~n",
+ [FunOverhead, Size]),
+ Res = go(ReplicaType, Nodes, [], FunOverhead, []),
+ NewRes = rearrange(Res, []),
+ DescHeader = lists:flatten(io_lib:format("~w on ~w", [ReplicaType, Nodes])),
+ ItemHeader = lists:seq(1, length(Nodes)),
+ Header = #result{desc = DescHeader, list = ItemHeader},
+ SepList = ['--------' || _ <- Nodes],
+ Separator = #result{desc = "", list = SepList},
+ display([Separator, Header, Separator | NewRes] ++ [Separator]).
+
+go(_ReplicaType, [], _Config, _FunOverhead, Acc) ->
+ Acc;
+go(ReplicaType, [H | T], OldNodes, FunOverhead, Acc) ->
+ Nodes = [H | OldNodes],
+ Config = [{ReplicaType, Nodes}],
+ Res = run(Nodes, Config, FunOverhead),
+ go(ReplicaType, T, Nodes, FunOverhead, [{ReplicaType, Nodes, Res} | Acc]).
+
+rearrange([{_ReplicaType, _Nodes, Meters} | Tail], Acc) ->
+ Acc2 = [add_meter(M, Acc) || M <- Meters],
+ rearrange(Tail, Acc2);
+rearrange([], Acc) ->
+ Acc.
+
+add_meter(M, Acc) ->
+ case lists:keysearch(M#meter.desc, #result.desc, Acc) of
+ {value, R} ->
+ R#result{list = [M#meter.micros | R#result.list]};
+ false ->
+ #result{desc = M#meter.desc, list = [M#meter.micros]}
+ end.
+
+display(Res) ->
+ MaxDesc = lists:max([length(R#result.desc) || R <- Res]),
+ Format = lists:concat(["! ~-", MaxDesc, "s"]),
+ display(Res, Format, MaxDesc).
+
+display([R | Res], Format, MaxDesc) ->
+ case R#result.desc of
+ "" ->
+ io:format(Format, [lists:duplicate(MaxDesc, "-")]);
+ Desc ->
+ io:format(Format, [Desc])
+ end,
+ display_items(R#result.list, R#result.desc),
+ io:format(" !~n", []),
+ display(Res, Format, MaxDesc);
+display([], _Format, _MaxDesc) ->
+ ok.
+
+display_items([_Item | Items], "") ->
+ io:format(" ! ~s", [lists:duplicate(10, $-)]),
+ display_items(Items, "");
+display_items([Micros | Items], Desc) ->
+ io:format(" ! ~10w", [Micros]),
+ display_items(Items, Desc);
+display_items([], _Desc) ->
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+meters() ->
+ [#meter{desc = "transaction update two records with read and write",
+ init = fun write_records/2,
+ meter = fun update_records/1},
+ #meter{desc = "transaction update two records with wread and write",
+ init = fun write_records/2,
+ meter = fun w_update_records/1},
+ #meter{desc = "transaction update two records with read and s_write",
+ init = fun s_write_records/2,
+ meter = fun s_update_records/1},
+ #meter{desc = "sync_dirty update two records with read and write",
+ init = fun sync_dirty_write_records/2,
+ meter = fun sync_dirty_update_records/1},
+ #meter{desc = "async_dirty update two records with read and write",
+ init = fun async_dirty_write_records/2,
+ meter = fun async_dirty_update_records/1},
+ #meter{desc = "plain fun update two records with dirty_read and dirty_write",
+ init = fun dirty_write_records/2,
+ meter = fun dirty_update_records/1},
+ #meter{desc = "ets update two records with read and write (local only)",
+ init = fun ets_opt_write_records/2,
+ meter = fun ets_update_records/1},
+ #meter{desc = "plain fun update two records with ets:lookup and ets:insert (local only)",
+ init = fun bif_opt_write_records/2,
+ meter = fun bif_update_records/1},
+ #meter{desc = "plain fun update two records with dets:lookup and dets:insert (local only)",
+ init = fun dets_opt_write_records/2,
+ meter = fun dets_update_records/1},
+
+ #meter{desc = "transaction write two records with write",
+ init = fun write_records/2,
+ meter = fun(X) -> write_records(X, 0-X) end},
+ #meter{desc = "transaction write two records with s_write",
+ init = fun s_write_records/2,
+ meter = fun(X) -> s_write_records(X, 0-X) end},
+ #meter{desc = "sync_dirty write two records with write",
+ init = fun sync_dirty_write_records/2,
+ meter = fun(X) -> sync_dirty_write_records(X, 0-X) end},
+ #meter{desc = "async_dirty write two records with write",
+ init = fun async_dirty_write_records/2,
+ meter = fun(X) -> async_dirty_write_records(X, 0-X) end},
+ #meter{desc = "plain fun write two records with dirty_write",
+ init = fun dirty_write_records/2,
+ meter = fun(X) -> dirty_write_records(X, 0-X) end},
+ #meter{desc = "ets write two records with write (local only)",
+ init = fun ets_opt_write_records/2,
+ meter = fun(X) -> ets_write_records(X, 0-X) end},
+ #meter{desc = "plain fun write two records with ets:insert (local only)",
+ init = fun bif_opt_write_records/2,
+ meter = fun(X) -> bif_write_records(X, 0-X) end},
+ #meter{desc = "plain fun write two records with dets:insert (local only)",
+ init = fun dets_opt_write_records/2,
+ meter = fun(X) -> dets_write_records(X, 0-X) end},
+
+ #meter{desc = "transaction read two records with read",
+ init = fun write_records/2,
+ meter = fun(X) -> read_records(X, 0-X) end},
+ #meter{desc = "sync_dirty read two records with read",
+ init = fun sync_dirty_write_records/2,
+ meter = fun(X) -> sync_dirty_read_records(X, 0-X) end},
+ #meter{desc = "async_dirty read two records with read",
+ init = fun async_dirty_write_records/2,
+ meter = fun(X) -> async_dirty_read_records(X, 0-X) end},
+ #meter{desc = "plain fun read two records with dirty_read",
+ init = fun dirty_write_records/2,
+ meter = fun(X) -> dirty_read_records(X, 0-X) end},
+ #meter{desc = "ets read two records with read",
+ init = fun ets_opt_write_records/2,
+ meter = fun(X) -> ets_read_records(X, 0-X) end},
+ #meter{desc = "plain fun read two records with ets:lookup",
+ init = fun bif_opt_write_records/2,
+ meter = fun(X) -> bif_read_records(X, 0-X) end},
+ #meter{desc = "plain fun read two records with dets:lookup",
+ init = fun dets_opt_write_records/2,
+ meter = fun(X) -> dets_read_records(X, 0-X) end}
+ ].
+
+update_fun(Name) ->
+ fun() ->
+ case mnesia:read({person, Name}) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = mnesia:read({person, Pers#person.married_to}),
+ mnesia:write(Pers#person{married_to = undefined}),
+ mnesia:write(Partner#person{married_to = undefined})
+ end
+ end.
+
+update_records(Name) ->
+ mnesia:transaction(update_fun(Name)).
+
+sync_dirty_update_records(Name) ->
+ {atomic, mnesia:sync_dirty(update_fun(Name))}.
+
+async_dirty_update_records(Name) ->
+ {atomic, mnesia:async_dirty(update_fun(Name))}.
+
+ets_update_records(Name) ->
+ {atomic, mnesia:ets(update_fun(Name))}.
+
+w_update_records(Name) ->
+ F = fun() ->
+ case mnesia:wread({person, Name}) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = mnesia:wread({person, Pers#person.married_to}),
+ mnesia:write(Pers#person{married_to = undefined}),
+ mnesia:write(Partner#person{married_to = undefined})
+ end
+ end,
+ mnesia:transaction(F).
+
+s_update_records(Name) ->
+ F = fun() ->
+ case mnesia:read({person, Name}) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = mnesia:read({person, Pers#person.married_to}),
+ mnesia:s_write(Pers#person{married_to = undefined}),
+ mnesia:s_write(Partner#person{married_to = undefined})
+ end
+ end,
+ mnesia:transaction(F).
+
+dirty_update_records(Name) ->
+ case mnesia:dirty_read({person, Name}) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = mnesia:dirty_read({person, Pers#person.married_to}),
+ mnesia:dirty_write(Pers#person{married_to = undefined}),
+ mnesia:dirty_write(Partner#person{married_to = undefined})
+ end,
+ {atomic, ok}.
+
+bif_update_records(Name) ->
+ case ets:lookup(person, Name) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = ets:lookup(person, Pers#person.married_to),
+ ets:insert(person, Pers#person{married_to = undefined}),
+ ets:insert(person, Partner#person{married_to = undefined})
+ end,
+ {atomic, ok}.
+
+dets_update_records(Name) ->
+ case dets:lookup(person, Name) of
+ [] ->
+ mnesia:abort(no_such_person);
+ [Pers] ->
+ [Partner] = dets:lookup(person, Pers#person.married_to),
+ dets:insert(person, Pers#person{married_to = undefined}),
+ dets:insert(person, Partner#person{married_to = undefined})
+ end,
+ {atomic, ok}.
+
+write_records_fun(Pers, Partner) ->
+ fun() ->
+ P = #person{children = [ulla, bella]},
+ mnesia:write(P#person{name = Pers, married_to = Partner}),
+ mnesia:write(P#person{name = Partner, married_to = Pers})
+ end.
+
+write_records(Pers, Partner) ->
+ mnesia:transaction(write_records_fun(Pers, Partner)).
+
+sync_dirty_write_records(Pers, Partner) ->
+ {atomic, mnesia:sync_dirty(write_records_fun(Pers, Partner))}.
+
+async_dirty_write_records(Pers, Partner) ->
+ {atomic, mnesia:async_dirty(write_records_fun(Pers, Partner))}.
+
+ets_write_records(Pers, Partner) ->
+ {atomic, mnesia:ets(write_records_fun(Pers, Partner))}.
+
+s_write_records(Pers, Partner) ->
+ F = fun() ->
+ P = #person{children = [ulla, bella]},
+ mnesia:s_write(P#person{name = Pers, married_to = Partner}),
+ mnesia:s_write(P#person{name = Partner, married_to = Pers})
+ end,
+ mnesia:transaction(F).
+
+dirty_write_records(Pers, Partner) ->
+ P = #person{children = [ulla, bella]},
+ mnesia:dirty_write(P#person{name = Pers, married_to = Partner}),
+ mnesia:dirty_write(P#person{name = Partner, married_to = Pers}),
+ {atomic, ok}.
+
+ets_opt_write_records(Pers, Partner) ->
+ case mnesia:table_info(person, where_to_commit) of
+ [{N, ram_copies}] when N == node() ->
+ ets_write_records(Pers, Partner);
+ _ ->
+ throw(skipped)
+ end.
+
+bif_opt_write_records(Pers, Partner) ->
+ case mnesia:table_info(person, where_to_commit) of
+ [{N, ram_copies}] when N == node() ->
+ bif_write_records(Pers, Partner);
+ _ ->
+ throw(skipped)
+ end.
+
+bif_write_records(Pers, Partner) ->
+ P = #person{children = [ulla, bella]},
+ ets:insert(person, P#person{name = Pers, married_to = Partner}),
+ ets:insert(person, P#person{name = Partner, married_to = Pers}),
+ {atomic, ok}.
+
+dets_opt_write_records(Pers, Partner) ->
+ case mnesia:table_info(person, where_to_commit) of
+ [{N, disc_only_copies}] when N == node() ->
+ dets_write_records(Pers, Partner);
+ _ ->
+ throw(skipped)
+ end.
+
+dets_write_records(Pers, Partner) ->
+ P = #person{children = [ulla, bella]},
+ dets:insert(person, P#person{name = Pers, married_to = Partner}),
+ dets:insert(person, P#person{name = Partner, married_to = Pers}),
+ {atomic, ok}.
+
+read_records_fun(Pers, Partner) ->
+ fun() ->
+ case {mnesia:read({person, Pers}),
+ mnesia:read({person, Partner})} of
+ {[_], [_]} ->
+ ok;
+ _ ->
+ mnesia:abort(no_such_person)
+ end
+ end.
+
+read_records(Pers, Partner) ->
+ mnesia:transaction(read_records_fun(Pers, Partner)).
+
+sync_dirty_read_records(Pers, Partner) ->
+ {atomic, mnesia:sync_dirty(read_records_fun(Pers, Partner))}.
+
+async_dirty_read_records(Pers, Partner) ->
+ {atomic, mnesia:async_dirty(read_records_fun(Pers, Partner))}.
+
+ets_read_records(Pers, Partner) ->
+ {atomic, mnesia:ets(read_records_fun(Pers, Partner))}.
+
+dirty_read_records(Pers, Partner) ->
+ case {mnesia:dirty_read({person, Pers}),
+ mnesia:dirty_read({person, Partner})} of
+ {[_], [_]} ->
+ {atomic, ok};
+ _ ->
+ mnesia:abort(no_such_person)
+ end.
+
+bif_read_records(Pers, Partner) ->
+ case {ets:lookup(person, Pers),
+ ets:lookup(person, Partner)} of
+ {[_], [_]} ->
+ {atomic, ok};
+ _ ->
+ mnesia:abort(no_such_person)
+ end.
+
+dets_read_records(Pers, Partner) ->
+ case {dets:lookup(person, Pers),
+ dets:lookup(person, Partner)} of
+ {[_], [_]} ->
+ {atomic, ok};
+ _ ->
+ mnesia:abort(no_such_person)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+run(Nodes, Config, FunOverhead) ->
+ Meters = meters(),
+ io:format("Run ~w meters with table config: ~w~n", [length(Meters), Config]),
+ rpc:multicall(Nodes, mnesia, lkill, []),
+ start(Nodes, Config),
+ Res = [run_meter(Data, Nodes, FunOverhead) || Data <- Meters],
+ stop(Nodes),
+ Res.
+
+run_meter(M, Nodes, FunOverhead) when is_record(M, meter) ->
+ io:format(".", []),
+ case catch init_records(M#meter.init, ?TIMES) of
+ {atomic, ok} ->
+ rpc:multicall(Nodes, mnesia, dump_log, []),
+ case tc(M#meter.meter, ?TIMES) of
+ {ok, Micros} ->
+ M#meter{micros = lists:max([0, Micros - FunOverhead])};
+ {error, Reason} ->
+ M#meter{micros = Reason}
+ end;
+ Res ->
+ M#meter{micros = Res}
+ end.
+
+start(Nodes, Config) ->
+ mnesia:delete_schema(Nodes),
+ ok = mnesia:create_schema(Nodes),
+ Args = [[{dump_log_write_threshold, ?TIMES div 2},
+ {dump_log_time_threshold, timer:hours(10)}]],
+ lists:foreach(fun(Node) -> rpc:call(Node, mnesia, start, Args) end, Nodes),
+ Attrs = record_info(fields, person),
+ TabDef = [{attributes, Attrs} | Config],
+ {atomic, _} = mnesia:create_table(person, TabDef).
+
+stop(Nodes) ->
+ rpc:multicall(Nodes, mnesia, stop, []).
+
+%% Generate some dummy persons
+init_records(_Fun, 0) ->
+ {atomic, ok};
+init_records(Fun, Times) ->
+ {atomic, ok} = Fun(Times, 0 - Times),
+ init_records(Fun, Times - 1).
+
+tc(Fun, Times) ->
+ case catch timer:tc(?MODULE, repeat_meter, [Fun, Times]) of
+ {Micros, ok} ->
+ {ok, Micros div Times};
+ {_Micros, {error, Reason}} ->
+ {error, Reason};
+ {'EXIT', Reason} ->
+ {error, Reason}
+ end.
+
+%% The meter must return {atomic, ok}
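+%% For example, a hypothetical no-op meter that still satisfies this
+%% contract (illustration only, not used by the suite) could be:
+%%
+%%   #meter{desc  = "no-op",
+%%          init  = fun write_records/2,
+%%          meter = fun(_X) -> {atomic, ok} end}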
+repeat_meter(Meter, Times) ->
+ repeat_meter(Meter, {atomic, ok}, Times).
+
+repeat_meter(_, {atomic, ok}, 0) ->
+ ok;
+repeat_meter(Meter, {atomic, _Result}, Times) when Times > 0 ->
+ repeat_meter(Meter, Meter(Times), Times - 1);
+repeat_meter(_Meter, Reason, _Times) ->
+ {error, Reason}.
+
diff --git a/lib/mnesia/test/mnesia_nice_coverage_test.erl b/lib/mnesia/test/mnesia_nice_coverage_test.erl
new file mode 100644
index 0000000000..aa9339f6b9
--- /dev/null
+++ b/lib/mnesia/test/mnesia_nice_coverage_test.erl
@@ -0,0 +1,227 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_nice_coverage_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+-record(nice_tab, {key, val}).
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Test nice usage of the entire API",
+ "Invoke all functions in the API, at least once.",
+ "Try to verify that all functions exists and that they perform",
+ "reasonable things when used in the most simple way."];
+all(suite) -> [nice].
+
+nice(doc) -> [""];
+nice(suite) -> [];
+nice(Config) when is_list(Config) ->
+    %% The whole test suite is one huge test case for the time being
+
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Attrs = record_info(fields, nice_tab),
+
+ initialize(Attrs, Node1),
+ dirty_access(Node1),
+ success_and_fail(),
+ index_mgt(),
+
+ adm(Attrs, Node1, Node2),
+ snmp(Node1, Node2),
+ backup(Node1),
+ ?verify_mnesia(Nodes, []).
+
+initialize(Attrs, Node1) ->
+ ?match(Version when is_list(Version), mnesia:system_info(version)),
+
+ Schema = [{name, nice_tab},
+ {attributes, Attrs}, {ram_copies, [Node1]}],
+
+ ?match({_, _}, mnesia:system_info(schema_version)),
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ ?match(ok, mnesia:info()),
+ ?match(set, mnesia:table_info(nice_tab, type)),
+ ?match(ok, mnesia:schema()),
+ ?match(ok, mnesia:schema(nice_tab)),
+ ok.
+
+dirty_access(Node1) ->
+ TwoThree = #nice_tab{key=23, val=23},
+ TwoFive = #nice_tab{key=25, val=25},
+ ?match([], mnesia:dirty_slot(nice_tab, 0)),
+ ?match(ok, mnesia:dirty_write(TwoThree)),
+ ?match([TwoThree], mnesia:dirty_read({nice_tab, 23})),
+ ?match(ok, mnesia:dirty_write(TwoFive)),
+ ?match(ok, mnesia:dirty_delete_object(TwoFive)),
+
+ ?match(23, mnesia:dirty_first(nice_tab)),
+ ?match('$end_of_table', mnesia:dirty_next(nice_tab, 23)),
+ ?match([TwoThree], mnesia:dirty_match_object(TwoThree)),
+ ?match(ok, mnesia:dirty_delete({nice_tab, 23})),
+
+ CounterSchema = [{ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(nice_counter_tab, CounterSchema)),
+ TwoFour = {nice_counter_tab, 24, 24},
+ ?match(ok, mnesia:dirty_write(TwoFour)),
+ ?match(34, mnesia:dirty_update_counter({nice_counter_tab, 24}, 10)),
+ TF = {nice_counter_tab, 24, 34},
+ ?match([TF], mnesia:dirty_read({nice_counter_tab, 24})),
+ ?match(ok, mnesia:dirty_delete({nice_counter_tab, 24})),
+ ?match(ok, mnesia:dirty_delete_object(TF)),
+ ok.
+
+success_and_fail() ->
+ ?match({atomic, a_good_trans}, mnesia:transaction(fun() ->good_trans()end)),
+
+ BadFun =
+ fun() ->
+ Two = #nice_tab{key=2, val=12},
+ ?match([Two], mnesia:match_object(#nice_tab{key='$1', val=12})),
+ ?match([#nice_tab{key=3, val=13}], mnesia:wread({nice_tab, 3})),
+ ?match(ok, mnesia:delete({nice_tab, 1})),
+ ?match(ok, mnesia:delete_object(Two)),
+ mnesia:abort(bad_trans),
+ ?match(bad, trans)
+ end,
+ ?match({aborted, bad_trans}, mnesia:transaction(BadFun)),
+ ?match(L when is_list(L), mnesia:error_description(no_exists)),
+ ?match({atomic, ok}, mnesia:transaction(fun(A) -> lock(), A end, [ok])),
+ ?match({atomic, ok}, mnesia:transaction(fun(A) -> lock(), A end, [ok], 3)),
+ ok.
+
+good_trans() ->
+ ?match([], mnesia:read(nice_tab, 3)),
+ ?match([], mnesia:read({nice_tab, 3})),
+ ?match(ok, mnesia:write(#nice_tab{key=14, val=4})),
+ ?match([14], mnesia:all_keys(nice_tab)),
+
+ Records = [ #nice_tab{key=K, val=K+10} || K <- lists:seq(1, 10) ],
+ Ok = [ ok || _ <- Records],
+ ?match(Ok, lists:map(fun(R) -> mnesia:write(R) end, Records)),
+ a_good_trans.
+
+
+lock() ->
+ ?match(ok, mnesia:s_write(#nice_tab{key=22, val=22})),
+ ?match(ok, mnesia:read_lock_table(nice_tab)),
+ ?match(ok, mnesia:write_lock_table(nice_tab)),
+ ok.
+
+index_mgt() ->
+ UniversalRec = #nice_tab{key=4711, val=4711},
+ ?match(ok, mnesia:dirty_write(UniversalRec)),
+ ValPos = #nice_tab.val,
+ ?match({atomic, ok}, mnesia:add_table_index(nice_tab, ValPos)),
+
+ IndexFun =
+ fun() ->
+ ?match([UniversalRec],
+ mnesia:index_read(nice_tab, 4711, ValPos)),
+ Pat = #nice_tab{key='$1', val=4711},
+ ?match([UniversalRec],
+ mnesia:index_match_object(Pat, ValPos)),
+ index_trans
+ end,
+ ?match({atomic, index_trans}, mnesia:transaction(IndexFun, infinity)),
+ ?match([UniversalRec],
+ mnesia:dirty_index_read(nice_tab, 4711, ValPos)),
+ ?match([UniversalRec],
+ mnesia:dirty_index_match_object(#nice_tab{key='$1', val=4711}, ValPos)),
+
+ ?match({atomic, ok}, mnesia:del_table_index(nice_tab, ValPos)),
+ ok.
+
+adm(Attrs, Node1, Node2) ->
+ This = node(),
+ ?match({ok, This}, mnesia:subscribe(system)),
+ ?match({atomic, ok},
+ mnesia:add_table_copy(nice_tab, Node2, disc_only_copies)),
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(nice_tab, Node2, ram_copies)),
+ ?match({atomic, ok}, mnesia:del_table_copy(nice_tab, Node1)),
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ ?match([], mnesia_test_lib:start_mnesia([Node1, Node2], [nice_tab])),
+ ?match(ok, mnesia:wait_for_tables([schema], infinity)),
+
+ Transformer = fun(Rec) ->
+ list_to_tuple(tuple_to_list(Rec) ++ [initial_value])
+ end,
+ ?match({atomic, ok},
+ mnesia:transform_table(nice_tab, Transformer, Attrs ++ [extra])),
+
+ ?match({atomic, ok}, mnesia:delete_table(nice_tab)),
+ DumpSchema = [{name, nice_tab}, {attributes, Attrs}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(DumpSchema)),
+ ?match({atomic, ok}, mnesia:dump_tables([nice_tab])),
+ ?match({atomic, ok}, mnesia:move_table_copy(nice_tab, Node2, Node1)),
+
+ ?match(yes, mnesia:force_load_table(nice_counter_tab)),
+ ?match(dumped, mnesia:dump_log()),
+ ok.
+
+backup(Node1) ->
+ Tab = backup_nice,
+ Def = [{disc_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match({ok,_,_}, mnesia:activate_checkpoint([{name, cp}, {max, [Tab]}])),
+ File = "nice_backup.BUP",
+ File2 = "nice_backup2.BUP",
+ File3 = "nice_backup3.BUP",
+ ?match(ok, mnesia:backup_checkpoint(cp, File)),
+ ?match(ok, mnesia:backup_checkpoint(cp, File, mnesia_backup)),
+ ?match(ok, mnesia:deactivate_checkpoint(cp)),
+ ?match(ok, mnesia:backup(File)),
+ ?match(ok, mnesia:backup(File, mnesia_backup)),
+
+ Fun = fun(X, Acc) -> {[X], Acc} end,
+ ?match({ok, 0}, mnesia:traverse_backup(File, File2, Fun, 0)),
+ ?match({ok, 0}, mnesia:traverse_backup(File, mnesia_backup, dummy, read_only, Fun, 0)),
+ ?match(ok, mnesia:install_fallback(File)),
+ ?match(ok, mnesia:uninstall_fallback()),
+ ?match(ok, mnesia:install_fallback(File, mnesia_backup)),
+ ?match(ok, mnesia:dump_to_textfile(File3)),
+ ?match({atomic, ok}, mnesia:load_textfile(File3)),
+ ?match(ok, file:delete(File)),
+ ?match(ok, file:delete(File2)),
+ ?match(ok, file:delete(File3)),
+ ok.
+
+snmp(Node1, Node2) ->
+ Tab = nice_snmp,
+ Def = [{disc_copies, [Node1]}, {ram_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match({aborted, {badarg, Tab, _}}, mnesia:snmp_open_table(Tab, [])),
+ ?match({atomic, ok}, mnesia:snmp_open_table(Tab, [{key, integer}])),
+ ?match(endOfTable, mnesia:snmp_get_next_index(Tab, [0])),
+ ?match(undefined, mnesia:snmp_get_row(Tab, [0])),
+ ?match(undefined, mnesia:snmp_get_mnesia_key(Tab, [0])),
+ ?match({atomic, ok}, mnesia:snmp_close_table(Tab)),
+ ok.
+
diff --git a/lib/mnesia/test/mnesia_qlc_test.erl b/lib/mnesia/test/mnesia_qlc_test.erl
new file mode 100644
index 0000000000..1e4f776c7d
--- /dev/null
+++ b/lib/mnesia/test/mnesia_qlc_test.erl
@@ -0,0 +1,475 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_qlc_test).
+
+-compile(export_all).
+
+-export([all/1]).
+
+-include("mnesia_test_lib.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+
+init_per_testcase(Func, Conf) ->
+ setup(Conf),
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+all(doc) ->
+ ["Test that the qlc mnesia interface works as expected."];
+all(suite) ->
+ case code:which(qlc) of
+ non_existing -> [];
+ _ ->
+ all_qlc()
+ end.
+
+all_qlc() ->
+ [dirty, trans, frag, info, mnesia_down].
+
+init_testcases(Type,Config) ->
+ Nodes = [N1,N2] = ?acquire_nodes(2, Config),
+ ?match({atomic, ok}, mnesia:create_table(a, [{Type,[N1]}, {index,[3]}])),
+ ?match({atomic, ok}, mnesia:create_table(b, [{Type,[N2]}])),
+ Write = fun(Id) ->
+ ok = mnesia:write({a, {a,Id}, 100 - Id}),
+ ok = mnesia:write({b, {b,100-Id}, Id})
+ end,
+ All = fun() -> [Write(Id) || Id <- lists:seq(1,10)], ok end,
+ ?match({atomic, ok}, mnesia:sync_transaction(All)),
+ Nodes.
+
+%% Test cases
+dirty(suite) ->
+ [dirty_nice_ram_copies,
+ dirty_nice_disc_copies,
+ dirty_nice_disc_only_copies].
+
+dirty_nice_ram_copies(Setup) -> dirty_nice(Setup,ram_copies).
+dirty_nice_disc_copies(Setup) -> dirty_nice(Setup,disc_copies).
+dirty_nice_disc_only_copies(Setup) -> dirty_nice(Setup,disc_only_copies).
+
+dirty_nice(suite, _) -> [];
+dirty_nice(doc, _) -> [];
+dirty_nice(Config, Type) when is_list(Config) ->
+ Ns = init_testcases(Type,Config),
+ QA = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(a),"
+ " Val == 90 + Key]">>),
+ QB = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(b),"
+ " Key == 90 + Val]">>),
+ QC = qlc:sort(mnesia:table(a, [{n_objects,1}, {lock,write}, {traverse, select}])),
+ QD = qlc:sort(mnesia:table(a, [{n_objects,1}, {traverse,{select,[{'$1',[],['$1']}]}}])),
+
+ FA = fun() -> qlc:e(QA) end,
+ FB = fun() -> qlc:e(QB) end,
+ FC = fun() -> qlc:e(QC) end,
+ FD = fun() -> qlc:e(QD) end,
+
+ %% Currently unsupported
+ ?match({'EXIT',{aborted,no_transaction}}, FA()),
+ ?match({'EXIT',{aborted,no_transaction}}, FB()),
+ %%
+ CRes = lists:sort(mnesia:dirty_match_object(a, {'_','_','_'})),
+ ?match([{a,{a,5},95}], mnesia:async_dirty(FA)),
+ ?match([{b,{b,95},5}], mnesia:async_dirty(FB)),
+ ?match(CRes, mnesia:async_dirty(FC)),
+ ?match(CRes, mnesia:async_dirty(FD)),
+ ?match([{a,{a,5},95}], mnesia:sync_dirty(FA)),
+ ?match([{b,{b,95},5}], mnesia:sync_dirty(FB)),
+ ?match(CRes, mnesia:sync_dirty(FC)),
+ ?match([{a,{a,5},95}], mnesia:activity(async_dirty, FA)),
+ ?match([{b,{b,95},5}], mnesia:activity(async_dirty, FB)),
+ ?match([{a,{a,5},95}], mnesia:activity(sync_dirty, FA)),
+ ?match([{b,{b,95},5}], mnesia:activity(sync_dirty, FB)),
+ ?match(CRes, mnesia:activity(async_dirty,FC)),
+ case Type of
+ disc_only_copies -> skip;
+ _ ->
+ ?match([{a,{a,5},95}], mnesia:ets(FA)),
+ ?match([{a,{a,5},95}], mnesia:activity(ets, FA))
+ end,
+ ?verify_mnesia(Ns, []).
+
+trans(suite) ->
+ [trans_nice_ram_copies,
+ trans_nice_disc_copies,
+ trans_nice_disc_only_copies,
+ atomic
+ ].
+
+trans_nice_ram_copies(Setup) -> trans_nice(Setup,ram_copies).
+trans_nice_disc_copies(Setup) -> trans_nice(Setup,disc_copies).
+trans_nice_disc_only_copies(Setup) -> trans_nice(Setup,disc_only_copies).
+
+trans_nice(suite, _) -> [];
+trans_nice(doc, _) -> [];
+trans_nice(Config, Type) when is_list(Config) ->
+ Ns = init_testcases(Type,Config),
+ QA = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(a),"
+ " Val == 90 + Key]">>),
+ QB = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(b),"
+ " Key == 90 + Val]">>),
+ QC = handle(recs(),
+ <<"[Q || Q = #a{v=91} <- mnesia:table(a)]"
+ >>),
+
+ QD = qlc:sort(mnesia:table(a, [{n_objects,1}, {lock,write}, {traverse, select}])),
+ QE = qlc:sort(mnesia:table(a, [{n_objects,1}, {traverse,{select,[{'$1',[],['$1']}]}}])),
+
+ DRes = lists:sort(mnesia:dirty_match_object(a, {'_','_','_'})),
+
+ FA = fun() -> qlc:e(QA) end,
+ FB = fun() -> qlc:e(QB) end,
+ FC = fun() -> qlc:e(QC) end,
+ FD = fun() -> qlc:e(QD) end,
+ FE = fun() -> qlc:e(QE) end,
+
+ ?match({atomic,[{a,{a,5},95}]}, mnesia:transaction(FA)),
+ ?match({atomic,[{b,{b,95},5}]}, mnesia:transaction(FB)),
+ ?match({atomic,[{a,{a,9},91}]}, mnesia:transaction(FC)),
+ ?match({atomic,[{a,{a,5},95}]}, mnesia:sync_transaction(FA)),
+ ?match({atomic,[{b,{b,95},5}]}, mnesia:sync_transaction(FB)),
+ ?match({atomic,[{a,{a,9},91}]}, mnesia:sync_transaction(FC)),
+ ?match([{a,{a,5},95}], mnesia:activity(transaction,FA)),
+ ?match([{b,{b,95},5}], mnesia:activity(transaction,FB)),
+ ?match([{a,{a,9},91}], mnesia:activity(transaction,FC)),
+ ?match([{a,{a,5},95}], mnesia:activity(sync_transaction,FA)),
+ ?match([{b,{b,95},5}], mnesia:activity(sync_transaction,FB)),
+ ?match([{a,{a,9},91}], mnesia:activity(sync_transaction,FC)),
+
+ ?match({atomic, DRes}, mnesia:transaction(FD)),
+ ?match({atomic, DRes}, mnesia:transaction(FE)),
+
+ Rest = fun(Cursor,Loop) ->
+ case qlc:next_answers(Cursor, 1) of
+ [] -> [];
+ [A]-> [A|Loop(Cursor,Loop)]
+ end
+ end,
+ Loop = fun() ->
+ Cursor = qlc:cursor(QD),
+ Rest(Cursor,Rest)
+ end,
+ ?match({atomic, DRes}, mnesia:transaction(Loop)),
+
+ ?verify_mnesia(Ns, []).
+
+%% -record(a, {k,v}).
+%% -record(b, {k,v}).
+%% -record(k, {t,v}).
+
+recs() ->
+ <<"-record(a, {k,v}). "
+ "-record(b, {k,v}). "
+ "-record(k, {t,v}). "
+ >>.
+
+atomic(suite) -> [atomic_eval];
+atomic(doc) -> [].
+
+atomic_eval(suite) -> [];
+atomic_eval(doc) -> [];
+atomic_eval(Config) ->
+ Ns = init_testcases(ram_copies, Config),
+ Q1 = handle(recs(),
+ <<"[Q || Q = #a{k={_,9}} <- mnesia:table(a)]"
+ >>),
+ Eval = fun(Q) ->
+ {qlc:e(Q),
+ mnesia:system_info(held_locks)}
+ end,
+ Self = self(),
+ ?match({[{a,{a,9},91}], [{{a,'______WHOLETABLE_____'},read,{tid,_,Self}}]},
+ ok(Eval,[Q1])),
+
+ Q2 = handle(recs(),
+ <<"[Q || Q = #a{k={a,9}} <- mnesia:table(a)]"
+ >>),
+
+ ?match({[{a,{a,9},91}],[{{a,{a,9}},read,{tid,_,Self}}]},
+ ok(Eval,[Q2])),
+
+ Flush = fun(Loop) -> %% Clean queue
+ receive _ -> Loop(Loop)
+ after 0 -> ok end
+ end,
+
+ Flush(Flush),
+
+ GrabLock = fun(Father) ->
+ mnesia:read(a, {a,9}, write),
+ Father ! locked,
+ receive cont -> ok end end,
+
+ Pid1 = spawn(fun() -> ?match(ok, ok(GrabLock, [Self])) end),
+ ?match(locked,receive locked -> locked after 5000 -> timeout end), %% Wait
+
+ put(count,0),
+ Restart = fun(Locker,Fun) ->
+ Count = get(count),
+ case {Count,(catch Fun())} of
+ {0, {'EXIT', R}} ->
+ Locker ! cont,
+ put(count, Count+1),
+ erlang:yield(),
+ exit(R);
+ Else ->
+ Else
+ end
+ end,
+
+ ?match({1,{[{a,{a,9},91}], [{{a,'______WHOLETABLE_____'},read,{tid,_,Self}}]}},
+ ok(Restart,[Pid1,fun() -> Eval(Q1) end])),
+
+ Pid2 = spawn(fun() -> ?match(ok, ok(GrabLock, [Self])) end),
+ ?match(locked,receive locked -> locked after 5000 -> timeout end), %% Wait
+ put(count,0),
+ ?match({1,{[{a,{a,9},91}],[{{a,{a,9}},read,{tid,_,Self}}]}},
+ ok(Restart,[Pid2, fun() -> Eval(Q2) end])),
+
+%% Basic test
+ Cursor = fun() ->
+ QC = qlc:cursor(Q1),
+ qlc:next_answers(QC)
+ end,
+
+ ?match([{a,{a,9},91}], ok(Cursor, [])),
+ %% Lock
+
+ Pid3 = spawn(fun() -> ?match(ok, ok(GrabLock, [Self])) end),
+ ?match(locked,receive locked -> locked after 5000 -> timeout end), %% Wait
+ put(count,0),
+
+ ?match({1,[{a,{a,9},91}]}, ok(Restart,[Pid3, Cursor])),
+ QC1 = ok(fun() -> qlc:cursor(Q1) end, []),
+ ?match({'EXIT', _}, qlc:next_answers(QC1)),
+ ?match({aborted,_}, ok(fun()->qlc:next_answers(QC1)end,[])),
+ ?verify_mnesia(Ns, []).
+
+
+frag(suite) -> [];
+frag(doc) -> [];
+frag(Config) ->
+ Ns = init_testcases(ram_copies,Config),
+ QA = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(a),"
+ " Val == 90 + Key]">>),
+ QB = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(b),"
+ " Key == 90 + Val]">>),
+
+ Activate =
+ fun(Tab) ->
+ ?match({atomic,ok},mnesia:change_table_frag(Tab, {activate, []})),
+ Dist = mnesia_frag_test:frag_dist(Tab),
+ ?match({atomic,ok},mnesia:change_table_frag(Tab,{add_frag,Dist}))
+ end,
+ Activate(a),
+ Activate(b),
+
+ Fun = fun(Tab) -> mnesia:table_info(Tab, frag_names) end,
+ FTs = mnesia:activity(sync_dirty, Fun, [a], mnesia_frag) ++
+ mnesia:activity(sync_dirty, Fun, [b], mnesia_frag),
+ Size = fun(Tab) -> mnesia:dirty_rpc(Tab, mnesia, table_info, [Tab,size]) end,
+
+    %% Verify that not all data belongs to the same fragment.
+ ?match([], [{Tab,Size(Tab)} || Tab <- FTs,
+ Size(Tab) =< 0]),
+
+ FA = fun() -> qlc:e(QA) end,
+ FB = fun() -> qlc:e(QB) end,
+ ?match([{a,{a,5},95}], mnesia:activity(transaction,FA,[],mnesia_frag)),
+ ?match([{b,{b,95},5}], mnesia:activity(transaction,FB,[],mnesia_frag)),
+
+ ?verify_mnesia(Ns, []).
+
+info(suite) -> [];
+info(doc) -> [];
+info(Config) ->
+ Ns = init_testcases(ram_copies, Config),
+ Q1 = handle(recs(),
+ <<"[Q || Q = #a{k={_,9}} <- mnesia:table(a)]"
+ >>),
+
+ Q2 = handle(recs(),
+ <<"[Q || Q = #a{k={a,9}} <- mnesia:table(a)]"
+ >>),
+
+ Q3 = handle(recs(),
+ <<"[Q || Q = #a{v=91} <- mnesia:table(a)]"
+ >>),
+
+ %% FIXME compile and check results!
+
+ ?match(ok,io:format("~s~n",[qlc:info(Q1)])),
+ ?match(ok,io:format("~s~n",[qlc:info(Q2)])),
+ ?match(ok,io:format("~s~n",[qlc:info(Q3)])),
+
+ ?verify_mnesia(Ns, []).
+
+ok(Fun,A) ->
+ case mnesia:transaction(Fun,A) of
+ {atomic, R} -> R;
+ E -> E
+ end.
+
+
+mnesia_down(suite) -> [];
+mnesia_down(doc) ->
+ ["Test bug OTP-7968, which crashed mnesia when a"
+ "mnesia_down came after qlc had been invoked"];
+mnesia_down(Config) when is_list(Config) ->
+ [N1,N2] = init_testcases(ram_copies,Config),
+ QB = handle(<<"[Q || Q = {_,{_,Key},Val} <- mnesia:table(b),"
+ " Val == Key - 90]">>),
+
+ Tester = self(),
+
+ Eval = fun() ->
+ Cursor = qlc:cursor(QB), %% Forces another process
+ Res = qlc:next_answers(Cursor),
+ Tester ! {qlc, self(), Res},
+ {Mod, Tid, Ts} = get(mnesia_activity_state),
+ receive
+ continue ->
+ io:format("Continuing ~p ~p ~n",[self(), {Mod, Tid, Ts}]),
+ io:format("ETS ~p~n",[ets:tab2list(element(2,Ts))]),
+ io:format("~p~n",[process_info(self(),messages)]),
+ Res
+ end
+ end,
+ spawn(fun() -> TransRes = mnesia:transaction(Eval), Tester ! {test,TransRes} end),
+
+ TMInfo = fun() ->
+ TmInfo = mnesia_tm:get_info(5000),
+ mnesia_tm:display_info(user, TmInfo)
+ end,
+ receive
+ {qlc, QPid, QRes} ->
+ ?match([{b,{b,95},5}], QRes),
+ TMInfo(),
+ mnesia_test_lib:kill_mnesia([N2]),
+ %%timer:sleep(1000),
+ QPid ! continue
+ after 2000 ->
+ exit(timeout1)
+ end,
+
+ receive
+ {test, QRes2} ->
+ ?match({atomic, [{b,{b,95},5}]}, QRes2)
+ after 2000 ->
+ exit(timeout2)
+ end,
+
+ ?verify_mnesia([N1], [N2]).
+
+
+nested_qlc(suite) -> [];
+nested_qlc(doc) ->
+ ["Test bug in OTP-7968 (the second problem) where nested"
+ "transaction don't work as expected"];
+nested_qlc(Config) when is_list(Config) ->
+ Ns = init_testcases(ram_copies,Config),
+ Res = as_with_bs(),
+ ?match([_|_], Res),
+ top_as_with_some_bs(10),
+
+ ?verify_mnesia(Ns, []).
+
+
+%% Code from Daniel
+bs_by_a_id(A_id) ->
+ find(qlc:q([ B || B={_,_,F_id} <- mnesia:table(b), F_id == A_id])).
+
+as_with_bs() ->
+ find(qlc:q([ {A,bs_by_a_id(Id)} ||
+ A = {_, {a,Id}, _} <- mnesia:table(a)])).
+
+top_as_with_some_bs(Limit) ->
+ top(
+ qlc:q([ {A,bs_by_a_id(Id)} ||
+ A = {_, {a,Id}, _} <- mnesia:table(a)]),
+ Limit,
+ fun(A1,A2) -> A1 < A2 end
+ ).
+
+% --- utils
+
+find(Q) ->
+ F = fun() -> qlc:e(Q) end,
+ {atomic, Res} = mnesia:transaction(F),
+ Res.
+
+% --- returns the top Limit results from query Q, ordered by the Order sort function
+top(Q, Limit, Order) ->
+ Do = fun() ->
+ OQ = qlc:sort(Q, [{order,Order}]),
+ QC = qlc:cursor(OQ),
+ Res = qlc:next_answers(QC, Limit),
+ qlc:delete_cursor(QC),
+ Res
+ end,
+ {atomic, Res} = mnesia:transaction(Do),
+ Res.
+
+%% To keep the mnesia suite backward compatible,
+%% we compile the queries at runtime when qlc is available.
+%% Compiles and returns a handle to a qlc query.
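+%% For example (illustrative only), handle(recs(), <<"[A || A <- mnesia:table(a)]">>)
+%% compiles a temporary module containing the record definitions and the query,
+%% and returns the handle produced by qlc:q/1, which can then be evaluated with
+%% qlc:e/1 inside a mnesia activity.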
+handle(Expr) ->
+ handle(<<>>,Expr).
+handle(Records,Expr) ->
+ case catch handle2(Records,Expr) of
+ {ok, Handle} ->
+ Handle;
+ Else ->
+ ?match(ok, Else)
+ end.
+
+handle2(Records,Expr) ->
+ {FN,Mod} = temp_name(),
+ ModStr = list_to_binary("-module(" ++ atom_to_list(Mod) ++ ").\n"),
+ Prog = <<
+ ModStr/binary,
+ "-include_lib(\"stdlib/include/qlc.hrl\").\n",
+ "-export([tmp/0]).\n",
+ Records/binary,"\n",
+ "tmp() ->\n",
+%% " _ = (catch throw(fvalue_not_reset)),"
+ " qlc:q( ",
+ Expr/binary,").\n">>,
+
+ ?match(ok,file:write_file(FN,Prog)),
+ {ok,Forms} = epp:parse_file(FN,"",""),
+ {ok,Mod,Bin} = compile:forms(Forms),
+ code:load_binary(Mod,FN,Bin),
+ {ok, Mod:tmp()}.
+
+setup(Config) ->
+ put(mts_config,Config),
+ put(mts_tf_counter,0).
+
+temp_name() ->
+ Conf = get(mts_config),
+ C = get(mts_tf_counter),
+ put(mts_tf_counter,C+1),
+ {filename:join([proplists:get_value(priv_dir,Conf, "."),
+ "tempfile"++integer_to_list(C)++".tmp"]),
+ list_to_atom("tmp" ++ integer_to_list(C))}.
diff --git a/lib/mnesia/test/mnesia_recovery_test.erl b/lib/mnesia/test/mnesia_recovery_test.erl
new file mode 100644
index 0000000000..f6ecf2ce2e
--- /dev/null
+++ b/lib/mnesia/test/mnesia_recovery_test.erl
@@ -0,0 +1,1701 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_recovery_test).
+-author('[email protected]').
+-compile([export_all]).
+
+-include("mnesia_test_lib.hrl").
+-include_lib("kernel/include/file.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(receive_messages(Msgs), receive_messages(Msgs, ?FILE, ?LINE)).
+
+% First Some debug logging
+-define(dgb, true).
+-ifdef(dgb).
+-define(dl(X, Y), ?verbose("**TRACING: " ++ X ++ "**~n", Y)).
+-else.
+-define(dl(X, Y), ok).
+-endif.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Verify recoverability",
+ "Verify that the effects of committed transactions are preserved",
+ "after recovery from system failures. It must be possible to",
+ "restore the tables to a consistent state on a node, from (any kind",
+ "of) replica on other nodes as well as from local disk on the failed",
+ "node. The system must also recover from instantaneous",
+ "interruption causing disk files to not be completely synchronized."];
+
+all(suite) ->
+ [
+ mnesia_down,
+ explicit_stop,
+ coord_dies,
+ schema_trans,
+ async_dirty,
+ sync_dirty,
+ sym_trans,
+ asym_trans,
+ after_full_disc_partition,
+ after_corrupt_files,
+ disc_less,
+ garb_decision,
+ system_upgrade
+ ].
+
+schema_trans(suite) ->
+ [{mnesia_schema_recovery_test, all}].
+
+tpcb_config(ReplicaType, _NodeConfig, Nodes) ->
+ [{n_branches, 5},
+ {n_drivers_per_node, 5},
+ {replica_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {use_running_mnesia, true},
+ {report_interval, infinity},
+ {n_accounts_per_branch, 20},
+ {replica_type, ReplicaType}].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+mnesia_down(doc) ->
+ [" Various tests about recovery when mnesia goes down on one or several nodes."];
+mnesia_down(suite) ->
+ [
+ mnesia_down_during_startup,
+ master_node_tests,
+ read_during_down,
+ with_checkpoint,
+ delete_during_start
+ ].
+
+master_node_tests(doc) ->
+ ["Verify that mnesia loads the correct data after it has been down, regarding master node settings."];
+master_node_tests(suite) ->
+ [
+ no_master_2,
+ no_master_3,
+ one_master_2,
+ one_master_3,
+ two_master_2,
+ two_master_3,
+ all_master_2,
+ all_master_3
+ ].
+
+no_master_2(suite) -> [];
+no_master_2(Config) when is_list(Config) -> mnesia_down_2(no, Config).
+
+no_master_3(suite) -> [];
+no_master_3(Config) when is_list(Config) -> mnesia_down_3(no, Config).
+
+one_master_2(suite) -> [];
+one_master_2(Config) when is_list(Config) -> mnesia_down_2(one, Config).
+
+one_master_3(suite) -> [];
+one_master_3(Config) when is_list(Config) -> mnesia_down_3(one, Config).
+
+two_master_2(suite) -> [];
+two_master_2(Config) when is_list(Config) -> mnesia_down_2(two, Config).
+
+two_master_3(suite) -> [];
+two_master_3(Config) when is_list(Config) -> mnesia_down_3(two, Config).
+
+all_master_2(suite) -> [];
+all_master_2(Config) when is_list(Config) -> mnesia_down_2(all, Config).
+
+all_master_3(suite) -> [];
+all_master_3(Config) when is_list(Config) -> mnesia_down_3(all, Config).
+
+mnesia_down_2(Masters, Config) ->
+ Nodes = [N1, N2] = ?acquire_nodes(2, Config),
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{disc_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab3, [{disc_only_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab4, [{ram_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab5, [{ram_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab6, [{disc_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab7, [{disc_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab8, [{disc_only_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab9, [{disc_only_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab10, [{ram_copies, [N1]}, {disc_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab11, [{ram_copies, [N2]}, {disc_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab12, [{ram_copies, [N1]}, {disc_only_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab13, [{ram_copies, [N2]}, {disc_only_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab14, [{disc_only_copies, [N1]}, {disc_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab15, [{disc_only_copies, [N2]}, {disc_copies, [N1]}])),
+
+ Tabs = [tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8,
+ tab9, tab10, tab11, tab12, tab13, tab14, tab15],
+ [?match(ok, rpc:call(Node, mnesia, wait_for_tables, [Tabs, 10000])) || Node <- Nodes],
+ [insert_data(Tab, 20) || Tab <- Tabs],
+
+ VTabs =
+ case Masters of
+ no ->
+ Tabs -- [tab4, tab5]; % ram copies
+ one ->
+ ?match(ok, rpc:call(N1, mnesia, set_master_nodes, [[N1]])),
+ Tabs -- [tab1, tab4, tab5, tab10, tab12]; % ram_copies
+ two ->
+ ?match(ok, rpc:call(N1, mnesia, set_master_nodes, [Nodes])),
+ Tabs -- [tab4, tab5];
+ all ->
+ [?match(ok, rpc:call(Node, mnesia, set_master_nodes, [[Node]])) || Node <- Nodes],
+ Tabs -- [tab1, tab4, tab5, tab10, tab11, tab12, tab13]
+ end,
+
+ mnesia_test_lib:kill_mnesia([N1]),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ [?match(ok, rpc:call(N1, ?MODULE, verify_data, [Tab, 20])) || Tab <- VTabs],
+ [?match(ok, rpc:call(N2, ?MODULE, verify_data, [Tab, 20])) || Tab <- VTabs],
+ ?verify_mnesia(Nodes, []).
+
+mnesia_down_3(Masters, Config) ->
+ Nodes = [N1, N2, N3] = ?acquire_nodes(3, Config),
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{disc_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab3, [{disc_only_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab4, [{ram_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab5, [{ram_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab16, [{ram_copies, [N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab6, [{disc_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab7, [{disc_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab17, [{disc_copies, [N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab8, [{disc_only_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab9, [{disc_only_copies, [N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab18, [{disc_only_copies, [N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab10, [{ram_copies, [N1]}, {disc_copies, [N2, N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab11, [{ram_copies, [N2]}, {disc_copies, [N3, N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab19, [{ram_copies, [N3]}, {disc_copies, [N1, N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab12, [{ram_copies, [N1]}, {disc_only_copies, [N2, N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab13, [{ram_copies, [N2]}, {disc_only_copies, [N3, N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab20, [{ram_copies, [N3]}, {disc_only_copies, [N1, N2]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab14, [{disc_only_copies, [N1]}, {disc_copies, [N2, N3]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab15, [{disc_only_copies, [N2]}, {disc_copies, [N3, N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab21, [{disc_only_copies, [N3]}, {disc_copies, [N1, N2]}])),
+
+ Tabs = [tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8,
+ tab9, tab10, tab11, tab12, tab13, tab14, tab15,
+ tab16, tab17, tab18, tab19, tab20, tab21],
+ [?match(ok, rpc:call(Node, mnesia, wait_for_tables, [Tabs, 10000])) || Node <- Nodes],
+ [insert_data(Tab, 20) || Tab <- Tabs],
+
+ VTabs =
+ case Masters of
+ no ->
+ Tabs -- [tab4, tab5, tab16]; % ram copies
+ one ->
+ ?match(ok, rpc:call(N1, mnesia, set_master_nodes, [[N1]])),
+ Tabs -- [tab1, tab4, tab5, tab16, tab10, tab12]; % ram copies
+ two ->
+ ?match(ok, rpc:call(N1, mnesia, set_master_nodes, [Nodes])),
+ Tabs -- [tab4, tab5, tab16]; % ram copies
+ all ->
+ [?match(ok, rpc:call(Node, mnesia, set_master_nodes, [[Node]])) || Node <- Nodes],
+ Tabs -- [tab1, tab4, tab5, tab16, tab10,
+ tab11, tab19, tab12, tab13, tab20] % ram copies
+ end,
+
+ mnesia_test_lib:kill_mnesia([N1]),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N3])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2, N1])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N2, N3])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ ?match([], mnesia_test_lib:kill_mnesia([N1, N3])),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, Tabs)),
+
+ [?match(ok, rpc:call(N1, ?MODULE, verify_data, [Tab, 20])) || Tab <- VTabs],
+ [?match(ok, rpc:call(N2, ?MODULE, verify_data, [Tab, 20])) || Tab <- VTabs],
+ [?match(ok, rpc:call(N3, ?MODULE, verify_data, [Tab, 20])) || Tab <- VTabs],
+
+ ?verify_mnesia(Nodes, []).
+
+
+read_during_down(doc) ->
+ ["Verify that read operation can continue to read when mnesia goes down"];
+read_during_down(suite) ->
+ [
+ dirty_read_during_down,
+ trans_read_during_down
+ ].
+
+dirty_read_during_down(suite) ->
+ [];
+dirty_read_during_down(Config) when is_list(Config) ->
+ read_during_down(dirty, Config).
+
+trans_read_during_down(suite) ->
+ [];
+trans_read_during_down(Config) when is_list(Config) ->
+ read_during_down(trans, Config).
+
+
+read_during_down(Op, Config) when is_list(Config) ->
+ Ns = [N1|TNs] = ?acquire_nodes(3, Config),
+ Tabs = [ram, disc, disco],
+
+ ?match({atomic, ok}, mnesia:create_table(ram, [{ram_copies, TNs}])),
+ ?match({atomic, ok}, mnesia:create_table(disc, [{disc_copies, TNs}])),
+ ?match({atomic, ok}, mnesia:create_table(disco, [{disc_only_copies, TNs}])),
+
+ %% Create some work for mnesia_controller when a node goes down
+ [{atomic, ok} = mnesia:create_table(list_to_atom("temp" ++ integer_to_list(N)),
+ [{ram_copies, Ns}]) || N <- lists:seq(1, 50)],
+
+ Write = fun(Tab) -> mnesia:write({Tab, key, val}) end,
+ ?match([ok,ok,ok],
+ [mnesia:sync_dirty(Write, [Tab]) || Tab <- Tabs]),
+
+ Readers = [spawn_link(N1, ?MODULE, reader, [Tab, Op]) || Tab <- Tabs],
+ [_|_] = W2R= [mnesia:table_info(Tab, where_to_read) || Tab <- Tabs],
+ ?log("W2R ~p~n", [W2R]),
+ loop_and_kill_mnesia(10, hd(W2R), Tabs),
+ [Pid ! self() || Pid <- Readers],
+ ?match([ok, ok, ok], [receive ok -> ok after 1000 -> {Pid, mnesia_lib:dist_coredump()} end || Pid <- Readers]),
+ ?verify_mnesia(Ns, []).
+
+reader(Tab, OP) ->
+ Res = case OP of
+ dirty ->
+ catch mnesia:dirty_read({Tab, key});
+ trans ->
+ Read = fun() -> mnesia:read({Tab, key}) end,
+ {_, Temp} = mnesia:transaction(Read),
+ Temp
+ end,
+ case Res of
+ [{Tab, key, val}] -> ok;
+ Else ->
+ ?error("Expected ~p Got ~p ~n", [[{Tab, key, val}], Else]),
+ erlang:error(test_failed)
+ end,
+ receive Pid ->
+ Pid ! ok
+ after 50 ->
+ reader(Tab, OP)
+ end.
+
+loop_and_kill_mnesia(0, _Node, _Tabs) -> ok;
+loop_and_kill_mnesia(N, Node, Tabs) ->
+ mnesia_test_lib:kill_mnesia([Node]),
+ timer:sleep(100),
+ ?match([], mnesia_test_lib:start_mnesia([Node], Tabs)),
+ [KN | _] = W2R= [mnesia:table_info(Tab, where_to_read) || Tab <- Tabs],
+ ?match([KN, KN,KN], W2R),
+ timer:sleep(100),
+ loop_and_kill_mnesia(N-1, KN, Tabs).
+
+mnesia_down_during_startup(doc) ->
+ ["Verify that mnesia can come back up again in a consistent state",
+ "after it has gone down during startup (with different store and",
+ "when it goes down in different situations"];
+mnesia_down_during_startup(suite) ->
+ [
+ mnesia_down_during_startup_disk_ram,
+ mnesia_down_during_startup_init_ram,
+ mnesia_down_during_startup_init_disc,
+ mnesia_down_during_startup_init_disc_only,
+ mnesia_down_during_startup_tm_ram,
+ mnesia_down_during_startup_tm_disc,
+ mnesia_down_during_startup_tm_disc_only
+ ].
+
+mnesia_down_during_startup_disk_ram(suite) -> [];
+mnesia_down_during_startup_disk_ram(Config) when is_list(Config)->
+ [Node1, Node2] = ?acquire_nodes(2, Config ++
+ [{tc_timeout, timer:minutes(2)}]),
+ Tab = down_during_startup,
+ Def = [{ram_copies, [Node2]}, {disc_copies, [Node1]}],
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match(ok, mnesia:dirty_write({Tab, 876234, test_ok})),
+ timer:sleep(500),
+ mnesia_test_lib:kill_mnesia([Node1, Node2]),
+ timer:sleep(500),
+ mnesia_test_lib:start_mnesia([Node1, Node2], [Tab]),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ timer:sleep(500),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [Tab])),
+ ?match([{Tab, 876234, test_ok}], mnesia:dirty_read({Tab,876234})),
+ ?verify_mnesia([Node1, Node2], []).
+
+mnesia_down_during_startup_init_ram(suite) -> [];
+mnesia_down_during_startup_init_ram(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_loader, do_get_network_copy},
+ Type = ram_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup_init_disc(suite) -> [];
+mnesia_down_during_startup_init_disc(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_loader, do_get_network_copy},
+ Type = disc_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup_init_disc_only(suite) -> [];
+mnesia_down_during_startup_init_disc_only(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_loader, do_get_network_copy},
+ Type = disc_only_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup_tm_ram(suite) -> [];
+mnesia_down_during_startup_tm_ram(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_tm, init},
+ Type = ram_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup_tm_disc(suite) -> [];
+mnesia_down_during_startup_tm_disc(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_tm, init},
+ Type = disc_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup_tm_disc_only(suite) -> [];
+mnesia_down_during_startup_tm_disc_only(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ DP = {mnesia_tm, init},
+ Type = disc_only_copies,
+ mnesia_down_during_startup2(Config, Type, DP, self()).
+
+mnesia_down_during_startup2(Config, ReplicaType, Debug_Point, _Father) ->
+ ?log("TC~n mnesia_down_during_startup with type ~w and stops at ~w~n",
+ [ReplicaType, Debug_Point]),
+ Tpcb_tabs = [history,teller,account,branch],
+ Nodes = ?acquire_nodes(2, Config),
+ Node1 = hd(Nodes),
+ {success, [A]} = ?start_activities([Node1]),
+ TpcbConfig = tpcb_config(ReplicaType, 2, Nodes),
+ mnesia_tpcb:init(TpcbConfig),
+ A ! fun () -> mnesia_tpcb:run(TpcbConfig) end,
+ ?match_receive(timeout),
+ timer:sleep(timer:seconds(10)), % Let tpcb run for a while
+ mnesia_tpcb:stop(),
+ ?match(ok, mnesia_tpcb:verify_tabs()),
+ mnesia_test_lib:kill_mnesia([Node1]),
+ timer:sleep(timer:seconds(2)),
+ Self = self(),
+ TestFun = fun(_MnesiaEnv, _EvalEnv) ->
+ ?deactivate_debug_fun(Debug_Point),
+ Self ! fun_done,
+ spawn(mnesia_test_lib, kill_mnesia, [[Node1]])
+ end,
+ ?activate_debug_fun(Debug_Point, TestFun, []), % Kill when debug has been reached
+ mnesia:start(),
+ Res = receive fun_done -> ok after timer:minutes(3) -> timeout end, % Wait till it's killed
+ ?match(ok, Res),
+ ?match(ok, timer:sleep(timer:seconds(2))), % Wait a while, at least till it dies;
+ ?match([], mnesia_test_lib:start_mnesia([Node1], Tpcb_tabs)),
+ ?match(ok, mnesia_tpcb:verify_tabs()), % Verify it
+ ?verify_mnesia(Nodes, []).
+
+
+with_checkpoint(doc) ->
+ ["Restart mnesia with checkpoint"];
+with_checkpoint(suite) ->
+ [with_checkpoint_same, with_checkpoint_other].
+
+with_checkpoint_same(suite) -> [];
+with_checkpoint_same(Config) when is_list(Config) ->
+ with_checkpoint(Config, same).
+
+with_checkpoint_other(suite) -> [];
+with_checkpoint_other(Config) when is_list(Config) ->
+ with_checkpoint(Config, other).
+
+with_checkpoint(Config, Type) when is_list(Config) ->
+ Nodes = [Node1, Node2] = ?acquire_nodes(2, Config),
+ Kill = case Type of
+ same -> %% Node1 is the one used for creating the checkpoint
+ Node1; %% and which we bring down
+ other ->
+ Node2 %% Here we bring node2 down..
+ end,
+
+ ?match({atomic, ok}, mnesia:create_table(ram, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(disc, [{disc_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(disco, [{disc_only_copies, Nodes}])),
+ Tabs = [ram, disc, disco],
+
+ ?match({ok, sune, _}, mnesia:activate_checkpoint([{name, sune},
+ {max, mnesia:system_info(tables)},
+ {ram_overrides_dump, true}])),
+
+ ?match([], check_retainers(sune, Nodes)),
+
+ ?match(ok, mnesia:deactivate_checkpoint(sune)),
+ ?match([], check_chkp(Nodes)),
+
+    timer:sleep(500), %% Just to help debugging; the io:formats now come in the
+ %% correct order... :-)
+
+ ?match({ok, sune, _}, mnesia:activate_checkpoint([{name, sune},
+ {max, mnesia:system_info(tables)},
+ {ram_overrides_dump, true}])),
+
+ [[mnesia:dirty_write({Tab,Key,Key}) || Key <- lists:seq(1,10)] || Tab <- Tabs],
+
+ mnesia_test_lib:kill_mnesia([Kill]),
+ timer:sleep(100),
+ mnesia_test_lib:start_mnesia([Kill], Tabs),
+ io:format("Mnesia on ~p started~n", [Kill]),
+ ?match([], check_retainers(sune, Nodes)),
+ ?match(ok, mnesia:deactivate_checkpoint(sune)),
+ ?match([], check_chkp(Nodes)),
+
+ case Kill of
+ Node1 ->
+ ignore;
+ Node2 ->
+ mnesia_test_lib:kill_mnesia([Kill]),
+ timer:sleep(500), %% Just to help debugging
+ ?match({ok, sune, _}, mnesia:activate_checkpoint([{name, sune},
+ {max, mnesia:system_info(tables)},
+ {ram_overrides_dump, true}])),
+
+ [[mnesia:dirty_write({Tab,Key,Key+2}) || Key <- lists:seq(1,10)] ||
+ Tab <- Tabs],
+
+ mnesia_test_lib:start_mnesia([Kill], Tabs),
+ io:format("Mnesia on ~p started ~n", [Kill]),
+ ?match([], check_retainers(sune, Nodes)),
+ ?match(ok, mnesia:deactivate_checkpoint(sune)),
+ ?match([], check_chkp(Nodes)),
+ ok
+ end,
+ ?verify_mnesia(Nodes, []).
+
+check_chkp(Nodes) ->
+ {Good, Bad} = rpc:multicall(Nodes, ?MODULE, check, []),
+ lists:flatten(Good ++ Bad).
+
+check() ->
+ [PCP] = ets:match_object(mnesia_gvar, {pending_checkpoint_pids, '_'}),
+ [PC] = ets:match_object(mnesia_gvar, {pending_checkpoints, '_'}),
+ [CPN] = ets:match_object(mnesia_gvar, {checkpoints, '_'}),
+ F = lists:filter(fun({_, []}) -> false; (_W) -> true end,
+ [PCP,PC,CPN]),
+ CPP = ets:match_object(mnesia_gvar, {{checkpoint, '_'}, '_'}),
+ Rt = ets:match_object(mnesia_gvar, {{'_', {retainer, '_'}}, '_'}),
+ F ++ CPP ++ Rt.
+
+
+check_retainers(CHP, Nodes) ->
+ {[R1,R2], []} = rpc:multicall(Nodes, ?MODULE, get_all_retainers, [CHP]),
+ (R1 -- R2) ++ (R2 -- R1).
+
+get_all_retainers(CHP) ->
+ Tabs = mnesia:system_info(local_tables),
+ Iter = fun(Tab) ->
+ {ok, Res} =
+ mnesia_checkpoint:iterate(CHP, Tab, fun(R, A) -> [R|A] end, [],
+ retainer, checkpoint),
+%% io:format("Retainer content ~w ~n", [Res]),
+ Res
+ end,
+ Elements = [Iter(Tab) || Tab <- Tabs],
+ lists:sort(lists:flatten(Elements)).
+
+delete_during_start(doc) ->
+    ["Test that tables can be deleted during start, hopefully with tables"
+     " in the loader queue or soon to be"];
+delete_during_start(suite) -> [];
+delete_during_start(Config) when is_list(Config) ->
+ [N1, N2, N3] = Nodes = ?acquire_nodes(3, Config),
+ Tabs = [list_to_atom("tab" ++ integer_to_list(I)) || I <- lists:seq(1, 30)],
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, N2, ram_copies)),
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, N3, ram_copies)),
+
+ [?match({atomic, ok},mnesia:create_table(Tab, [{ram_copies,Nodes}])) || Tab <- Tabs],
+ lists:foldl(fun(Tab, I) ->
+ ?match({atomic, ok},
+ mnesia:change_table_load_order(Tab,I)),
+ I+1
+ end, 1, Tabs),
+ mnesia_test_lib:kill_mnesia([N2,N3]),
+%% timer:sleep(500),
+ ?match({[ok,ok],[]}, rpc:multicall([N2,N3], mnesia,start,
+ [[{extra_db_nodes,[N1]}]])),
+ [Tab1,Tab2,Tab3|_] = Tabs,
+ ?match({atomic, ok}, mnesia:delete_table(Tab1)),
+ ?match({atomic, ok}, mnesia:delete_table(Tab2)),
+
+ ?log("W4T ~p~n", [rpc:multicall([N2,N3], mnesia, wait_for_tables, [[Tab1,Tab2,Tab3],1])]),
+
+ Remain = Tabs--[Tab1,Tab2],
+ ?match(ok, rpc:call(N2, mnesia, wait_for_tables, [Remain,10000])),
+ ?match(ok, rpc:call(N3, mnesia, wait_for_tables, [Remain,10000])),
+
+ ?match(ok, rpc:call(N2, ?MODULE, verify_where2read, [Remain])),
+ ?match(ok, rpc:call(N3, ?MODULE, verify_where2read, [Remain])),
+
+ ?verify_mnesia(Nodes, []).
+
+verify_where2read([Tab|Tabs]) ->
+ true = (node() == mnesia:table_info(Tab,where_to_read)),
+ verify_where2read(Tabs);
+verify_where2read([]) -> ok.
+
+
+%%-------------------------------------------------------------------------------------------
+explicit_stop(doc) ->
+ ["Stop Mnesia in different situations"];
+explicit_stop(suite) ->
+ [explicit_stop_during_snmp].
+%% This is a bad implementation, but at least it gives an indication if something is wrong
+explicit_stop_during_snmp(suite) -> [];
+explicit_stop_during_snmp(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(2, Config),
+ [Node1, Node2] = Nodes,
+ Tab = snmp_tab,
+ Def = [{attributes, [key, value]},
+ {snmp, [{key, integer}]},
+ {mnesia_test_lib:storage_type(disc_copies, Config),
+ [Node1, Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write({Tab, 1, 1}) end)),
+
+ Do_trans_Pid1 = spawn_link(Node2, ?MODULE, do_trans_loop, [Tab, self()]),
+ Do_trans_Pid2 = spawn_link(?MODULE, do_trans_loop, [Tab, self()]),
+ Start_stop_Pid = spawn_link(?MODULE, start_stop, [Node1, 10, self()]),
+ receive
+ test_done ->
+ ok
+ after timer:minutes(5) ->
+	    ?error("test case timeout~n", [])
+ end,
+ ?verify_mnesia(Nodes, []),
+ exit(Do_trans_Pid1, kill),
+ exit(Do_trans_Pid2, kill),
+ exit(Start_stop_Pid, kill),
+ ok.
+
+do_trans_loop(Tab, Father) ->
+ %% Do not trap exit
+ do_trans_loop2(Tab, Father).
+do_trans_loop2(Tab, Father) ->
+ Trans =
+ fun() ->
+ [{Tab, 1, Val}] = mnesia:read({Tab, 1}),
+ mnesia:write({Tab, 1, Val + 1})
+ end,
+ case mnesia:transaction(Trans) of
+ {atomic, ok} ->
+ timer:sleep(200),
+ do_trans_loop2(Tab, Father);
+ {aborted, {node_not_running, N}} when N == node() ->
+ timer:sleep(200),
+ do_trans_loop2(Tab, Father);
+ {aborted, {no_exists, Tab}} ->
+ timer:sleep(200),
+ do_trans_loop2(Tab, Father);
+ Else ->
+ ?error("Transaction failed: ~p ~n", [Else]),
+ Father ! test_done,
+ exit(shutdown)
+ end.
+
+start_stop(_Node1, 0, Father) ->
+ Father ! test_done,
+ exit(shutdown);
+start_stop(Node1, N, Father) when N > 0->
+ timer:sleep(timer:seconds(5)),
+ ?match(stopped, rpc:call(Node1, mnesia, stop, [])),
+ timer:sleep(timer:seconds(2)),
+ ?match([], mnesia_test_lib:start_mnesia([Node1])),
+ start_stop(Node1, N-1, Father).
+
+coord_dies(suite) -> [];
+coord_dies(doc) -> ["Verify that tables stay consistent when transaction coordinator processes die"];
+coord_dies(Config) when is_list(Config) ->
+ Nodes = [N1, N2] = ?acquire_nodes(2, Config),
+ ?match({atomic, ok}, mnesia:create_table(tab1, [{ram_copies, Nodes}])),
+ ?match({atomic, ok}, mnesia:create_table(tab2, [{ram_copies, [N1]}])),
+ ?match({atomic, ok}, mnesia:create_table(tab3, [{ram_copies, [N2]}])),
+ Tester = self(),
+
+ U1 = fun(Tab) ->
+ [{Tab,key,Val}] = mnesia:read(Tab,key,write),
+ mnesia:write({Tab,key, Val+1}),
+ Tester ! {self(),continue},
+ receive
+ continue -> exit(crash)
+ end
+ end,
+ U2 = fun(Tab) ->
+ [{Tab,key,Val}] = mnesia:read(Tab,key,write),
+ mnesia:write({Tab,key, Val+1}),
+ mnesia:transaction(U1, [Tab])
+ end,
+ [mnesia:dirty_write(Tab,{Tab,key,0}) || Tab <- [tab1,tab2,tab3]],
+ Pid1 = spawn(fun() -> mnesia:transaction(U2, [tab1]) end),
+ Pid2 = spawn(fun() -> mnesia:transaction(U2, [tab2]) end),
+ Pid3 = spawn(fun() -> mnesia:transaction(U2, [tab3]) end),
+ [receive {Pid,continue} -> ok end || Pid <- [Pid1,Pid2,Pid3]],
+ Pid1 ! continue, Pid2 ! continue, Pid3 ! continue,
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab1,key}) end)),
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab2,key}) end)),
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab3,key}) end)),
+
+ Pid4 = spawn(fun() -> mnesia:transaction(U2, [tab1]) end),
+ Pid5 = spawn(fun() -> mnesia:transaction(U2, [tab2]) end),
+ Pid6 = spawn(fun() -> mnesia:transaction(U2, [tab3]) end),
+ erlang:monitor(process, Pid4),erlang:monitor(process, Pid5),erlang:monitor(process, Pid6),
+
+ [receive {Pid,continue} -> ok end || Pid <- [Pid4,Pid5,Pid6]],
+ exit(Pid4,crash),
+ ?match_receive({'DOWN',_,_,Pid4, _}),
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab1,key}) end)),
+ exit(Pid5,crash),
+ ?match_receive({'DOWN',_,_,Pid5, _}),
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab2,key}) end)),
+ exit(Pid6,crash),
+ ?match_receive({'DOWN',_,_,Pid6, _}),
+ ?match({atomic,[{_,key,1}]}, mnesia:transaction(fun() -> mnesia:read({tab3,key}) end)),
+
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+sym_trans(doc) ->
+ ["Recovery of symmetrical transactions in a couple of different",
+     "situations: when the coordinator node or process, or a participant node, dies"];
+
+sym_trans(suite) ->
+ [sym_trans_before_commit_kill_coord_node, %% coordinator node dies
+ sym_trans_before_commit_kill_coord_pid, %% coordinator process dies
+ sym_trans_before_commit_kill_part_after_ask, %% participating node dies
+ sym_trans_before_commit_kill_part_before_ask,
+ sym_trans_after_commit_kill_coord_node,
+ sym_trans_after_commit_kill_coord_pid,
+ sym_trans_after_commit_kill_part_after_ask,
+ sym_trans_after_commit_kill_part_do_commit_pre,
+ sym_trans_after_commit_kill_part_do_commit_post].
+
+%% kill_after_debug_point(Kill, {DebugNode, Debug_Point}, TransFun, TabsAndDefs, Nodes)
+
+sym_trans_before_commit_kill_coord_node(suite) -> [];
+sym_trans_before_commit_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_before_commit_kill_coord,
+ Def = [{attributes, [key, value]}, {ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, multi_commit_sym}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_before_commit_kill_coord_pid(suite) -> [];
+sym_trans_before_commit_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_before_commit_kill_coord,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, multi_commit_sym}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_before_commit_kill_part_after_ask(suite) -> [];
+sym_trans_before_commit_kill_part_after_ask(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_before_commit_kill_part_after_ask,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(Part1, {Coord, {mnesia_tm, multi_commit_sym}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_before_commit_kill_part_before_ask(suite) -> [];
+sym_trans_before_commit_kill_part_before_ask(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_before_commit_kill_part_before_ask,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, doit_ask_commit}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_after_commit_kill_coord_node(suite) -> [];
+sym_trans_after_commit_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_after_commit_kill_coord,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, multi_commit_sym, post}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_after_commit_kill_coord_pid(suite) -> [];
+sym_trans_after_commit_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_after_commit_kill_coord,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, multi_commit_sym, post}},
+ do_sym_trans, [{Tab,Def}], Nodes).
+
+sym_trans_after_commit_kill_part_after_ask(suite) -> [];
+sym_trans_after_commit_kill_part_after_ask(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_after_commit_kill_part_after_ask,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ kill_after_debug_point(Part1, {Coord, {mnesia_tm, multi_commit_sym, post}},
+ do_sym_trans, [{Tab, Def}], Nodes).
+
+sym_trans_after_commit_kill_part_do_commit_pre(suite) -> [];
+sym_trans_after_commit_kill_part_do_commit_pre(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_after_commit_kill_part_do_commit_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, do_commit, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sym_trans_after_commit_kill_part_do_commit_post(suite) -> [];
+sym_trans_after_commit_kill_part_do_commit_post(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sym_trans_after_commit_kill_part_do_commit_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, do_commit, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+do_sym_trans([Tab], _Father) ->
+ ?dl("Starting SYM_TRANS with active debug fun ", []),
+ Trans = fun() ->
+ [{_,_,Val}] = mnesia:read({Tab, 1}),
+ mnesia:write({Tab, 1, Val+1})
+ end,
+ Res = mnesia:transaction(Trans),
+ case Res of
+ {atomic, ok} -> ok;
+ {aborted, _Reason} -> ok;
+	Else -> ?error("Wrong output from mnesia:transaction(FUN):~n ~p~n",
+ [Else])
+ end,
+ ?dl("SYM_TRANSACTION done: ~p (deactiv dbgfun) ", [Res]),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+sync_dirty(doc) ->
+    ["Verify recovery of synchronous dirty operations in a couple of different",
+ "situations"];
+sync_dirty(suite) ->
+ [sync_dirty_pre_kill_part,
+ sync_dirty_pre_kill_coord_node,
+ sync_dirty_pre_kill_coord_pid,
+ sync_dirty_post_kill_part,
+ sync_dirty_post_kill_coord_node,
+ sync_dirty_post_kill_coord_pid
+ ].
+
+sync_dirty_pre_kill_part(suite) -> [];
+sync_dirty_pre_kill_part(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, sync_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sync_dirty_pre_kill_coord_node(suite) -> [];
+sync_dirty_pre_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(Coord, {Part1, {mnesia_tm, sync_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sync_dirty_pre_kill_coord_pid(suite) -> [];
+sync_dirty_pre_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(coord_pid, {Part1, {mnesia_tm, sync_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sync_dirty_post_kill_part(suite) -> [];
+sync_dirty_post_kill_part(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, sync_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sync_dirty_post_kill_coord_node(suite) -> [];
+sync_dirty_post_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(Coord, {Part1, {mnesia_tm, sync_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+sync_dirty_post_kill_coord_pid(suite) -> [];
+sync_dirty_post_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = sync_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_sync_dirty,
+ kill_after_debug_point(coord_pid, {Part1, {mnesia_tm, sync_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+do_sync_dirty([Tab], _Father) ->
+ ?dl("Starting SYNC_DIRTY", []),
+ SYNC = fun() ->
+ [{_,_,Val}] = mnesia:read({Tab, 1}),
+ mnesia:write({Tab, 1, Val+1})
+ end,
+ {_, Res} = ?match(ok, mnesia:sync_dirty(SYNC)),
+ ?dl("SYNC_DIRTY done: ~p ", [Res]),
+ ok.
+
+async_dirty(doc) ->
+    ["Verify recovery of asynchronous dirty operations in a couple of different",
+ "situations"];
+async_dirty(suite) ->
+ [async_dirty_pre_kill_part,
+ async_dirty_pre_kill_coord_node,
+ async_dirty_pre_kill_coord_pid,
+ async_dirty_post_kill_part,
+ async_dirty_post_kill_coord_node,
+ async_dirty_post_kill_coord_pid].
+
+async_dirty_pre_kill_part(suite) -> [];
+async_dirty_pre_kill_part(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, async_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+async_dirty_pre_kill_coord_node(suite) -> [];
+async_dirty_pre_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(Coord, {Part1, {mnesia_tm, async_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+async_dirty_pre_kill_coord_pid(suite) -> [];
+async_dirty_pre_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_pre,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(coord_pid, {Part1, {mnesia_tm, async_dirty, pre}},
+ TransFun, [{Tab, Def}], Nodes).
+
+async_dirty_post_kill_part(suite) -> [];
+async_dirty_post_kill_part(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, async_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+async_dirty_post_kill_coord_node(suite) -> [];
+async_dirty_post_kill_coord_node(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(Coord, {Part1, {mnesia_tm, async_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+async_dirty_post_kill_coord_pid(suite) -> [];
+async_dirty_post_kill_coord_pid(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab = async_dirty_post,
+ Def = [{attributes, [key, value]},{ram_copies, [Part2]},{disc_copies, [Coord, Part1]}],
+ TransFun = do_async_dirty,
+ kill_after_debug_point(coord_pid, {Part1, {mnesia_tm, async_dirty, post}},
+ TransFun, [{Tab, Def}], Nodes).
+
+do_async_dirty([Tab], _Father) ->
+ ?dl("Starting ASYNC", []),
+ ASYNC = fun() ->
+ [{_,_,Val}] = mnesia:read({Tab, 1}),
+ mnesia:write({Tab, 1, Val+1})
+ end,
+ {_, Res} = ?match(ok, mnesia:async_dirty(ASYNC)),
+ ?dl("ASYNC done: ~p ", [Res]),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+asym_trans(doc) ->
+ ["Recovery of asymmetrical transactions in a couple of different",
+     "situations. The error cases are currently not covered, i.e.",
+     "the situations where we kill Mnesia or a process during a",
+     "recovery are not tested"];
+asym_trans(suite) ->
+ [
+ asym_trans_kill_part_ask,
+ asym_trans_kill_part_commit_vote,
+ asym_trans_kill_part_pre_commit,
+ asym_trans_kill_part_log_commit,
+ asym_trans_kill_part_do_commit,
+ asym_trans_kill_coord_got_votes,
+ asym_trans_kill_coord_pid_got_votes,
+ asym_trans_kill_coord_log_commit_rec,
+ asym_trans_kill_coord_pid_log_commit_rec,
+ asym_trans_kill_coord_log_commit_dec,
+ asym_trans_kill_coord_pid_log_commit_dec,
+ asym_trans_kill_coord_rec_acc_pre_commit_log_commit,
+ asym_trans_kill_coord_pid_rec_acc_pre_commit_log_commit,
+ asym_trans_kill_coord_rec_acc_pre_commit_done_commit,
+ asym_trans_kill_coord_pid_rec_acc_pre_commit_done_commit
+ ].
+
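+%% All asym_trans cases use the same two-table layout: asym1 has a ram copy
+%% on Part2 and a disc copy on Coord, asym2 has a ram copy on Coord and a
+%% disc copy on Part1. do_asym_trans reports its transaction id to the garb
+%% handler so that decision garbage collection can be verified afterwards.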
+asym_trans_kill_part_ask(suite) -> [];
+asym_trans_kill_part_ask(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, doit_ask_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_part_commit_vote(suite) -> [];
+asym_trans_kill_part_commit_vote(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, commit_participant, vote_yes}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_part_pre_commit(suite) -> [];
+asym_trans_kill_part_pre_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, commit_participant, pre_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_part_log_commit(suite) -> [];
+asym_trans_kill_part_log_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, commit_participant, log_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_part_do_commit(suite) -> [];
+asym_trans_kill_part_do_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Part1, {Part1, {mnesia_tm, commit_participant, do_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_got_votes(suite) -> [];
+asym_trans_kill_coord_got_votes(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, multi_commit_asym_got_votes}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_pid_got_votes(suite) -> [];
+asym_trans_kill_coord_pid_got_votes(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, multi_commit_asym_got_votes}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_log_commit_rec(suite) -> [];
+asym_trans_kill_coord_log_commit_rec(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, multi_commit_asym_log_commit_rec}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_pid_log_commit_rec(suite) -> [];
+asym_trans_kill_coord_pid_log_commit_rec(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, multi_commit_asym_log_commit_rec}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_log_commit_dec(suite) -> [];
+asym_trans_kill_coord_log_commit_dec(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, multi_commit_asym_log_commit_dec}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_pid_log_commit_dec(suite) -> [];
+asym_trans_kill_coord_pid_log_commit_dec(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, multi_commit_asym_log_commit_dec}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_rec_acc_pre_commit_log_commit(suite) -> [];
+asym_trans_kill_coord_rec_acc_pre_commit_log_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, rec_acc_pre_commit_log_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_pid_rec_acc_pre_commit_log_commit(suite) -> [];
+asym_trans_kill_coord_pid_rec_acc_pre_commit_log_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, rec_acc_pre_commit_log_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_rec_acc_pre_commit_done_commit(suite) -> [];
+asym_trans_kill_coord_rec_acc_pre_commit_done_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(Coord, {Coord, {mnesia_tm, rec_acc_pre_commit_done_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+asym_trans_kill_coord_pid_rec_acc_pre_commit_done_commit(suite) -> [];
+asym_trans_kill_coord_pid_rec_acc_pre_commit_done_commit(Config) when is_list(Config) ->
+ ?is_debug_compiled,
+ Nodes = ?acquire_nodes(3, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ [Coord, Part1, Part2] = Nodes,
+ Tab1 = {asym1, [{ram_copies, [Part2]}, {disc_copies, [Coord]}]},
+ Tab2 = {asym2, [{ram_copies, [Coord]}, {disc_copies, [Part1]}]},
+ TransFun = do_asym_trans,
+ kill_after_debug_point(coord_pid, {Coord, {mnesia_tm, rec_acc_pre_commit_done_commit}},
+ TransFun, [Tab1, Tab2], Nodes).
+
+do_asym_trans([Tab1, Tab2 | _R], Garbhandler) ->
+ ?dl("Starting asym trans ", []),
+ ASym_Trans = fun() ->
+ TidTs = {_Mod, Tid, _Store} =
+ mnesia:get_activity_id(),
+ ?verbose("===> asym_trans: ~w~n", [TidTs]),
+ Garbhandler ! {trans_id, Tid},
+ [{_, _, Val1}] = mnesia:read({Tab1, 1}),
+ [{_, _, Val2}] = mnesia:read({Tab2, 1}),
+ mnesia:write({Tab1, 1, Val1+1}),
+ mnesia:write({Tab2, 1, Val2+1})
+ end,
+ Res = mnesia:transaction(ASym_Trans),
+ case Res of
+ {atomic, ok} -> ok;
+ {aborted, _Reason} -> ok;
+	_Else -> ?error("Wrong output from mnesia:transaction(FUN):~n ~p~n", [Res])
+ end,
+ ?dl("Asym trans finished with: ~p ", [Res]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
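+%% Common driver for the recovery cases above: create the tables, write the
+%% initial value 100, install a debug fun at Debug_Point on DebugNode that
+%% blocks until it gets 'continue', spawn TransFun on the coordinator node
+%% and wait for the fun_in_position sync message. Then either kill the
+%% coordinator process (Kill == coord_pid) or kill Mnesia on node Kill,
+%% restart Mnesia where needed and verify that all replicas agree.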
+kill_after_debug_point(Kill, {DebugNode, Debug_Point}, TransFun, TabsAndDefs, Nodes) ->
+ [Coord | _rest] = Nodes,
+
+ Create = fun({Tab, Def}) -> ?match({atomic, ok}, mnesia:create_table(Tab, Def)) end,
+ lists:foreach(Create, TabsAndDefs),
+ Tabs = [T || {T, _} <- TabsAndDefs],
+ Write = fun(Tab) -> ?match(ok, mnesia:dirty_write({Tab, 1, 100})) end,
+ lists:foreach(Write, Tabs),
+
+ Self = self(),
+ SyncFun = fun(_Env1, _Env2) -> % Just Sync with test prog
+ Self ! {self(), fun_in_position},
+ ?dl("SyncFun, sending fun_in_position ", []),
+ receive continue ->
+ ?dl("SyncFun received continue ",[]),
+ ok
+ after timer:seconds(60) ->
+ ?error("Timeout in sync_fun on ~p~n", [node()])
+ end
+ end,
+
+ Garb_handler = spawn_link(?MODULE, garb_handler, [[]]),
+
+ ?remote_activate_debug_fun(DebugNode, Debug_Point, SyncFun, []),
+ ?dl("fun_in_position activated at ~p with ~p", [DebugNode, Debug_Point]),
+ %% Spawn and do the transaction
+ Pid = spawn(Coord, ?MODULE, TransFun, [Tabs, Garb_handler]),
+ %% Wait till all the Nodes are in correct position
+ [{StoppedPid,_}] = ?receive_messages([fun_in_position]),
+ ?dl("Received fun_in_position; Removing the debug funs ~p", [DebugNode]),
+ ?remote_deactivate_debug_fun(DebugNode, Debug_Point),
+
+ case Kill of
+ coord_pid ->
+ ?dl("Intentionally killing pid ~p ", [Pid]),
+ exit(Pid, normal);
+ Node ->
+ mnesia_test_lib:kill_mnesia([Node])
+ end,
+
+ StoppedPid ! continue, %% Send continue, it may still be alive
+
+ %% Start and check that the databases are consistent
+ ?dl("Done, Restarting and verifying result ",[]),
+ case Kill of
+ coord_pid -> ok;
+	_ -> % Ok, Mnesia on some node was killed, restart it
+ timer:sleep(timer:seconds(3)), %% Just let it have the time to die
+ ?match(ok, rpc:call(Kill, mnesia, start, [[]])),
+ ?match(ok, rpc:call(Kill, mnesia, wait_for_tables, [Tabs, 60000]))
+ end,
+ Trans_res = verify_tabs(Tabs, Nodes),
+ case TransFun of
+ do_asym_trans ->
+	    %% Verifies that decisions are garbed, only valid for asym_trans
+ Garb_handler ! {get_tids, self()},
+ Tid_list = receive
+ {tids, List} ->
+ ?dl("Fun rec ~w", [List]),
+ List
+ end,
+ garb_of_decisions(Kill, Nodes, Tid_list, Trans_res);
+ _ ->
+ ignore
+ end,
+ ?verify_mnesia(Nodes, []).
+
+garb_of_decisions(Kill, Nodes, Tid_list, Trans_res) ->
+ [Coord, Part1, Part2] = Nodes,
+ %% Check that decision log is empty on all nodes after the trans is finished
+ verify_garb_decision_log(Nodes, Tid_list),
+ case Trans_res of
+ aborted ->
+ %% Check that aborted trans have not been restarted!!
+ ?match(1, length(Tid_list)),
+ %% Check the transient decision logs
+	    %% A transaction should only be aborted in an early stage of
+	    %% the trans, before any node has logged anything
+ verify_garb_transient_logs(Nodes, Tid_list, aborted),
+	    %% and only when the coordinator has died;
+	    %% otherwise it would have restarted the transaction
+ ?match(Kill, Coord);
+ updated ->
+ case length(Tid_list) of
+ 1 ->
+ %% If there was only one transaction, it should be logged as
+		    %% committed on every node!
+ [Tid1] = Tid_list,
+ verify_garb_transient_logs(Nodes, [Tid1], committed);
+ 2 ->
+		    %% If there are two transaction ids, then the first
+ %% TID should have been aborted and the transaction
+ %% restarted with a new TID
+ [Tid1, Tid2] = Tid_list,
+ verify_garb_transient_logs(Nodes, [Tid1], aborted),
+		    %% If Mnesia is killed on a node, i.e. Coord or Part1, then that node
+ %% won't know about the restarted trans! The rest of the nodes
+ %% should know that the trans was committed
+ case Kill of
+ coord_pid ->
+ verify_garb_transient_logs(Nodes, [Tid2], committed);
+ Coord ->
+ verify_garb_transient_logs([Part1, Part2], [Tid2], committed),
+ verify_garb_transient_logs([Coord], [Tid2], not_found);
+ Part1 ->
+ verify_garb_transient_logs([Coord, Part2], [Tid2], committed),
+ verify_garb_transient_logs([Part1], [Tid2], not_found)
+ end
+ end
+ end.
+
+verify_garb_decision_log([], _Tids) -> ok;
+verify_garb_decision_log([Node|R], Tids) ->
+ Check = fun(Tid) -> %% Node, Tid used in debugging!
+ ?match({{not_found, _}, Node, Tid},
+ {outcome(Tid, [mnesia_decision]), Node, Tid})
+ end,
+ rpc:call(Node, lists, foreach, [Check, Tids]),
+ verify_garb_decision_log(R, Tids).
+
+verify_garb_transient_logs([], _Tids, _) -> ok;
+verify_garb_transient_logs([Node|R], Tids, Exp_Res) ->
+ Check = fun(Tid) ->
+ LatestTab = mnesia_lib:val(latest_transient_decision),
+ PrevTabs = mnesia_lib:val(previous_transient_decisions),
+ case outcome(Tid, [LatestTab | PrevTabs]) of
+ {found, {_, [{_,_Tid, Exp_Res}]}} -> ok;
+ {not_found, _} when Exp_Res == not_found -> ok;
+ {not_found, _} when Exp_Res == aborted -> ok;
+ Else -> ?error("Expected ~p in trans ~p on ~p got ~p~n",
+ [Exp_Res, Tid, Node, Else])
+ end
+ end,
+ rpc:call(Node, lists, foreach, [Check, Tids]),
+ verify_garb_transient_logs(R, Tids, Exp_Res).
+
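+%% Look up Tid in the given decision tables. Returns {found, {Tab, Objects}}
+%% for the first table that contains it, otherwise {not_found, AllTabs}.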
+outcome(Tid, Tabs) ->
+ outcome(Tid, Tabs, Tabs).
+
+outcome(Tid, [Tab | Tabs], AllTabs) ->
+ case catch ets:lookup(Tab, Tid) of
+ {'EXIT', _} ->
+ outcome(Tid, Tabs, AllTabs);
+ [] ->
+ outcome(Tid, Tabs, AllTabs);
+ Val ->
+ {found, {Tab, Val}}
+ end;
+outcome(_Tid, [], AllTabs) ->
+ {not_found, AllTabs}.
+
+
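+%% The tables are created with the initial value 100 (see
+%% kill_after_debug_point/5); if they still contain 100 after recovery the
+%% transaction was aborted, if they contain 101 it was committed (updated).
+%% All replicas must agree on the value.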
+verify_tabs([Tab|R], Nodes) ->
+ [_Coord, Part1, Part2 | _rest] = Nodes,
+ Read = fun() -> mnesia:read({Tab, 1}) end,
+ {success, A} = ?match({atomic, _}, mnesia:transaction(Read)),
+ ?match(A, rpc:call(Part1, mnesia, transaction, [Read])),
+ ?match(A, rpc:call(Part2, mnesia, transaction, [Read])),
+ {atomic, [{Tab, 1, Res}]} = A,
+ verify_tabs(R, Nodes, Res).
+
+verify_tabs([], _Nodes, Res) ->
+ case Res of
+ 100 -> aborted;
+ 101 -> updated
+ end;
+
+verify_tabs([Tab | Rest], Nodes, Res) ->
+ [Coord, Part1, Part2 | _rest] = Nodes,
+ Read = fun() -> mnesia:read({Tab, 1}) end,
+ Exp = {atomic, [{Tab, 1, Res}]},
+ ?match(Exp, rpc:call(Coord, mnesia, transaction, [Read])),
+ ?match(Exp, rpc:call(Part1, mnesia, transaction, [Read])),
+ ?match(Exp, rpc:call(Part2, mnesia, transaction, [Read])),
+ verify_tabs(Rest, Nodes, Res).
+
+%% Gather TIDS and send them to requesting process and exit!
+garb_handler(List) ->
+ receive
+ {trans_id, ID} -> garb_handler([ID|List]);
+ {get_tids, Pid} -> Pid ! {tids, lists:reverse(List)}
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%
+receive_messages([], _File, _Line) -> [];
+receive_messages(ListOfMsgs, File, Line) ->
+ receive
+ {Pid, Msg} ->
+ case lists:member(Msg, ListOfMsgs) of
+ false ->
+ mnesia_test_lib:log("<>WARNING<>~n"
+ "Received unexpected msg~n ~p ~n"
+ "While waiting for ~p~n",
+ [{Pid, Msg}, ListOfMsgs], File, Line),
+ receive_messages(ListOfMsgs, File, Line);
+ true ->
+ ?dl("Got msg ~p from ~p ", [Msg, node(Pid)]),
+ [{Pid, Msg} | receive_messages(ListOfMsgs -- [Msg], File, Line)]
+ end;
+ Else -> mnesia_test_lib:log("<>WARNING<>~n"
+				    "Received unexpected or badly formatted msg~n ~p ~n"
+ "While waiting for ~p~n",
+ [Else, ListOfMsgs], File, Line),
+ receive_messages(ListOfMsgs, File, Line)
+ after timer:minutes(2) ->
+ ?error("Timeout in receive msgs while waiting for ~p~n",
+ [ListOfMsgs])
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+after_full_disc_partition(doc) ->
+ ["Verify that the database does not get corrupt",
+ "when Mnesia encounters a full disc partition"].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% interrupted_fallback_start
+%% is implemented in consistency interupted_install_fallback!
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+after_corrupt_files(doc) ->
+ ["Verify that mnesia (and dets) can handle corrupt files"];
+after_corrupt_files(suite) -> % cope with unsynced disks
+ [after_corrupt_files_decision_log_head,
+ after_corrupt_files_decision_log_tail,
+ after_corrupt_files_latest_log_head,
+ after_corrupt_files_latest_log_tail,
+ after_corrupt_files_table_dat_head,
+ after_corrupt_files_table_dat_tail,
+ after_corrupt_files_schema_dat_head,
+ after_corrupt_files_schema_dat_tail
+ ].
+
+after_corrupt_files_decision_log_head(suite) -> [];
+after_corrupt_files_decision_log_head(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "DECISION.LOG", head, repair).
+
+after_corrupt_files_decision_log_tail(suite) -> [];
+after_corrupt_files_decision_log_tail(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "DECISION.LOG", tail, repair).
+
+after_corrupt_files_latest_log_head(suite) -> [];
+after_corrupt_files_latest_log_head(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "LATEST.LOG", head, repair).
+
+after_corrupt_files_latest_log_tail(suite) -> [];
+after_corrupt_files_latest_log_tail(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "LATEST.LOG", tail, repair).
+
+after_corrupt_files_table_dat_head(suite) -> [];
+after_corrupt_files_table_dat_head(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "rec_files.DAT", head, crash).
+
+after_corrupt_files_table_dat_tail(suite) -> [];
+after_corrupt_files_table_dat_tail(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "rec_files.DAT", tail, repair).
+
+after_corrupt_files_schema_dat_head(suite) -> [];
+after_corrupt_files_schema_dat_head(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "schema.DAT", head, crash).
+
+after_corrupt_files_schema_dat_tail(suite) -> [];
+after_corrupt_files_schema_dat_tail(Config) when is_list(Config) ->
+ after_corrupt_files(Config, "schema.DAT", tail, crash).
+
+
+
+%%% BUGBUG: We should also write test cases for auto_repair = false, i.e.
+%%% not the standard case!
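+%% Corrupt the given file either at the head (overwrite a few bytes near the
+%% beginning with garbage) or at the tail (truncate the file at half its
+%% size) and then restart Mnesia. With auto_repair enabled, a 'repair' case
+%% is expected to start cleanly, while a 'crash' case is allowed to fail.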
+after_corrupt_files(Config, File, Where, Behaviour) ->
+ [Node] = ?acquire_nodes(1, Config ++ [{tc_timeout, timer:minutes(2)}]),
+ Tab = rec_files,
+ Def = [{disc_only_copies, [Node]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab, Def)),
+ insert_data(Tab, 100),
+ Dir = mnesia:system_info(directory),
+ mnesia_test_lib:kill_mnesia([Node]),
+ timer:sleep(timer:seconds(10)), % Let dets finish whatever it does
+
+ DirFile = Dir ++ "/" ++ File,
+
+ {ok, Fd} = file:open(DirFile, read_write),
+ {ok, FileInfo} = file:read_file_info(DirFile),
+ case Where of
+ head ->
+ ?match({ok, _NewP}, file:position(Fd, {bof, 1})),
+ ?match(ok, file:write(Fd, [255, 255, 255, 255, 255, 255, 255, 255, 254])),
+ ok;
+ tail ->
+ Size = FileInfo#file_info.size,
+ Half = Size div 2,
+
+ ?dl(" Size = ~p Half = ~p ", [Size, Half]),
+ ?match({ok, _NewP}, file:position(Fd, {bof, Half})),
+ ?match(ok, file:truncate(Fd)),
+ ok
+ end,
+ ?match(ok, file:close(Fd)),
+
+ ?warning("++++++SOME OF THE after_corrupt* TEST CASES WILL INTENTIONALLY CRASH MNESIA+++++++~n", []),
+ Pid = spawn_link(?MODULE, mymnesia_start, [self()]),
+ receive
+ {Pid, ok} ->
+ ?match(ok, mnesia:wait_for_tables([schema, Tab], 10000)),
+ ?match(ok, verify_data(Tab, 100)),
+ case mnesia_monitor:get_env(auto_repair) of
+ false ->
+ ?error("Mnesia should have crashed in ~p ~p ~n",
+ [File, Where]);
+ true ->
+ ok
+ end,
+ ?verify_mnesia([Node], []);
+ {Pid, {error, ED}} ->
+ case {mnesia_monitor:get_env(auto_repair), Behaviour} of
+ {true, repair} ->
+ ?error("Mnesia crashed with ~p: in ~p ~p ~n",
+ [ED, File, Where]);
+ _ -> %% Every other can crash!
+ ok
+ end,
+ ?verify_mnesia([], [Node]);
+ Msg ->
+ ?error("~p ~p: Got ~p during start of Mnesia~n",
+ [File, Where, Msg])
+ end.
+
+mymnesia_start(Tester) ->
+ Res = mnesia:start(),
+ unlink(Tester),
+ Tester ! {self(), Res}.
+
+verify_data(_, 0) -> ok;
+verify_data(Tab, N) ->
+ Actual = mnesia:dirty_read({Tab, N}),
+ Expected = [{Tab, N, N}],
+ if
+ Expected == Actual ->
+ verify_data(Tab, N - 1);
+ true ->
+ mnesia:schema(Tab),
+ {not_equal, node(), Expected, Actual}
+ end.
+
+insert_data(_Tab, 0) -> ok;
+insert_data(Tab, N) ->
+ ok = mnesia:sync_dirty(fun() -> mnesia:write({Tab, N, N}) end),
+ insert_data(Tab, N-1).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+disc_less(doc) ->
+    ["Here is a simple test case of a simple recovery of a disc-less node. "
+     "However, a lot more test cases involving disc-less nodes should "
+ "be written"];
+disc_less(suite) -> [];
+disc_less(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ case mnesia_test_lib:diskless(Config) of
+ true -> skip;
+ false ->
+ ?match({atomic, ok}, mnesia:change_table_copy_type(schema, Node3, ram_copies))
+ end,
+ Tab1 = disc_less1,
+ Tab2 = disc_less2,
+ Tab3 = disc_less3,
+ Def1 = [{ram_copies, [Node3]}, {disc_copies, [Node1, Node2]}],
+ Def2 = [{ram_copies, [Node3]}, {disc_copies, [Node1]}],
+ Def3 = [{ram_copies, [Node3]}, {disc_copies, [Node2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab1, Def1)),
+ ?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+ insert_data(Tab1, 100),
+ insert_data(Tab2, 100),
+ insert_data(Tab3, 100),
+
+ mnesia_test_lib:kill_mnesia([Node1, Node2]),
+ timer:sleep(500),
+ mnesia_test_lib:kill_mnesia([Node3]),
+ ?match(ok, rpc:call(Node1, mnesia, start, [])),
+ ?match(ok, rpc:call(Node2, mnesia, start, [])),
+
+ timer:sleep(500),
+ ?match(ok, rpc:call(Node3, mnesia, start, [[{extra_db_nodes, [Node1, Node2]}]])),
+ ?match(ok, rpc:call(Node3, mnesia, wait_for_tables, [[Tab1, Tab2, Tab3], 20000])),
+
+ ?match(ok, rpc:call(Node3, ?MODULE, verify_data, [Tab1, 100])),
+ ?match(ok, rpc:call(Node3, ?MODULE, verify_data, [Tab2, 100])),
+ ?match(ok, rpc:call(Node3, ?MODULE, verify_data, [Tab3, 100])),
+
+
+ ?match(ok, rpc:call(Node2, ?MODULE, verify_data, [Tab1, 100])),
+ ?match(ok, rpc:call(Node2, ?MODULE, verify_data, [Tab2, 100])),
+ ?match(ok, rpc:call(Node2, ?MODULE, verify_data, [Tab3, 100])),
+
+ ?match(ok, rpc:call(Node1, ?MODULE, verify_data, [Tab1, 100])),
+ ?match(ok, rpc:call(Node1, ?MODULE, verify_data, [Tab2, 100])),
+ ?match(ok, rpc:call(Node1, ?MODULE, verify_data, [Tab3, 100])),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+system_upgrade(doc) ->
+ ["Test on-line and off-line upgrade of the Mnesia application"].
+
+garb_decision(doc) ->
+ ["Test that decisions are garbed correctly."];
+garb_decision(suite) -> [];
+garb_decision(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ check_garb(Nodes),
+ ?match({atomic, ok},mnesia:create_table(a, [{disc_copies, Nodes}])),
+ check_garb(Nodes),
+ ?match({atomic, ok},mnesia:create_table(b, [{ram_copies, Nodes}])),
+ check_garb(Nodes),
+ ?match({atomic, ok},mnesia:create_table(c, [{ram_copies, [Node1, Node3]},
+ {disc_copies, [Node2]}])),
+ check_garb(Nodes),
+ ?match({atomic, ok},mnesia:create_table(d, [{disc_copies, [Node1, Node3]},
+ {ram_copies, [Node2]}])),
+ check_garb(Nodes),
+
+ W = fun(Tab) -> mnesia:write({Tab,1,1}) end,
+ A = fun(Tab) -> mnesia:write({Tab,1,1}), exit(1) end,
+
+ ?match({atomic, ok}, mnesia:transaction(W,[a])),
+ check_garb(Nodes),
+ ?match({atomic, ok}, mnesia:transaction(W,[b])),
+ check_garb(Nodes),
+ ?match({atomic, ok}, mnesia:transaction(W,[c])),
+ check_garb(Nodes),
+ ?match({atomic, ok}, mnesia:transaction(W,[d])),
+ check_garb(Nodes),
+ ?match({aborted,1}, mnesia:transaction(A,[a])),
+ check_garb(Nodes),
+ ?match({aborted,1}, mnesia:transaction(A,[b])),
+ check_garb(Nodes),
+ ?match({aborted,1}, mnesia:transaction(A,[c])),
+ check_garb(Nodes),
+ ?match({aborted,1}, mnesia:transaction(A,[d])),
+ check_garb(Nodes),
+
+ rpc:call(Node2, mnesia, lkill, []),
+ ?match({atomic, ok}, mnesia:transaction(W,[a])),
+ ?match({atomic, ok}, mnesia:transaction(W,[b])),
+ ?match({atomic, ok}, mnesia:transaction(W,[c])),
+ ?match({atomic, ok}, mnesia:transaction(W,[d])),
+ check_garb(Nodes),
+ ?match([], mnesia_test_lib:start_mnesia([Node2])),
+ check_garb(Nodes),
+ timer:sleep(2000),
+ check_garb(Nodes),
+ %%%%%% Check transient_decision logs %%%%%
+
+ ?match(dumped, mnesia:dump_log()), sys:get_status(mnesia_recover), % sync
+ [{atomic, ok} = mnesia:transaction(W,[a]) || _ <- lists:seq(1,30)],
+ ?match(dumped, mnesia:dump_log()), sys:get_status(mnesia_recover), % sync
+ TD0 = mnesia_lib:val(latest_transient_decision),
+ ?match(0, ets:info(TD0, size)),
+ {atomic, ok} = mnesia:transaction(W,[a]),
+ ?match(dumped, mnesia:dump_log()), sys:get_status(mnesia_recover), % sync
+ ?match(TD0, mnesia_lib:val(latest_transient_decision)),
+ [{atomic, ok} = mnesia:transaction(W,[a]) || _ <- lists:seq(1,30)],
+ ?match(dumped, mnesia:dump_log()), sys:get_status(mnesia_recover), % sync
+ ?match(false, TD0 =:= mnesia_lib:val(latest_transient_decision)),
+ ?match(true, lists:member(TD0, mnesia_lib:val(previous_transient_decisions))),
+ ?verify_mnesia(Nodes, []).
+
+check_garb(Nodes) ->
+ rpc:multicall(Nodes, sys, get_status, [mnesia_recover]),
+ ?match({_, []},rpc:multicall(Nodes, erlang, apply, [fun check_garb/0, []])).
+
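+%% The decision table may legitimately contain the serial counter
+%% ({trans_tid, serial, _}) and mnesia_down markers; anything else is a
+%% decision that has not yet been garbage collected and fails the check.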
+check_garb() ->
+ try
+ Ds = ets:tab2list(mnesia_decision),
+ Check = fun({trans_tid,serial, _}) -> false;
+ ({mnesia_down,_,_,_}) -> false;
+ (_Else) -> true
+ end,
+ Node = node(),
+ ?match({Node, []}, {node(), lists:filter(Check, Ds)})
+ catch _:_ -> ok
+ end,
+ ok.
diff --git a/lib/mnesia/test/mnesia_registry_test.erl b/lib/mnesia/test/mnesia_registry_test.erl
new file mode 100644
index 0000000000..2305ef93b7
--- /dev/null
+++ b/lib/mnesia/test/mnesia_registry_test.erl
@@ -0,0 +1,137 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_registry_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Test the mnesia_registry module"];
+all(suite) ->
+ [
+ good_dump,
+ bad_dump
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+good_dump(doc) ->
+ ["Dump a faked C-node registry"];
+good_dump(suite) -> [];
+good_dump(Config) when is_list(Config) ->
+ [Node] = Nodes = ?acquire_nodes(1, Config),
+ T1 = gordon,
+ ?match(ok, mnesia_registry:create_table(T1)),
+ One = {T1, 1, 0, integer, 0, 10},
+ Two = {T1, "two", 3, integer, 0, 20},
+ Three = {T1, 3, 0, string, 6, "thirty"},
+ ?match(ok, mnesia:dirty_write(One)),
+ ?match(ok, mnesia:dirty_write(Two)),
+ ?match(ok, mnesia:dirty_write(Three)),
+ ?match([One], mnesia:dirty_read({T1, 1})),
+ ?match([_ | _], dump_registry(Node, T1)),
+
+ NewOne = {T1, 1, 0, integer, 0, 1},
+ NewFour = {T1, "4", 1, string, 4, "four"},
+
+ ?match([NewOne], mnesia:dirty_read({T1, 1})),
+ ?match([Two], mnesia:dirty_read({T1, "two"})),
+ ?match([], mnesia:dirty_read({T1, 3})),
+ ?match([NewFour], mnesia:dirty_read({T1, "4"})),
+
+ T2 = blixt,
+ ?match({'EXIT', {aborted, {no_exists, _}}},
+ mnesia:dirty_read({T2, 1})),
+ ?match([_ |_], dump_registry(Node, T2)),
+
+ NewOne2 = setelement(1, NewOne, T2),
+ NewFour2 = setelement(1, NewFour, T2),
+
+ ?match([NewOne2], mnesia:dirty_read({T2, 1})),
+ ?match([], mnesia:dirty_read({T2, "two"})),
+ ?match([], mnesia:dirty_read({T2, 3})),
+ ?match([NewFour2], mnesia:dirty_read({T2, "4"})),
+ ?match([_One2, NewFour2], lists:sort(restore_registry(Node, T2))),
+
+ ?verify_mnesia(Nodes, []).
+
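+%% Fake the dump protocol of a C-node registry: the dump process returned by
+%% mnesia_registry:start_dump/2 is fed {write, Key, KeySize, ValType,
+%% ValSize, Val}, {delete, Key} and finally {commit, Sender} messages, and
+%% the records we expect to find afterwards are returned to the caller.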
+dump_registry(Node, Tab) ->
+ case rpc:call(Node, mnesia_registry, start_dump, [Tab, self()]) of
+ Pid when is_pid(Pid) ->
+ Pid ! {write, 1, 0, integer, 0, 1},
+ Pid ! {delete, 3},
+ Pid ! {write, "4", 1, string, 4, "four"},
+ Pid ! {commit, self()},
+ receive
+ {ok, Pid} ->
+ [{Tab, "4", 1, string, 4, "four"},
+ {Tab, 1, 0, integer, 0, 1}];
+ {'EXIT', Pid, Reason} ->
+ exit(Reason)
+ end;
+ {badrpc, Reason} ->
+ exit(Reason)
+ end.
+
+restore_registry(Node, Tab) ->
+ case rpc:call(Node, mnesia_registry, start_restore, [Tab, self()]) of
+ {size, Pid, N, _LargestKeySize, _LargestValSize} ->
+ Pid ! {send_records, self()},
+ receive_records(Tab, N);
+ {badrpc, Reason} ->
+ exit(Reason)
+ end.
+
+receive_records(Tab, N) when N > 0 ->
+ receive
+ {restore, KeySize, ValSize, ValType, Key, Val} ->
+ [{Tab, Key, KeySize, ValType, ValSize, Val} | receive_records(Tab, N -1)];
+ {'EXIT', _Pid, Reason} ->
+ exit(Reason)
+ end;
+receive_records(_Tab, 0) ->
+ [].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+bad_dump(doc) ->
+ ["Intentionally fail with the dump of a faked C-node registry"];
+bad_dump(suite) -> [];
+bad_dump(Config) when is_list(Config) ->
+ [Node] = Nodes = ?acquire_nodes(1, Config),
+
+ OldTab = ming,
+ ?match({'EXIT', {aborted, _}}, mnesia_registry:start_restore(no_tab, self())),
+ ?match({atomic, ok}, mnesia:create_table(OldTab, [{attributes, [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q]}])),
+ ?match({'EXIT',{aborted,{bad_type,_}}}, dump_registry(Node, OldTab)),
+ ?match(stopped, mnesia:stop()),
+
+ ?match({'EXIT', {aborted, _}}, mnesia_registry:create_table(down_table)),
+ ?match({'EXIT', {aborted, _}}, mnesia_registry:start_restore(no_tab, self())),
+ ?match({'EXIT', {aborted, _}}, dump_registry(Node, down_dump)),
+
+ ?verify_mnesia([], Nodes).
+
diff --git a/lib/mnesia/test/mnesia_schema_recovery_test.erl b/lib/mnesia/test/mnesia_schema_recovery_test.erl
new file mode 100644
index 0000000000..387238ae6b
--- /dev/null
+++ b/lib/mnesia/test/mnesia_schema_recovery_test.erl
@@ -0,0 +1,787 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_schema_recovery_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(receive_messages(Msgs), receive_messages(Msgs, ?FILE, ?LINE)).
+
+%% First, some debug logging
+-define(dgb, true).
+-ifdef(dgb).
+-define(dl(X, Y), ?verbose("**TRACING: " ++ X ++ "**~n", Y)).
+-else.
+-define(dl(X, Y), ok).
+-endif.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+all(doc) ->
+ ["Verify recoverabiliy of schema transactions.",
+ " Verify that a schema transaction",
+ " can be completed when it has been logged correctly and Mnesia",
+ " crashed before the log has been dumped. Then the transaction ",
+ " should be handled during the log dump at startup"
+ ];
+all(suite) ->
+ [interrupted_before_log_dump,
+ interrupted_after_log_dump].
+
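+%% Common pattern for the test cases below (a description, not new test code):
+%% arm a debug point in mnesia_dumper on one node via setup_dbgpoint/2, let a
+%% helper process start the schema operation, kill Mnesia on that node when the
+%% debug point is reached (kill_at_debug/0), then restart Mnesia and verify
+%% that the operation was completed during the log dump at startup.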
+interrupted_before_log_dump(suite) ->
+ [interrupted_before_create_ram,
+ interrupted_before_create_disc,
+ interrupted_before_create_disc_only,
+ interrupted_before_create_nostore,
+ interrupted_before_delete_ram,
+ interrupted_before_delete_disc,
+ interrupted_before_delete_disc_only,
+ interrupted_before_add_ram,
+ interrupted_before_add_disc,
+ interrupted_before_add_disc_only,
+ interrupted_before_add_kill_copier,
+ interrupted_before_move_ram,
+ interrupted_before_move_disc,
+ interrupted_before_move_disc_only,
+ interrupted_before_move_kill_copier,
+ interrupted_before_delcopy_ram,
+ interrupted_before_delcopy_disc,
+ interrupted_before_delcopy_disc_only,
+ interrupted_before_delcopy_kill_copier,
+ interrupted_before_addindex_ram,
+ interrupted_before_addindex_disc,
+ interrupted_before_addindex_disc_only,
+ interrupted_before_delindex_ram,
+ interrupted_before_delindex_disc,
+ interrupted_before_delindex_disc_only,
+ interrupted_before_change_type_ram2disc,
+ interrupted_before_change_type_ram2disc_only,
+ interrupted_before_change_type_disc2ram,
+ interrupted_before_change_type_disc2disc_only,
+ interrupted_before_change_type_disc_only2ram,
+ interrupted_before_change_type_disc_only2disc,
+ interrupted_before_change_type_other_node,
+ interrupted_before_change_schema_type %% Change schema table copy type!!
+ ].
+
+interrupted_after_log_dump(suite) ->
+ [interrupted_after_create_ram,
+ interrupted_after_create_disc,
+ interrupted_after_create_disc_only,
+ interrupted_after_create_nostore,
+ interrupted_after_delete_ram,
+ interrupted_after_delete_disc,
+ interrupted_after_delete_disc_only,
+ interrupted_after_add_ram,
+ interrupted_after_add_disc,
+ interrupted_after_add_disc_only,
+ interrupted_after_add_kill_copier,
+ interrupted_after_move_ram,
+ interrupted_after_move_disc,
+ interrupted_after_move_disc_only,
+ interrupted_after_move_kill_copier,
+ interrupted_after_delcopy_ram,
+ interrupted_after_delcopy_disc,
+ interrupted_after_delcopy_disc_only,
+ interrupted_after_delcopy_kill_copier,
+ interrupted_after_addindex_ram,
+ interrupted_after_addindex_disc,
+ interrupted_after_addindex_disc_only,
+ interrupted_after_delindex_ram,
+ interrupted_after_delindex_disc,
+ interrupted_after_delindex_disc_only,
+ interrupted_after_change_type_ram2disc,
+ interrupted_after_change_type_ram2disc_only,
+ interrupted_after_change_type_disc2ram,
+ interrupted_after_change_type_disc2disc_only,
+ interrupted_after_change_type_disc_only2ram,
+ interrupted_after_change_type_disc_only2disc,
+ interrupted_after_change_type_other_node,
+ interrupted_after_change_schema_type %% Change schema table copy type!!
+
+% interrupted_before_change_access_mode,
+% interrupted_before_transform,
+% interrupted_before_restore,
+ ].
+
+interrupted_before_create_ram(suite) -> [];
+interrupted_before_create_ram(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, dump_schema_op},
+ interrupted_create(Config, ram_copies, all, KillAt).
+
+interrupted_before_create_disc(suite) -> [];
+interrupted_before_create_disc(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, dump_schema_op},
+ interrupted_create(Config, disc_copies, all, KillAt).
+
+interrupted_before_create_disc_only(suite) -> [];
+interrupted_before_create_disc_only(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, dump_schema_op},
+ interrupted_create(Config, disc_only_copies, all, KillAt).
+
+interrupted_before_create_nostore(suite) -> [];
+interrupted_before_create_nostore(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, dump_schema_op},
+ interrupted_create(Config, ram_copies, one, KillAt).
+
+interrupted_after_create_ram(suite) -> [];
+interrupted_after_create_ram(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, post_dump},
+ interrupted_create(Config, ram_copies, all, KillAt).
+
+interrupted_after_create_disc(suite) -> [];
+interrupted_after_create_disc(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, post_dump},
+ interrupted_create(Config, disc_copies, all, KillAt).
+
+interrupted_after_create_disc_only(suite) -> [];
+interrupted_after_create_disc_only(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, post_dump},
+ interrupted_create(Config, disc_only_copies, all, KillAt).
+
+interrupted_after_create_nostore(suite) -> [];
+interrupted_after_create_nostore(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, post_dump},
+ interrupted_create(Config, ram_copies, one, KillAt).
+
+%%% After the dump, no debug point is needed
+interrupted_create(Config, Type, _Where, {mnesia_dumper, post_dump}) ->
+ [Node1] = Nodes = ?acquire_nodes(1, [{tc_timeout, timer:seconds(30)} | Config]),
+ ?match({atomic, ok},mnesia:create_table(itrpt, [{Type, Nodes}])),
+ ?match({atomic, ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [itrpt,test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ case Type of
+ ram_copies ->
+ ?match([], mnesia:dirty_read({itrpt, before}));
+ _ ->
+ ?match([{itrpt, before, 1}], mnesia:dirty_read({itrpt, before}))
+ end,
+ ?verify_mnesia(Nodes, []);
+interrupted_create(Config, Type, Where, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ {success, [A]} = ?start_activities([Node2]),
+ setup_dbgpoint(KillAt, Node2),
+
+ if %% CREATE TABLE
+ Where == all -> % tables on both nodes
+ A ! fun() -> mnesia:create_table(itrpt, [{Type, Nodes}]) end;
+ true -> % no table on the killed node
+ A ! fun() -> mnesia:create_table(itrpt, [{Type, [Node1]}]) end
+ end,
+
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Node2], [itrpt])),
+ %% Verify
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ verify_tab(Node1, Node2),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_delete_ram(suite) -> [];
+interrupted_before_delete_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delete(Config, ram_copies, Debug_Point).
+interrupted_before_delete_disc(suite) -> [];
+interrupted_before_delete_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delete(Config, disc_copies, Debug_Point).
+interrupted_before_delete_disc_only(suite) -> [];
+interrupted_before_delete_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delete(Config, disc_only_copies, Debug_Point).
+
+interrupted_after_delete_ram(suite) -> [];
+interrupted_after_delete_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delete(Config, ram_copies, Debug_Point).
+interrupted_after_delete_disc(suite) -> [];
+interrupted_after_delete_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delete(Config, disc_copies, Debug_Point).
+interrupted_after_delete_disc_only(suite) -> [];
+interrupted_after_delete_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delete(Config, disc_only_copies, Debug_Point).
+
+interrupted_delete(Config, Type, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node2]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+ {_Alive, Kill} = {Node1, Node2},
+ {success, [A]} = ?start_activities([Kill]),
+
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:delete_table(Tab) end,
+
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Node2], [])),
+ Bad = {badrpc, {'EXIT', {aborted,{no_exists, Tab, all}}}},
+ ?match(Bad, rpc:call(Node1, mnesia, table_info, [Tab, all])),
+ ?match(Bad, rpc:call(Node2, mnesia, table_info, [Tab, all])),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_add_ram(suite) -> [];
+interrupted_before_add_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_add(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_before_add_disc(suite) -> [];
+interrupted_before_add_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_add(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_before_add_disc_only(suite) -> [];
+interrupted_before_add_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_add(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_before_add_kill_copier(suite) -> [];
+interrupted_before_add_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_add(Config, ram_copies, kill_copier, Debug_Point).
+
+interrupted_after_add_ram(suite) -> [];
+interrupted_after_add_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_add(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_after_add_disc(suite) -> [];
+interrupted_after_add_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_add(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_after_add_disc_only(suite) -> [];
+interrupted_after_add_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_add(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_after_add_kill_copier(suite) -> [];
+interrupted_after_add_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_add(Config, ram_copies, kill_copier, Debug_Point).
+
+%%% After the dump, no debug point is needed
+interrupted_add(Config, Type, _Where, {mnesia_dumper, post_dump}) ->
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node2]}, {local_content,true}])),
+ ?match({atomic, ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match({atomic, ok}, mnesia:add_table_copy(Tab, Node1, Type)),
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [itrpt,test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ case Type of
+ ram_copies ->
+ ?match([], mnesia:dirty_read({itrpt, before}));
+ _ ->
+ ?match([{itrpt, before, 1}], mnesia:dirty_read({itrpt, before}))
+ end,
+ ?verify_mnesia(Nodes, []);
+interrupted_add(Config, Type, Who, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ {_Alive, Kill} =
+ if Who == kill_reciever ->
+ {Node1, Node2};
+ true ->
+ {Node2, Node1}
+ end,
+ {success, [A]} = ?start_activities([Kill]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+
+ setup_dbgpoint(KillAt, Kill),
+
+ A ! fun() -> mnesia:add_table_copy(Tab, Node2, Type) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Kill], [itrpt])),
+ verify_tab(Node1, Node2),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_move_ram(suite) -> [];
+interrupted_before_move_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_move(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_before_move_disc(suite) -> [];
+interrupted_before_move_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_move(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_before_move_disc_only(suite) -> [];
+interrupted_before_move_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_move(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_before_move_kill_copier(suite) -> [];
+interrupted_before_move_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_move(Config, ram_copies, kill_copier, Debug_Point).
+
+interrupted_after_move_ram(suite) -> [];
+interrupted_after_move_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_move(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_after_move_disc(suite) -> [];
+interrupted_after_move_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_move(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_after_move_disc_only(suite) -> [];
+interrupted_after_move_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_move(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_after_move_kill_copier(suite) -> [];
+interrupted_after_move_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_move(Config, ram_copies, kill_copier, Debug_Point).
+
+%%% After the dump, no debug point is needed
+interrupted_move(Config, Type, _Where, {mnesia_dumper, post_dump}) ->
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1]}])),
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ ?match({atomic, ok}, mnesia:move_table_copy(Tab, Node1, Node2)),
+ ?match(ok, mnesia:dirty_write({itrpt, aFter, 1})),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [itrpt,test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ ?match([{itrpt, before, 1}], mnesia:dirty_read({itrpt, before})),
+ ?match([{itrpt, aFter, 1}], mnesia:dirty_read({itrpt, aFter})),
+ ?verify_mnesia(Nodes, []);
+interrupted_move(Config, Type, Who, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+
+ {_Alive, Kill} =
+ if Who == kill_reciever ->
+ if Type == ram_copies ->
+ {atomic, ok} = mnesia:dump_tables([Tab]);
+ true ->
+ ignore
+ end,
+ {Node1, Node2};
+ true ->
+ {Node2, Node1}
+ end,
+
+ {success, [A]} = ?start_activities([Kill]),
+
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:move_table_copy(Tab, Node1, Node2) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Kill], [itrpt])),
+ verify_tab(Node1, Node2),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_delcopy_ram(suite) -> [];
+interrupted_before_delcopy_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delcopy(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_before_delcopy_disc(suite) -> [];
+interrupted_before_delcopy_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delcopy(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_before_delcopy_disc_only(suite) -> [];
+interrupted_before_delcopy_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delcopy(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_before_delcopy_kill_copier(suite) -> [];
+interrupted_before_delcopy_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delcopy(Config, ram_copies, kill_copier, Debug_Point).
+
+interrupted_after_delcopy_ram(suite) -> [];
+interrupted_after_delcopy_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delcopy(Config, ram_copies, kill_reciever, Debug_Point).
+interrupted_after_delcopy_disc(suite) -> [];
+interrupted_after_delcopy_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delcopy(Config, disc_copies, kill_reciever, Debug_Point).
+interrupted_after_delcopy_disc_only(suite) -> [];
+interrupted_after_delcopy_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delcopy(Config, disc_only_copies, kill_reciever, Debug_Point).
+interrupted_after_delcopy_kill_copier(suite) -> [];
+interrupted_after_delcopy_kill_copier(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delcopy(Config, ram_copies, kill_copier, Debug_Point).
+
+
+%%% After the dump, no debug point is needed
+interrupted_delcopy(Config, Type, _Where, {mnesia_dumper, post_dump}) ->
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1,Node2]}])),
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, Node1)),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ ?match([Node2], mnesia:table_info(itrpt,Type)),
+ ?verify_mnesia(Nodes, []);
+interrupted_delcopy(Config, Type, Who, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes =
+ ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1, Node2]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+
+ {_Alive, Kill} =
+ if Who == kill_reciever ->
+ {Node1, Node2};
+ true ->
+ if
+ Type == ram_copies ->
+ {atomic, ok} = mnesia:dump_tables([Tab]);
+ true ->
+ ignore
+ end,
+ {Node2, Node1}
+ end,
+
+ {success, [A]} = ?start_activities([Kill]),
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:del_table_copy(Tab, Node2) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Kill], [itrpt])),
+ verify_tab(Node1, Node2),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_addindex_ram(suite) -> [];
+interrupted_before_addindex_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_addindex(Config, ram_copies, Debug_Point).
+interrupted_before_addindex_disc(suite) -> [];
+interrupted_before_addindex_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_addindex(Config, disc_copies, Debug_Point).
+interrupted_before_addindex_disc_only(suite) -> [];
+interrupted_before_addindex_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_addindex(Config, disc_only_copies, Debug_Point).
+
+interrupted_after_addindex_ram(suite) -> [];
+interrupted_after_addindex_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_addindex(Config, ram_copies, Debug_Point).
+interrupted_after_addindex_disc(suite) -> [];
+interrupted_after_addindex_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_addindex(Config, disc_copies, Debug_Point).
+interrupted_after_addindex_disc_only(suite) -> [];
+interrupted_after_addindex_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_addindex(Config, disc_only_copies, Debug_Point).
+
+
+%%% After the dump, no debug point is needed
+interrupted_addindex(Config, Type, {mnesia_dumper, post_dump}) ->
+ [Node1] = Nodes = ?acquire_nodes(1, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic,ok},mnesia:create_table(Tab, [{Type, Nodes}])),
+ ?match({atomic,ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match({atomic,ok}, mnesia:add_table_index(Tab, val)),
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [itrpt,test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ case Type of
+ ram_copies ->
+ ?match([], mnesia:dirty_index_read(itrpt, 1, val));
+ _ ->
+ ?match([{itrpt, before, 1}], mnesia:dirty_index_read(itrpt, 1, val))
+ end,
+ ?verify_mnesia(Nodes, []);
+interrupted_addindex(Config, Type, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{Type, [Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+ {_Alive, Kill} = {Node1, Node2},
+ {success, [A]} = ?start_activities([Kill]),
+
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:add_table_index(Tab, val) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Node2], [])),
+
+ verify_tab(Node1, Node2),
+ ?match([{Tab, b, a}, {Tab, a, a}],
+ rpc:call(Node1, mnesia, dirty_index_read, [itrpt, a, val])),
+ ?match([{Tab, b, a}, {Tab, a, a}],
+ rpc:call(Node2, mnesia, dirty_index_read, [itrpt, a, val])),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_delindex_ram(suite) -> [];
+interrupted_before_delindex_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delindex(Config, ram_copies, Debug_Point).
+interrupted_before_delindex_disc(suite) -> [];
+interrupted_before_delindex_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delindex(Config, disc_copies, Debug_Point).
+interrupted_before_delindex_disc_only(suite) -> [];
+interrupted_before_delindex_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_delindex(Config, disc_only_copies, Debug_Point).
+
+interrupted_after_delindex_ram(suite) -> [];
+interrupted_after_delindex_ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delindex(Config, ram_copies, Debug_Point).
+interrupted_after_delindex_disc(suite) -> [];
+interrupted_after_delindex_disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delindex(Config, disc_copies, Debug_Point).
+interrupted_after_delindex_disc_only(suite) -> [];
+interrupted_after_delindex_disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_delindex(Config, disc_only_copies, Debug_Point).
+
+%%% After the dump, no debug point is needed
+interrupted_delindex(Config, Type, {mnesia_dumper, post_dump}) ->
+ [Node1] = Nodes = ?acquire_nodes(1, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic,ok},mnesia:create_table(Tab, [{Type, Nodes},{index,[val]}])),
+ ?match({atomic,ok},mnesia:create_table(test, [{disc_copies,[Node1]}])),
+ ?match({atomic,ok}, mnesia:del_table_index(Tab, val)),
+ ?match(ok, mnesia:dirty_write({itrpt, before, 1})),
+ ?match(ok, mnesia:dirty_write({test, found_in_log, 1})),
+ ?match(stopped, mnesia:stop()),
+ ?match([], mnesia_test_lib:start_mnesia([Node1], [itrpt,test])),
+ %% Verify
+ ?match([{test, found_in_log, 1}], mnesia:dirty_read({test, found_in_log})),
+ ?match({'EXIT',{aborted,{badarg,_}}}, mnesia:dirty_index_read(itrpt, 1, val)),
+ ?verify_mnesia(Nodes, []);
+
+interrupted_delindex(Config, Type, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{index, [val]},
+ {Type, [Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+ {_Alive, Kill} = {Node1, Node2},
+ {success, [A]} = ?start_activities([Kill]),
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:del_table_index(Tab, val) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia([Node2], [])),
+ verify_tab(Node1, Node2),
+ ?match({badrpc, _}, rpc:call(Node1, mnesia, dirty_index_read, [itrpt, a, val])),
+ ?match({badrpc, _}, rpc:call(Node2, mnesia, dirty_index_read, [itrpt, a, val])),
+ ?match([], rpc:call(Node1, mnesia, table_info, [Tab, index])),
+ ?match([], rpc:call(Node2, mnesia, table_info, [Tab, index])),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_change_type_ram2disc(suite) -> [];
+interrupted_before_change_type_ram2disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, ram_copies, disc_copies, changer, Debug_Point).
+interrupted_before_change_type_ram2disc_only(suite) -> [];
+interrupted_before_change_type_ram2disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, ram_copies, disc_only_copies, changer, Debug_Point).
+interrupted_before_change_type_disc2ram(suite) -> [];
+interrupted_before_change_type_disc2ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, disc_copies, ram_copies, changer, Debug_Point).
+interrupted_before_change_type_disc2disc_only(suite) -> [];
+interrupted_before_change_type_disc2disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, disc_copies, disc_only_copies, changer, Debug_Point).
+interrupted_before_change_type_disc_only2ram(suite) -> [];
+interrupted_before_change_type_disc_only2ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, disc_only_copies, ram_copies, changer, Debug_Point).
+interrupted_before_change_type_disc_only2disc(suite) -> [];
+interrupted_before_change_type_disc_only2disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, disc_only_copies, disc_copies, changer, Debug_Point).
+interrupted_before_change_type_other_node(suite) -> [];
+interrupted_before_change_type_other_node(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, dump_schema_op},
+ interrupted_change_type(Config, ram_copies, disc_copies, the_other_one, Debug_Point).
+
+interrupted_after_change_type_ram2disc(suite) -> [];
+interrupted_after_change_type_ram2disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, ram_copies, disc_copies, changer, Debug_Point).
+interrupted_after_change_type_ram2disc_only(suite) -> [];
+interrupted_after_change_type_ram2disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, ram_copies, disc_only_copies, changer, Debug_Point).
+interrupted_after_change_type_disc2ram(suite) -> [];
+interrupted_after_change_type_disc2ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, disc_copies, ram_copies, changer, Debug_Point).
+interrupted_after_change_type_disc2disc_only(suite) -> [];
+interrupted_after_change_type_disc2disc_only(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, disc_copies, disc_only_copies, changer, Debug_Point).
+interrupted_after_change_type_disc_only2ram(suite) -> [];
+interrupted_after_change_type_disc_only2ram(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, disc_only_copies, ram_copies, changer, Debug_Point).
+interrupted_after_change_type_disc_only2disc(suite) -> [];
+interrupted_after_change_type_disc_only2disc(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, disc_only_copies, disc_copies, changer, Debug_Point).
+interrupted_after_change_type_other_node(suite) -> [];
+interrupted_after_change_type_other_node(Config) when is_list(Config) ->
+ Debug_Point = {mnesia_dumper, post_dump},
+ interrupted_change_type(Config, ram_copies, disc_copies, the_other_one, Debug_Point).
+
+interrupted_change_type(Config, FromType, ToType, Who, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{FromType, [Node2, Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+
+ {_Alive, Kill} =
+ if Who == changer -> {Node1, Node2};
+ true -> {Node2, Node1}
+ end,
+
+ {success, [A]} = ?start_activities([Kill]),
+ setup_dbgpoint(KillAt, Kill),
+ A ! fun() -> mnesia:change_table_copy_type(Tab, Node2, ToType) end,
+ kill_at_debug(),
+ ?match([], mnesia_test_lib:start_mnesia(Nodes, [itrpt])),
+ verify_tab(Node1, Node2),
+ ?match(FromType, rpc:call(Node1, mnesia, table_info, [Tab, storage_type])),
+ ?match(ToType, rpc:call(Node2, mnesia, table_info, [Tab, storage_type])),
+ ?verify_mnesia(Nodes, []).
+
+interrupted_before_change_schema_type(suite) -> [];
+interrupted_before_change_schema_type(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, dump_schema_op},
+ interrupted_change_schema_type(Config, KillAt).
+
+interrupted_after_change_schema_type(suite) -> [];
+interrupted_after_change_schema_type(Config) when is_list(Config) ->
+ KillAt = {mnesia_dumper, post_dump},
+ interrupted_change_schema_type(Config, KillAt).
+
+-define(cleanup(N, Config),
+ mnesia_test_lib:prepare_test_case([{reload_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
+
+interrupted_change_schema_type(Config, KillAt) ->
+ ?is_debug_compiled,
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, [{tc_timeout, timer:seconds(30)} | Config]),
+
+ Tab = itrpt,
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{ram_copies, [Node2, Node1]}])),
+ ?match(ok, mnesia:dirty_write({Tab, before, 1})),
+
+ {success, [A]} = ?start_activities([Node2]),
+ setup_dbgpoint(KillAt, Node2),
+
+ A ! fun() -> mnesia:change_table_copy_type(schema, Node2, ram_copies) end,
+ kill_at_debug(),
+ ?match(ok, rpc:call(Node2, mnesia, start, [[{extra_db_nodes, [Node1, Node2]}]])),
+ ?match(ok, rpc:call(Node2, mnesia, wait_for_tables, [[itrpt, schema], 2000])),
+ ?match(disc_copies, rpc:call(Node1, mnesia, table_info, [schema, storage_type])),
+ ?match(ram_copies, rpc:call(Node2, mnesia, table_info, [schema, storage_type])),
+
+ %% Go back to disc_copies !!
+ {success, [B]} = ?start_activities([Node2]),
+ setup_dbgpoint(KillAt, Node2),
+ B ! fun() -> mnesia:change_table_copy_type(schema, Node2, disc_copies) end,
+ kill_at_debug(),
+
+ ?match(ok, rpc:call(Node2, mnesia, start, [[{extra_db_nodes, [Node1, Node2]}]])),
+ ?match(ok, rpc:call(Node2, mnesia, wait_for_tables, [[itrpt, schema], 2000])),
+ ?match(disc_copies, rpc:call(Node1, mnesia, table_info, [schema, storage_type])),
+ ?match(disc_copies, rpc:call(Node2, mnesia, table_info, [schema, storage_type])),
+
+ ?verify_mnesia(Nodes, []),
+ ?cleanup(2, Config).
+
+%%% Helpers
+verify_tab(Node1, Node2) ->
+ ?match({atomic, ok},
+ rpc:call(Node1, mnesia, transaction, [fun() -> mnesia:dirty_write({itrpt, a, a}) end])),
+ ?match({atomic, ok},
+ rpc:call(Node2, mnesia, transaction, [fun() -> mnesia:dirty_write({itrpt, b, a}) end])),
+ ?match([{itrpt,a,a}], rpc:call(Node1, mnesia, dirty_read, [{itrpt, a}])),
+ ?match([{itrpt,a,a}], rpc:call(Node2, mnesia, dirty_read, [{itrpt, a}])),
+ ?match([{itrpt,b,a}], rpc:call(Node1, mnesia, dirty_read, [{itrpt, b}])),
+ ?match([{itrpt,b,a}], rpc:call(Node2, mnesia, dirty_read, [{itrpt, b}])),
+ ?match([{itrpt,before,1}], rpc:call(Node1, mnesia, dirty_read, [{itrpt, before}])),
+ ?match([{itrpt,before,1}], rpc:call(Node2, mnesia, dirty_read, [{itrpt, before}])).
+
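+%% Arm a debug point {Module, Point} on node Where. When anything other than
+%% schema_prepare/schema_begin reaches the point, the debug fun deactivates
+%% itself, notifies this process with {fun_done, node()} and then blocks.
+%% kill_at_debug/0 waits for that message and kills Mnesia on the node,
+%% leaving the schema transaction in the log to be recovered at next startup.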
+setup_dbgpoint(DbgPoint, Where) ->
+ Self = self(),
+ TestFun = fun(_, [InitBy]) ->
+ case InitBy of
+ schema_prepare ->
+ ignore;
+ schema_begin ->
+ ignore;
+ _Other ->
+ ?deactivate_debug_fun(DbgPoint),
+ unlink(Self),
+ Self ! {fun_done, node()},
+ timer:sleep(infinity)
+ end
+ end,
+ %% Kill when the debug point has been reached
+ ?remote_activate_debug_fun(Where, DbgPoint, TestFun, []).
+
+kill_at_debug() ->
+ %% Wait till it's killed
+ receive
+ {fun_done, Node} ->
+ ?match([], mnesia_test_lib:kill_mnesia([Node]))
+ after
+ timer:minutes(1) -> ?error("Timeout in kill_at_debug", [])
+ end.
+
diff --git a/lib/mnesia/test/mnesia_test_lib.erl b/lib/mnesia/test/mnesia_test_lib.erl
new file mode 100644
index 0000000000..1e98f017f7
--- /dev/null
+++ b/lib/mnesia/test/mnesia_test_lib.erl
@@ -0,0 +1,1058 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%%% Author: Hakan Mattsson [email protected]
+%%% Purpose: Test case support library
+%%%
+%%% This test suite may be run as a part of the Grand Test Suite
+%%% of Erlang. The Mnesia test suite is structured in a hierarchy.
+%%% Each test case is implemented as an exported function with arity 1.
+%%% Test case identifiers must have the following syntax: {Module, Function}.
+%%%
+%%% The driver of the test suite runs in two passes as follows:
+%%% first the test case function is invoked with the atom 'suite' as
+%%% its single argument. The returned value is treated as a list of sub
+%%% test cases. If the list of sub test cases is [] the test case
+%%% function is invoked again, this time with a list of nodes as
+%%% argument. If the list of sub test cases is not empty, the test
+%%% case driver applies the algorithm recursively on each element
+%%% in the list.
+%%%
+%%% All test cases are written in such a manner
+%%% that they start to invoke ?acquire_nodes(X, Config)
+%%% in order to prepare the test case execution. When that is
+%%% done, the test machinery ensures that at least X number
+%%% of nodes are connected to each other. If too few nodes were
+%%% specified in the Config, the test case is skipped. If there
+%%% were enough node names in the Config, X of them are selected,
+%%% and if some of them happen to be down they are restarted
+%%% via the slave module. When all nodes are up and running, a
+%%% disk resident schema is created on all nodes and Mnesia is
+%%% started on all nodes. This means that all test cases may
+%%% assume that Mnesia is up and running on all acquired nodes.
+%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%
+%%% doc(TestCases)
+%%%
+%%% Generates a test spec from parts of the test case structure
+%%%
+%%% struct(TestCases)
+%%%
+%%% Prints out the test case structure
+%%%
+%%% test(TestCases)
+%%%
+%%% Run parts of the test suite. Uses test/2.
+%%% Reads Config from mnesia_test.config and starts the nodes if necessary.
+%%% Kills Mnesia and wipes out the Mnesia directories as a first step.
+%%%
+%%% test(TestCases, Config)
+%%%
+%%% Run parts of the test suite on the given Nodes,
+%%% assuming that the nodes are up and running.
+%%% Kills Mnesia and wipes out the Mnesia directories as a first step.
+%%%
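+%%% Illustrative sketch (not part of this library): a leaf test case that
+%%% follows the conventions above exports Name(doc), Name(suite) and
+%%% Name(Config), and can be run as mnesia_test_lib:test({Module, Name}):
+%%%
+%%%   my_case(doc) -> ["Verify something"];
+%%%   my_case(suite) -> [];
+%%%   my_case(Config) when is_list(Config) ->
+%%%       Nodes = ?acquire_nodes(2, Config),
+%%%       ?match({atomic, ok}, mnesia:create_table(some_tab, [{ram_copies, Nodes}])),
+%%%       ?match(ok, mnesia:dirty_write({some_tab, key, val})),
+%%%       ?verify_mnesia(Nodes, []).
+%%%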
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-module(mnesia_test_lib).
+-author('[email protected]').
+-export([
+ log/2,
+ log/4,
+ verbose/4,
+ default_config/0,
+ diskless/1,
+ eval_test_case/3,
+ test_driver/2,
+ test_case_evaluator/3,
+ activity_evaluator/1,
+ flush/0,
+ pick_msg/0,
+ start_activities/1,
+ start_transactions/1,
+ start_transactions/2,
+ start_sync_transactions/1,
+ start_sync_transactions/2,
+ sync_trans_tid_serial/1,
+ prepare_test_case/5,
+ select_nodes/4,
+ init_nodes/3,
+ error/4,
+ slave_start_link/0,
+ slave_start_link/1,
+ slave_sup/0,
+
+ start_mnesia/1,
+ start_mnesia/2,
+ start_appls/2,
+ start_appls/3,
+ start_wait/2,
+ storage_type/2,
+ stop_mnesia/1,
+ stop_appls/2,
+ sort/1,
+ kill_mnesia/1,
+ kill_appls/2,
+ verify_mnesia/4,
+ shutdown/0,
+ verify_replica_location/5,
+ lookup_config/2,
+ sync_tables/2,
+ remote_start/3,
+ remote_stop/1,
+ remote_kill/1,
+
+ reload_appls/2,
+
+ remote_activate_debug_fun/6,
+ do_remote_activate_debug_fun/6,
+
+ test/1,
+ test/2,
+ doc/1,
+ struct/1,
+ init_per_testcase/2,
+ fin_per_testcase/2,
+ kill_tc/2
+ ]).
+
+-include("mnesia_test_lib.hrl").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% included for test server compatibility
+%% assume that all test cases only takes Config as sole argument
+init_per_testcase(_Func, Config) ->
+ global:register_name(mnesia_global_logger, group_leader()),
+ Config.
+
+fin_per_testcase(_Func, Config) ->
+ global:unregister_name(mnesia_global_logger),
+ %% Nodes = select_nodes(all, Config, ?FILE, ?LINE),
+ %% rpc:multicall(Nodes, mnesia, lkill, []),
+ Config.
+
+%% Use ?log(Format, Args) as wrapper
+log(Format, Args, LongFile, Line) ->
+ File = filename:basename(LongFile),
+ Format2 = lists:concat([File, "(", Line, ")", ": ", Format]),
+ log(Format2, Args).
+
+log(Format, Args) ->
+ case global:whereis_name(mnesia_global_logger) of
+ undefined ->
+ io:format(user, Format, Args);
+ Pid ->
+ io:format(Pid, Format, Args)
+ end.
+
+verbose(Format, Args, File, Line) ->
+ Arg = mnesia_test_verbose,
+ case get(Arg) of
+ false ->
+ ok;
+ true ->
+ log(Format, Args, File, Line);
+ undefined ->
+ case init:get_argument(Arg) of
+ {ok, List} when is_list(List) ->
+ case lists:last(List) of
+ ["true"] ->
+ put(Arg, true),
+ log(Format, Args, File, Line);
+ _ ->
+ put(Arg, false),
+ ok
+ end;
+ _ ->
+ put(Arg, false),
+ ok
+ end
+ end.
+
+-record('REASON', {file, line, desc}).
+
+error(Format, Args, File, Line) ->
+ global:send(mnesia_global_logger, {failed, File, Line}),
+ Fail = #'REASON'{file = filename:basename(File),
+ line = Line,
+ desc = Args},
+ case global:whereis_name(mnesia_test_case_sup) of
+ undefined ->
+ ignore;
+ Pid ->
+ Pid ! Fail
+%% global:send(mnesia_test_case_sup, Fail),
+ end,
+ log("<>ERROR<>~n" ++ Format, Args, File, Line).
+
+storage_type(Default, Config) ->
+ case diskless(Config) of
+ true ->
+ ram_copies;
+ false ->
+ Default
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+default_config() ->
+ [{nodes, default_nodes()}].
+
+default_nodes() ->
+ mk_nodes(3, []).
+
+mk_nodes(0, Nodes) ->
+ Nodes;
+mk_nodes(N, []) ->
+ mk_nodes(N - 1, [node()]);
+mk_nodes(N, Nodes) when N > 0 ->
+ Head = hd(Nodes),
+ [Name, Host] = node_to_name_and_host(Head),
+ Nodes ++ [mk_node(I, Name, Host) || I <- lists:seq(1, N)].
+
+mk_node(N, Name, Host) ->
+ list_to_atom(lists:concat([Name ++ integer_to_list(N) ++ "@" ++ Host])).
+
+slave_start_link() ->
+ slave_start_link(node()).
+
+slave_start_link(Node) ->
+ [Local, Host] = node_to_name_and_host(Node),
+ {Mega, Sec, Micro} = erlang:now(),
+ List = [Local, "_", Mega, "_", Sec, "_", Micro],
+ Name = list_to_atom(lists:concat(List)),
+ slave_start_link(list_to_atom(Host), Name).
+
+slave_start_link(Host, Name) ->
+ slave_start_link(Host, Name, 10).
+
+slave_start_link(Host, Name, Retries) ->
+ Debug = atom_to_list(mnesia:system_info(debug)),
+ Args = "-mnesia debug " ++ Debug ++
+ " -pa " ++
+ filename:dirname(code:which(?MODULE)) ++
+ " -pa " ++
+ filename:dirname(code:which(mnesia)),
+ case starter(Host, Name, Args) of
+ {ok, NewNode} ->
+ ?match(pong, net_adm:ping(NewNode)),
+ {ok, Cwd} = file:get_cwd(),
+ Path = code:get_path(),
+ ok = rpc:call(NewNode, file, set_cwd, [Cwd]),
+ true = rpc:call(NewNode, code, set_path, [Path]),
+ spawn_link(NewNode, ?MODULE, slave_sup, []),
+ rpc:multicall([node() | nodes()], global, sync, []),
+ {ok, NewNode};
+ {error, Reason} when Retries == 0->
+ {error, Reason};
+ {error, Reason} ->
+ io:format("Could not start slavenode ~p ~p retrying~n",
+ [{Host, Name, Args}, Reason]),
+ timer:sleep(500),
+ slave_start_link(Host, Name, Retries - 1)
+ end.
+
+starter(Host, Name, Args) ->
+ case os:type() of
+ vxworks ->
+ X = test_server:start_node(Name, slave, [{args,Args}]),
+ timer:sleep(5000),
+ X;
+ _ ->
+ slave:start(Host, Name, Args)
+ end.
+
+slave_sup() ->
+ process_flag(trap_exit, true),
+ receive
+ {'EXIT', _, _} ->
+ case os:type() of
+ vxworks ->
+ erlang:halt();
+ _ ->
+ ignore
+ end
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Index the test case structure
+
+doc(TestCases) when is_list(TestCases) ->
+ test(TestCases, suite),
+ SuiteFname = "index.html",
+ io:format("Generating HTML test specification to file: ~s~n",
+ [SuiteFname]),
+ {ok, Fd} = file:open(SuiteFname, [write]),
+ io:format(Fd, "<TITLE>Test specification for ~p</TITLE>.~n", [TestCases]),
+ io:format(Fd, "<H1>Test specification for ~p</H1>~n", [TestCases]),
+ io:format(Fd, "Test cases which not are implemented yet are written in <B>bold face</B>.~n~n", []),
+
+ io:format(Fd, "<BR><BR>~n", []),
+ io:format(Fd, "~n<DL>~n", []),
+ do_doc(Fd, TestCases, []),
+ io:format(Fd, "</DL>~n", []),
+ file:close(Fd);
+doc(TestCases) ->
+ doc([TestCases]).
+
+do_doc(Fd, [H | T], List) ->
+ case H of
+ {Module, TestCase} when is_atom(Module), is_atom(TestCase) ->
+ do_doc(Fd, Module, TestCase, List);
+ TestCase when is_atom(TestCase), List == [] ->
+ do_doc(Fd, mnesia_SUITE, TestCase, List);
+ TestCase when is_atom(TestCase) ->
+ do_doc(Fd, hd(List), TestCase, List)
+ end,
+ do_doc(Fd, T, List);
+do_doc(_, [], _) ->
+ ok.
+
+do_doc(Fd, Module, TestCase, List) ->
+ case get_suite(Module, TestCase) of
+ [] ->
+ %% Implemented leaf test case
+ Head = ?flat_format("<A HREF=~p.html#~p_1>{~p, ~p}</A>}",
+ [Module, TestCase, Module, TestCase]),
+ print_doc(Fd, Module, TestCase, Head);
+ Suite when is_list(Suite) ->
+ %% Test suite
+ Head = ?flat_format("{~p, ~p}", [Module, TestCase]),
+ print_doc(Fd, Module, TestCase, Head),
+ io:format(Fd, "~n<DL>~n", []),
+ do_doc(Fd, Suite, [Module | List]),
+ io:format(Fd, "</DL>~n", []);
+ 'NYI' ->
+ %% Not yet implemented
+ Head = ?flat_format("<B>{~p, ~p}</B>", [Module, TestCase]),
+ print_doc(Fd, Module, TestCase, Head)
+ end.
+
+print_doc(Fd, Mod, Fun, Head) ->
+ case catch (apply(Mod, Fun, [doc])) of
+ {'EXIT', _} ->
+ io:format(Fd, "<DT>~s</DT>~n", [Head]);
+ Doc when is_list(Doc) ->
+ io:format(Fd, "<DT><U>~s</U><BR><DD>~n", [Head]),
+ print_rows(Fd, Doc),
+ io:format(Fd, "</DD><BR><BR>~n", [])
+ end.
+
+print_rows(_Fd, []) ->
+ ok;
+print_rows(Fd, [H | T]) when is_list(H) ->
+ io:format(Fd, "~s~n", [H]),
+ print_rows(Fd, T);
+print_rows(Fd, [H | T]) when is_integer(H) ->
+ io:format(Fd, "~s~n", [[H | T]]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Show the test case structure
+
+struct(TestCases) ->
+ T = test(TestCases, suite),
+ struct(T, "").
+
+struct({Module, TestCase}, Indentation)
+ when is_atom(Module), is_atom(TestCase) ->
+ log("~s{~p, ~p} ...~n", [Indentation, Module, TestCase]);
+struct({Module, TestCase, Other}, Indentation)
+ when is_atom(Module), is_atom(TestCase) ->
+ log("~s{~p, ~p} ~p~n", [Indentation, Module, TestCase, Other]);
+struct([], _) ->
+ ok;
+struct([TestCase | TestCases], Indentation) ->
+ struct(TestCase, Indentation),
+ struct(TestCases, Indentation);
+struct({TestCase, []}, Indentation) ->
+ struct(TestCase, Indentation);
+struct({TestCase, SubTestCases}, Indentation) when is_list(SubTestCases) ->
+ struct(TestCase, Indentation),
+ struct(SubTestCases, Indentation ++ " ").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Execute the test cases
+
+test(TestCases) ->
+ test(TestCases, []).
+
+test(TestCases, suite) when is_list(TestCases) ->
+ test_driver(TestCases, suite);
+test(TestCases, Config) when is_list(TestCases) ->
+ D1 = lists:duplicate(10, $=),
+ D2 = lists:duplicate(10, $ ),
+ log("~n~s TEST CASES: ~p~n ~sCONFIG: ~p~n~n", [D1, TestCases, D2, Config]),
+ test_driver(TestCases, Config);
+test(TestCase, Config) ->
+ test([TestCase], Config).
+
+test_driver([], _Config) ->
+ [];
+test_driver([T|TestCases], Config) ->
+ L1 = test_driver(T, Config),
+ L2 = test_driver(TestCases, Config),
+ [L1|L2];
+test_driver({Module, TestCases}, Config) when is_list(TestCases)->
+ test_driver(default_module(Module, TestCases), Config);
+test_driver({_, {Module, TestCase}}, Config) ->
+ test_driver({Module, TestCase}, Config);
+test_driver({Module, TestCase}, Config) ->
+ Sec = timer:seconds(1) * 1000,
+ case get_suite(Module, TestCase) of
+ [] when Config == suite ->
+ {Module, TestCase, 'IMPL'};
+ [] ->
+ log("Eval test case: ~w~n", [{Module, TestCase}]),
+ {T, Res} =
+ timer:tc(?MODULE, eval_test_case, [Module, TestCase, Config]),
+ log("Tested ~w in ~w sec~n", [TestCase, T div Sec]),
+ {T div Sec, Res};
+ Suite when is_list(Suite), Config == suite ->
+ Res = test_driver(default_module(Module, Suite), Config),
+ {{Module, TestCase}, Res};
+ Suite when is_list(Suite) ->
+ log("Expand test case ~w~n", [{Module, TestCase}]),
+ Def = default_module(Module, Suite),
+ {T, Res} = timer:tc(?MODULE, test_driver, [Def, Config]),
+ {T div Sec, {{Module, TestCase}, Res}};
+ 'NYI' when Config == suite ->
+ {Module, TestCase, 'NYI'};
+ 'NYI' ->
+ log("<WARNING> Test case ~w NYI~n", [{Module, TestCase}]),
+ {0, {skip, {Module, TestCase}, "NYI"}}
+ end;
+test_driver(TestCase, Config) ->
+ DefaultModule = mnesia_SUITE,
+ log("<>WARNING<> Missing module in test case identifier. "
+ "{~w, ~w} assumed~n", [DefaultModule, TestCase]),
+ test_driver({DefaultModule, TestCase}, Config).
+
+default_module(DefaultModule, TestCases) when is_list(TestCases) ->
+ Fun = fun(T) ->
+ case T of
+ {_, _} -> true;
+ T -> {true, {DefaultModule, T}}
+ end
+ end,
+ lists:zf(Fun, TestCases).
+
+%% Returns a list (possibly empty) or the atom 'NYI'
+get_suite(Mod, Fun) ->
+ case catch (apply(Mod, Fun, [suite])) of
+ {'EXIT', _} -> 'NYI';
+ List when is_list(List) -> List
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+eval_test_case(Mod, Fun, Config) ->
+ flush(),
+ global:register_name(mnesia_test_case_sup, self()),
+ Flag = process_flag(trap_exit, true),
+ Pid = spawn_link(?MODULE, test_case_evaluator, [Mod, Fun, [Config]]),
+ R = wait_for_evaluator(Pid, Mod, Fun, Config),
+ global:unregister_name(mnesia_test_case_sup),
+ process_flag(trap_exit, Flag),
+ R.
+
+flush() ->
+ receive Msg -> [Msg | flush()]
+ after 0 -> []
+ end.
+
+wait_for_evaluator(Pid, Mod, Fun, Config) ->
+ receive
+ {'EXIT', Pid, {test_case_ok, _PidRes}} ->
+ Errors = flush(),
+ Res =
+ case Errors of
+ [] -> ok;
+ Errors -> failed
+ end,
+ {Res, {Mod, Fun}, Errors};
+ {'EXIT', Pid, {skipped, Reason}} ->
+ log("<WARNING> Test case ~w skipped, because ~p~n",
+ [{Mod, Fun}, Reason]),
+ Mod:fin_per_testcase(Fun, Config),
+ {skip, {Mod, Fun}, Reason};
+ {'EXIT', Pid, Reason} ->
+ log("<>ERROR<> Eval process ~w exited, because ~p~n",
+ [{Mod, Fun}, Reason]),
+ Mod:fin_per_testcase(Fun, Config),
+ {crash, {Mod, Fun}, Reason}
+ end.
+
+test_case_evaluator(Mod, Fun, [Config]) ->
+ NewConfig = Mod:init_per_testcase(Fun, Config),
+ R = apply(Mod, Fun, [NewConfig]),
+ Mod:fin_per_testcase(Fun, NewConfig),
+ exit({test_case_ok, R}).
+
+activity_evaluator(Coordinator) ->
+ activity_evaluator_loop(Coordinator),
+ exit(normal).
+
+activity_evaluator_loop(Coordinator) ->
+ receive
+ begin_trans ->
+ transaction(Coordinator, 0);
+ {begin_trans, MaxRetries} ->
+ transaction(Coordinator, MaxRetries);
+ end_trans ->
+ end_trans;
+ Fun when is_function(Fun) ->
+ Coordinator ! {self(), Fun()},
+ activity_evaluator_loop(Coordinator);
+% {'EXIT', Coordinator, Reason} ->
+% Reason;
+ ExitExpr ->
+% ?error("activity_evaluator_loop ~p ~p: exit(~p)~n}", [Coordinator, self(), ExitExpr]),
+ exit(ExitExpr)
+ end.
+
+transaction(Coordinator, MaxRetries) ->
+ Fun = fun() ->
+ Coordinator ! {self(), begin_trans},
+ activity_evaluator_loop(Coordinator)
+ end,
+ Coordinator ! {self(), mnesia:transaction(Fun, MaxRetries)},
+ activity_evaluator_loop(Coordinator).
+
+pick_msg() ->
+ receive
+ Message -> Message
+ after 4000 -> timeout
+ end.
+
+start_activities(Nodes) ->
+ Fun = fun(N) -> spawn_link(N, ?MODULE, activity_evaluator, [self()]) end,
+ Pids = mapl(Fun, Nodes),
+ {success, Pids}.
+
+mapl(Fun, [H|T]) ->
+ Res = Fun(H),
+ [Res|mapl(Fun, T)];
+mapl(_Fun, []) ->
+ [].
+
+diskless(Config) ->
+ case lists:keysearch(diskless, 1, Config) of
+ {value, {diskless, true}} ->
+ true;
+ _Else ->
+ false
+ end.
+
+
+start_transactions(Pids) ->
+ Fun = fun(Pid) ->
+ Pid ! begin_trans,
+ ?match_receive({Pid, begin_trans})
+ end,
+ mapl(Fun, Pids).
+
+start_sync_transactions(Pids) ->
+ Nodes = [node(Pid) || Pid <- Pids],
+ Fun = fun(Pid) ->
+ sync_trans_tid_serial(Nodes),
+ Pid ! begin_trans,
+ ?match_receive({Pid, begin_trans})
+ end,
+ mapl(Fun, Pids).
+
+
+start_transactions(Pids, MaxRetries) ->
+ Fun = fun(Pid) ->
+ Pid ! {begin_trans, MaxRetries},
+ ?match_receive({Pid, begin_trans})
+ end,
+ mapl(Fun, Pids).
+
+start_sync_transactions(Pids, MaxRetries) ->
+ Nodes = [node(Pid) || Pid <- Pids],
+ Fun = fun(Pid) ->
+ sync_trans_tid_serial(Nodes),
+ Pid ! {begin_trans, MaxRetries},
+ ?match_receive({Pid, begin_trans})
+ end,
+ mapl(Fun, Pids).
+
+sync_trans_tid_serial(Nodes) ->
+ Fun = fun() -> mnesia:write_lock_table(schema) end,
+ rpc:multicall(Nodes, mnesia, transaction, [Fun]).
+
+select_nodes(N, Config, File, Line) ->
+ prepare_test_case([], N, Config, File, Line).
+
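+%% Implements the ?acquire_nodes/?select_nodes flow described in the header:
+%% picks N node names from the Config (nodes/nodenames keys) and then performs
+%% the given Actions on them, e.g. init_test_case (which starts missing slave
+%% nodes), delete_schema, create_schema, start_appls and reload_appls.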
+prepare_test_case(Actions, N, Config, File, Line) ->
+ NodeList1 = lookup_config(nodes, Config),
+ NodeList2 = lookup_config(nodenames, Config), %% For testserver
+ NodeList3 = append_unique(NodeList1, NodeList2),
+ This = node(),
+ All = [This | lists:delete(This, NodeList3)],
+ Selected = pick_nodes(N, All, File, Line),
+ case diskless(Config) of
+ true ->
+ ok;
+ false ->
+ rpc:multicall(Selected, application, set_env,[mnesia, schema_location, opt_disc])
+ end,
+ do_prepare(Actions, Selected, All, Config, File, Line).
+
+do_prepare([], Selected, _All, _Config, _File, _Line) ->
+ Selected;
+do_prepare([{init_test_case, Appls} | Actions], Selected, All, Config, File, Line) ->
+ set_kill_timer(Config),
+ Started = init_nodes(Selected, File, Line),
+ All2 = append_unique(Started, All),
+ Alive = mnesia_lib:intersect(nodes() ++ [node()], All2),
+ kill_appls(Appls, Alive),
+ process_flag(trap_exit, true),
+ do_prepare(Actions, Started, All2, Config, File, Line);
+do_prepare([delete_schema | Actions], Selected, All, Config, File, Line) ->
+ Alive = mnesia_lib:intersect(nodes() ++ [node()], All),
+ case diskless(Config) of
+ true ->
+ skip;
+ false ->
+ Del = fun(Node) ->
+ case mnesia:delete_schema([Node]) of
+ ok -> ok;
+ {error, {"All nodes not running",_}} ->
+ ok;
+ Else ->
+ ?log("Delete schema error ~p ~n", [Else])
+ end
+ end,
+ lists:foreach(Del, Alive)
+ end,
+ do_prepare(Actions, Selected, All, Config, File, Line);
+do_prepare([create_schema | Actions], Selected, All, Config, File, Line) ->
+ case diskless(Config) of
+ true ->
+ skip;
+ _Else ->
+ case mnesia:create_schema(Selected) of
+ ok ->
+ ignore;
+ BadNodes ->
+ ?fatal("Cannot create Mnesia schema on ~p~n", [BadNodes])
+ end
+ end,
+ do_prepare(Actions, Selected, All, Config, File, Line);
+do_prepare([{start_appls, Appls} | Actions], Selected, All, Config, File, Line) ->
+ case start_appls(Appls, Selected, Config) of
+ [] -> ok;
+ Bad -> ?fatal("Cannot start appls ~p: ~p~n", [Appls, Bad])
+ end,
+ do_prepare(Actions, Selected, All, Config, File, Line);
+do_prepare([{reload_appls, Appls} | Actions], Selected, All, Config, File, Line) ->
+ reload_appls(Appls, Selected),
+ do_prepare(Actions, Selected, All, Config, File, Line).
+
+set_kill_timer(Config) ->
+ case init:get_argument(mnesia_test_timeout) of
+ {ok, _ } -> ok;
+ _ ->
+ Time0 =
+ case lookup_config(tc_timeout, Config) of
+ [] -> timer:minutes(5);
+ ConfigTime when is_integer(ConfigTime) -> ConfigTime
+ end,
+ Mul = try
+ test_server:timetrap_scale_factor()
+ catch _:_ -> 1 end,
+ (catch test_server:timetrap(Mul*Time0 + 1000)),
+ spawn_link(?MODULE, kill_tc, [self(),Time0*Mul])
+ end.
+
+kill_tc(Pid, Time) ->
+ receive
+ after Time ->
+ case process_info(Pid) of
+ undefined -> ok;
+ _ ->
+ ?error("Watchdog in test case timed out "
+ "in ~p min~n", [Time div (1000*60)]),
+ Files = mnesia_lib:dist_coredump(),
+ ?log("Cores dumped to:~n ~p~n", [Files]),
+ %% Generate Erlang crash dumps.
+ %% GenDump = fun(Node) ->
+ %% File = "CRASH_" ++ atom_to_list(Node) ++ ".dump",
+ %% rpc:call(Node, os, putenv, ["ERL_CRASH_DUMP", File]),
+ %% rpc:cast(Node, erlang, halt, ["RemoteTimeTrap"])
+ %% end,
+ %% [GenDump(Node) || Node <- nodes()],
+
+ %% erlang:halt("DebugTimeTrap"),
+ exit(Pid, kill)
+ end
+ end.
+
+
+append_unique([], List) -> List;
+append_unique([H|R], List) ->
+ case lists:member(H, List) of
+ true -> append_unique(R, List);
+ false -> [H | append_unique(R, List)]
+ end.
+
+pick_nodes(all, Nodes, File, Line) ->
+ pick_nodes(length(Nodes), Nodes, File, Line);
+pick_nodes(N, [H | T], File, Line) when N > 0 ->
+ [H | pick_nodes(N - 1, T, File, Line)];
+pick_nodes(0, _Nodes, _File, _Line) ->
+ [];
+pick_nodes(N, [], File, Line) ->
+ ?skip("Test case (~p(~p)) ignored: ~p nodes missing~n",
+ [File, Line, N]).
+
+init_nodes([Node | Nodes], File, Line) ->
+ case net_adm:ping(Node) of
+ pong ->
+ [Node | init_nodes(Nodes, File, Line)];
+ pang ->
+ [Name, Host] = node_to_name_and_host(Node),
+ case slave_start_link(Host, Name) of
+ {ok, Node1} ->
+ Path = code:get_path(),
+ true = rpc:call(Node1, code, set_path, [Path]),
+ [Node1 | init_nodes(Nodes, File, Line)];
+ Other ->
+ ?skip("Test case (~p(~p)) ignored: cannot start node ~p: ~p~n",
+ [File, Line, Node, Other])
+ end
+ end;
+init_nodes([], _File, _Line) ->
+ [].
+
+%% Returns [Name, Host]
+node_to_name_and_host(Node) ->
+ string:tokens(atom_to_list(Node), [$@]).
+
+lookup_config(Key,Config) ->
+ case lists:keysearch(Key,1,Config) of
+ {value,{Key,Val}} ->
+ Val;
+ _ ->
+ []
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_appls(Appls, Nodes) ->
+ start_appls(Appls, Nodes, [], [schema]).
+
+start_appls(Appls, Nodes, Config) ->
+ start_appls(Appls, Nodes, Config, [schema]).
+
+start_appls([Appl | Appls], Nodes, Config, Tabs) ->
+ {Started, BadStarters} =
+ rpc:multicall(Nodes, ?MODULE, remote_start, [Appl, Config, Nodes]),
+ BadS = [{Node, Appl, Res} || {Node, Res} <- Started, Res /= ok],
+ BadN = [{BadNode, Appl, bad_start} || BadNode <- BadStarters],
+ Bad = BadS ++ BadN,
+ case Appl of
+ mnesia when Bad == [] ->
+ sync_tables(Nodes, Tabs);
+ _ ->
+ ignore
+ end,
+ Bad ++ start_appls(Appls, Nodes, Config, Tabs);
+start_appls([], _Nodes, _Config, _Tabs) ->
+ [].
+
+remote_start(mnesia, Config, Nodes) ->
+ case diskless(Config) of
+ true ->
+ application_controller:set_env(mnesia,
+ extra_db_nodes,
+ Nodes -- [node()]),
+ application_controller:set_env(mnesia,
+ schema_location,
+ ram);
+ false ->
+ application_controller:set_env(mnesia,
+ schema_location,
+ opt_disc),
+ ignore
+ end,
+ {node(), mnesia:start()};
+remote_start(Appl, _Config, _Nodes) ->
+ Res =
+ case application:start(Appl) of
+ {error, {already_started, Appl}} ->
+ ok;
+ Other ->
+ Other
+ end,
+ {node(), Res}.
+
+%% Start Mnesia on all given nodes and wait for specified
+%% tables to be accessible on each node. The atom all means
+%% that we should wait for all tables to be loaded
+%%
+%% Returns a list of error tuples {BadNode, mnesia, Reason}
+start_mnesia(Nodes) ->
+ start_appls([mnesia], Nodes).
+start_mnesia(Nodes, Tabs) when is_list(Nodes) ->
+ start_appls([mnesia], Nodes, [], Tabs).
+
+%% Wait for the tables to be accessible from all nodes in the list,
+%% and for all nodes to be aware that the other nodes can access them too ...
+sync_tables(Nodes, Tabs) ->
+ Res = send_wait(Nodes, Tabs, []),
+ if
+ Res == [] ->
+ mnesia:transaction(fun() -> mnesia:write_lock_table(schema) end),
+ Res;
+ true ->
+ Res
+ end.
+
+send_wait([Node | Nodes], Tabs, Pids) ->
+ Pid = spawn_link(Node, ?MODULE, start_wait, [self(), Tabs]),
+ send_wait(Nodes, Tabs, [Pid | Pids]);
+send_wait([], _Tabs, Pids) ->
+ rec_wait(Pids, []).
+
+rec_wait([Pid | Pids], BadRes) ->
+ receive
+ {'EXIT', Pid, R} ->
+ rec_wait(Pids, [{node(Pid), bad_wait, R} | BadRes]);
+ {Pid, ok} ->
+ rec_wait(Pids, BadRes);
+ {Pid, {error, R}} ->
+ rec_wait(Pids, [{node(Pid), bad_wait, R} | BadRes])
+ end;
+rec_wait([], BadRes) ->
+ BadRes.
+
+start_wait(Coord, Tabs) ->
+ process_flag(trap_exit, true),
+ Mon = whereis(mnesia_monitor),
+ case catch link(Mon) of
+ {'EXIT', _} ->
+ unlink(Coord),
+ Coord ! {self(), {error, {node_not_running, node()}}};
+ _ ->
+ Res = start_wait_loop(Tabs),
+ unlink(Mon),
+ unlink(Coord),
+ Coord ! {self(), Res}
+ end.
+
+start_wait_loop(Tabs) ->
+ receive
+ {'EXIT', Pid, Reason} ->
+ {error, {start_wait, Pid, Reason}}
+ after 0 ->
+ case mnesia:wait_for_tables(Tabs, timer:seconds(30)) of
+ ok ->
+ verify_nodes(Tabs);
+ {timeout, BadTabs} ->
+ log("<>WARNING<> Wait for tables ~p: ~p~n", [node(), Tabs]),
+ start_wait_loop(BadTabs);
+ {error, Reason} ->
+ {error, {start_wait, Reason}}
+ end
+ end.
+
+verify_nodes(Tabs) ->
+ verify_nodes(Tabs, 0).
+
+verify_nodes([], _) ->
+ ok;
+
+verify_nodes([Tab| Tabs], N) ->
+ ?match(X when is_atom(X), mnesia_lib:val({Tab, where_to_read})),
+ Nodes = mnesia:table_info(Tab, where_to_write),
+ Copies =
+ mnesia:table_info(Tab, disc_copies) ++
+ mnesia:table_info(Tab, disc_only_copies) ++
+ mnesia:table_info(Tab, ram_copies),
+ Local = mnesia:table_info(Tab, local_content),
+ case Copies -- Nodes of
+ [] ->
+ verify_nodes(Tabs, 0);
+ _Else when Local == true, Nodes /= [] ->
+ verify_nodes(Tabs, 0);
+ Else ->
+ N2 =
+ if
+ N > 20 ->
+ log("<>WARNING<> ~w Waiting for table: ~p on ~p ~n",
+ [node(), Tab, Else]),
+ 0;
+ true -> N+1
+ end,
+ timer:sleep(500),
+ verify_nodes([Tab| Tabs], N2)
+ end.
+
+
+%% Nicely stop Mnesia on all given nodes
+%%
+%% Returns a list of error tuples {BadNode, Reason}
+stop_mnesia(Nodes) when is_list(Nodes) ->
+ stop_appls([mnesia], Nodes).
+
+stop_appls([Appl | Appls], Nodes) when is_list(Nodes) ->
+ {Stopped, BadNodes} = rpc:multicall(Nodes, ?MODULE, remote_stop, [Appl]),
+ BadS =[{Node, Appl, Res} || {Node, Res} <- Stopped, Res /= stopped],
+ BadN =[{BadNode, Appl, bad_node} || BadNode <- BadNodes],
+ BadS ++ BadN ++ stop_appls(Appls, Nodes);
+stop_appls([], _Nodes) ->
+ [].
+
+remote_stop(mnesia) ->
+ {node(), mnesia:stop()};
+remote_stop(Appl) ->
+ {node(), application:stop(Appl)}.
+
+remote_kill([Appl | Appls]) ->
+ catch Appl:lkill(),
+ application:stop(Appl),
+ remote_kill(Appls);
+remote_kill([]) ->
+ ok.
+
+%% Abruptly kill Mnesia on all given nodes
+%% Returns []
+kill_appls(Appls, Nodes) when is_list(Nodes) ->
+ verbose("<>WARNING<> Intentionally killing ~p: ~w...~n",
+ [Appls, Nodes], ?FILE, ?LINE),
+ rpc:multicall(Nodes, ?MODULE, remote_kill, [Appls]),
+ [].
+
+kill_mnesia(Nodes) when is_list(Nodes) ->
+ kill_appls([mnesia], Nodes).
+
+reload_appls([Appl | Appls], Selected) ->
+ kill_appls([Appl], Selected),
+ timer:sleep(1000),
+ Ok = {[ok || _N <- Selected], []},
+ {Ok2temp, Empty} = rpc:multicall(Selected, application, unload, [Appl]),
+ Conv = fun({error,{not_loaded,mnesia}}) -> ok; (Else) -> Else end,
+ Ok2 = {lists:map(Conv, Ok2temp), Empty},
+
+ Ok3 = rpc:multicall(Selected, application, load, [Appl]),
+ if
+ Ok /= Ok2 ->
+ ?fatal("Cannot unload appl ~p: ~p~n", [Appl, Ok2]);
+ Ok /= Ok3 ->
+ ?fatal("Cannot load appl ~p: ~p~n", [Appl, Ok3]);
+ true ->
+ ok
+ end,
+ reload_appls(Appls, Selected);
+reload_appls([], _Selected) ->
+ ok.
+
+shutdown() ->
+ log("<>WARNING<> Intentionally shutting down all nodes... ~p~n",
+ [nodes() ++ [node()]]),
+ rpc:multicall(nodes(), erlang, halt, []),
+ erlang:halt().
+
+verify_mnesia(Ups, Downs, File, Line) when is_list(Ups), is_list(Downs) ->
+ BadUps =
+ [N || N <- Ups, rpc:call(N, mnesia, system_info, [is_running]) /= yes],
+ BadDowns =
+ [N || N <- Downs, rpc:call(N, mnesia, system_info, [is_running]) == yes],
+ if
+ BadUps == [] ->
+ ignore;
+ true ->
+ error("Mnesia is not running as expected: ~p~n",
+ [BadUps], File, Line)
+ end,
+ if
+ BadDowns == [] ->
+ ignore;
+ true ->
+ error("Mnesia is not stopped as expected: ~p~n",
+ [BadDowns], File, Line)
+ end,
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+verify_replica_location(Tab, [], [], [], _) ->
+ ?match({'EXIT', _}, mnesia:table_info(Tab, ram_copies)),
+ ?match({'EXIT', _}, mnesia:table_info(Tab, disc_copies)),
+ ?match({'EXIT', _}, mnesia:table_info(Tab, disc_only_copies)),
+ ?match({'EXIT', _}, mnesia:table_info(Tab, where_to_write)),
+ ?match({'EXIT', _}, mnesia:table_info(Tab, where_to_read)),
+ [];
+
+verify_replica_location(Tab, DiscOnly0, Ram0, Disc0, AliveNodes0) ->
+%% sync_tables(AliveNodes0, [Tab]),
+ AliveNodes = lists:sort(AliveNodes0),
+ DiscOnly = lists:sort(DiscOnly0),
+ Ram = lists:sort(Ram0),
+ Disc = lists:sort(Disc0),
+ Write = ignore_dead(DiscOnly ++ Ram ++ Disc, AliveNodes),
+ Read = ignore_dead(DiscOnly ++ Ram ++ Disc, AliveNodes),
+ This = node(),
+
+ timer:sleep(100),
+
+ S1 = ?match(AliveNodes, lists:sort(mnesia:system_info(running_db_nodes))),
+ S2 = ?match(DiscOnly, lists:sort(mnesia:table_info(Tab, disc_only_copies))),
+ S3 = ?match(Ram, lists:sort(mnesia:table_info(Tab, ram_copies))),
+ S4 = ?match(Disc, lists:sort(mnesia:table_info(Tab, disc_copies))),
+ S5 = ?match(Write, lists:sort(mnesia:table_info(Tab, where_to_write))),
+ S6 = case lists:member(This, Read) of
+ true ->
+ ?match(This, mnesia:table_info(Tab, where_to_read));
+ false ->
+ ?match(true, lists:member(mnesia:table_info(Tab, where_to_read), Read))
+ end,
+ lists:filter(fun({success,_}) -> false; (_) -> true end, [S1,S2,S3,S4,S5,S6]).
+
+ignore_dead(Nodes, AliveNodes) ->
+ Filter = fun(Node) -> lists:member(Node, AliveNodes) end,
+ lists:sort(lists:zf(Filter, Nodes)).
+
+
+remote_activate_debug_fun(N, I, F, C, File, Line) ->
+ Pid = spawn_link(N, ?MODULE, do_remote_activate_debug_fun, [self(), I, F, C, File, Line]),
+ receive
+ {activated, Pid} -> ok;
+ {'EXIT', Pid, Reason} -> {error, Reason}
+ end.
+
+do_remote_activate_debug_fun(From, I, F, C, File, Line) ->
+ mnesia_lib:activate_debug_fun(I, F, C, File, Line),
+ From ! {activated, self()},
+ timer:sleep(infinity). % Dies whenever the test process dies !!
+
+
+sort(L) when is_list(L) ->
+ lists:sort(L);
+sort({atomic, L}) when is_list(L) ->
+ {atomic, lists:sort(L)};
+sort({ok, L}) when is_list(L) ->
+ {ok, lists:sort(L)};
+sort(W) ->
+ W.
diff --git a/lib/mnesia/test/mnesia_test_lib.hrl b/lib/mnesia/test/mnesia_test_lib.hrl
new file mode 100644
index 0000000000..85f12200d4
--- /dev/null
+++ b/lib/mnesia/test/mnesia_test_lib.hrl
@@ -0,0 +1,132 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-define(log(Format,Args),mnesia_test_lib:log(Format,Args,?FILE,?LINE)).
+-define(warning(Format,Args),?log("<>WARNING<>~n " ++ Format,Args)).
+-define(error(Format,Args),
+ mnesia_test_lib:error(Format,Args,?FILE,?LINE)).
+-define(verbose(Format,Args),mnesia_test_lib:verbose(Format,Args,?FILE,?LINE)).
+
+-define(fatal(Format,Args),
+ ?error(Format, Args),
+ exit({test_case_fatal, Format, Args, ?FILE, ?LINE})).
+
+-define(skip(Format,Args),
+ ?warning(Format, Args),
+ exit({skipped, ?flat_format(Format, Args)})).
+
+-define(flat_format(Format,Args),
+ lists:flatten(io_lib:format(Format, Args))).
+
+-define(sort(What), mnesia_test_lib:sort(What)).
+
+-define(ignore(Expr),
+ fun() ->
+ AcTuAlReS = (catch (Expr)),
+ ?verbose("ok, ~n Result as expected:~p~n",[AcTuAlReS]),
+ AcTuAlReS
+ end()).
+
+-define(match(ExpectedRes,Expr),
+ fun() ->
+ AcTuAlReS = (catch (Expr)),
+ case AcTuAlReS of
+ ExpectedRes ->
+ ?verbose("ok, ~n Result as expected:~p~n",[AcTuAlReS]),
+ {success,AcTuAlReS};
+ _ ->
+ ?error("Not Matching Actual result was:~n ~p~n",
+ [AcTuAlReS]),
+ {fail,AcTuAlReS}
+ end
+ end()).
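+%% Usage sketch: the first argument is a pattern, so it may contain unbound
+%% variables and even a guard. Two examples taken from the surrounding test
+%% code:
+%% ?match({atomic, ok}, mnesia:create_table(Schema)),
+%% ?match(X when is_atom(X), mnesia_lib:val({Tab, where_to_read}))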
+
+-define(match_inverse(NotExpectedRes,Expr),
+ fun() ->
+ AcTuAlReS = (catch (Expr)),
+ case AcTuAlReS of
+ NotExpectedRes ->
+ ?error("Not matching Actual result was:~n ~p~n",
+ [AcTuAlReS]),
+ {fail,AcTuAlReS};
+ _ ->
+ ?verbose("ok, ~n Result as expected: ~p~n",[AcTuAlReS]),
+ {success,AcTuAlReS}
+ end
+ end()).
+
+-define(match_receive(ExpectedMsg),
+ ?match(ExpectedMsg,mnesia_test_lib:pick_msg())).
+
+%% ExpectedMsgs must be completely bound
+-define(match_multi_receive(ExpectedMsgs),
+ fun() ->
+ TmPeXpCtEdMsGs = lists:sort(ExpectedMsgs),
+ ?match(TmPeXpCtEdMsGs,
+ lists:sort(lists:map(fun(_) ->
+ mnesia_test_lib:pick_msg()
+ end,
+ TmPeXpCtEdMsGs)))
+ end()).
+
+-define(start_activities(Nodes),
+ mnesia_test_lib:start_activities(Nodes)).
+
+-define(start_transactions(Pids),
+ mnesia_test_lib:start_transactions(Pids)).
+
+-define(acquire_nodes(N, Config),
+ mnesia_test_lib:prepare_test_case([{init_test_case, [mnesia]},
+ delete_schema,
+ create_schema,
+ {start_appls, [mnesia]}],
+ N, Config, ?FILE, ?LINE)).
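+%% Typically used as the first line of a test case, e.g. (cf. the suites):
+%% Nodes = ?acquire_nodes(2, Config)
+%% which recreates the schema, starts Mnesia on the nodes and returns them.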
+
+-define(activate_debug_fun(I, F, C),
+ mnesia_lib:activate_debug_fun(I, F, C, ?FILE, ?LINE)).
+
+-define(remote_activate_debug_fun(N, I, F, C),
+ ?match(ok, mnesia_test_lib:remote_activate_debug_fun(N, I, F, C,
+ ?FILE, ?LINE))).
+
+-define(deactivate_debug_fun(I),
+ mnesia_lib:deactivate_debug_fun(I, ?FILE, ?LINE)).
+
+-define(remote_deactivate_debug_fun(N, I),
+ rpc:call(N, mnesia_lib, deactivate_debug_fun, [I, ?FILE, ?LINE])).
+
+-define(is_debug_compiled,
+ case mnesia_lib:is_debug_compiled() of
+ false ->
+ ?skip("Mnesia is not debug compiled, test case ignored.~n", []);
+ _OhTeR ->
+ ok
+ end).
+
+-define(needs_disc(Config),
+ case mnesia_test_lib:diskless(Config) of
+ false ->
+ ok;
+ true ->
+ ?skip("Must have disc, test case ignored.~n", [])
+ end).
+
+-define(verify_mnesia(Ups, Downs),
+ mnesia_test_lib:verify_mnesia(Ups, Downs, ?FILE, ?LINE)).
diff --git a/lib/mnesia/test/mnesia_tpcb.erl b/lib/mnesia/test/mnesia_tpcb.erl
new file mode 100644
index 0000000000..903c53a21c
--- /dev/null
+++ b/lib/mnesia/test/mnesia_tpcb.erl
@@ -0,0 +1,1268 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% MODULE
+%%
+%% mnesia_tpcb - TPC-B benchmarking of Mnesia
+%%
+%% DESCRIPTION
+%%
+%% The metric used in the TPC-B benchmark is throughput, as measured
+%% in transactions per second (TPS). The benchmark uses a single,
+%% simple update-intensive transaction to load the database system.
+%% The single transaction type provides a simple, repeatable
+%% unit of work, and is designed to exercise the basic components of
+%% a database system.
+%%
+%% The definition of the TPC-B states lots of detailed rules and
+%% conditions that must be fulfilled, e.g. how the ACID (atomicity,
+%% consistency, isolation and durability) properties are verified,
+%% how the random numbers must be distributed, minimum sizes of
+%% the different types of records, minimum duration of the benchmark,
+%% formulas to calculate prices (dollars per tps), disclosure issues
+%% etc. Please see http://www.tpc.org/ for the nitty-gritty details.
+%%
+%% The TPC-B benchmark is stated in terms of a hypothetical bank. The
+%% bank has one or more branches. Each branch has multiple tellers. The
+%% bank has many customers, each with an account. The database represents
+%% the cash position of each entity (branch, teller and account) and a
+%% history of recent transactions run by the bank. The transaction
+%% represents the work done when a customer makes a deposit or a
+%% withdrawal against his account. The transaction is performed by a
+%% teller at some branch.
+%%
+%% Each process that performs TPC-B transactions is called a driver.
+%% Drivers generate teller_id, account_id and delta amount of
+%% money randomly. An account, a teller and a branch are read, their
+%% balances are adjusted and a history record is created. The driver
+%% measures the time for 3 reads, 3 writes and 1 create.
+%%
+%% GETTING STARTED
+%%
+%% Generate tables and run with default configuration:
+%%
+%% mnesia_tpcb:start().
+%%
+%% A little bit more advanced;
+%%
+%% spawn(mnesia_tpcb, start, [[{n_drivers_per_node, 8}, {stop_after, infinity}]]),
+%% mnesia_tpcb:stop().
+%%
+%% Really advanced;
+%%
+%% mnesia_tpcb:init([{n_branches, 8}, {replica_type, disc_only_copies}]),
+%% mnesia_tpcb:run([{n_drivers_per_node, 8}]),
+%% mnesia_tpcb:run([{n_drivers_per_node, 64}]).
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-module(mnesia_tpcb).
+-author('[email protected]').
+
+-export([
+ config/2,
+ count_balance/0,
+ driver_init/2,
+ init/1,
+ reporter_init/2,
+ run/1,
+ start/0,
+ start/1,
+ start/2,
+ stop/0,
+ real_trans/5,
+ verify_tabs/0,
+ reply_gen_branch/3,
+ frag_add_delta/7,
+
+ conflict_test/1,
+ dist_test/1,
+ replica_test/1,
+ sticky_replica_test/1,
+ remote_test/1,
+ remote_frag2_test/1
+ ]).
+
+-define(SECOND, 1000000).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Account record, total size must be at least 100 bytes
+
+-define(ACCOUNT_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234}).
+
+-record(account,
+ {
+ id = 0, % Unique account id
+ branch_id = 0, % Branch where the account is held
+ balance = 0, % Account balance
+ filler = ?ACCOUNT_FILLER % Gap filler to ensure size >= 100 bytes
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Branch record, total size must be at least 100 bytes
+
+-define(BRANCH_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890}).
+
+-record(branch,
+ {
+ id = 0, % Unique branch id
+ balance = 0, % Total balance of whole branch
+ filler = ?BRANCH_FILLER % Gap filler to ensure size >= 100 bytes
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Teller record, total size must be at least 100 bytes
+
+-define(TELLER_FILLER,
+ {123456789012345678901234567890123456789012345678901234567890,
+ 123456789012345678901234567890123456789012345678901234567890,
+ 1234567890123456789012345678901234567890123456789012345678}).
+
+-record(teller,
+ {
+ id = 0, % Unique teller id
+ branch_id = 0, % Branch where the teller is located
+ balance = 0, % Teller balance
+ filler = ?TELLER_FILLER % Gap filler to ensure size >= 100 bytes
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% History record, total size must be at least 50 bytes
+
+-define(HISTORY_FILLER, 1234567890).
+
+-record(history,
+ {
+ history_id = {0, 0}, % {DriverId, DriverLocalHistoryid}
+ time_stamp = now(), % Time point during active transaction
+ branch_id = 0, % Branch associated with teller
+ teller_id = 0, % Teller involved in transaction
+ account_id = 0, % Account updated by transaction
+ amount = 0, % Amount (delta) specified by transaction
+ filler = ?HISTORY_FILLER % Gap filler to ensure size >= 50 bytes
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-record(tab_config,
+ {
+ db_nodes = [node()],
+ n_replicas = 1, % Ignored for non-fragmented tables
+ replica_nodes = [node()],
+ replica_type = ram_copies,
+ use_running_mnesia = false,
+ n_fragments = 0,
+ n_branches = 1,
+ n_tellers_per_branch = 10, % Must be 10
+ n_accounts_per_branch = 100000, % Must be 100000
+ branch_filler = ?BRANCH_FILLER,
+ account_filler = ?ACCOUNT_FILLER,
+ teller_filler = ?TELLER_FILLER
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(run_config,
+ {
+ driver_nodes = [node()],
+ n_drivers_per_node = 1,
+ use_running_mnesia = false,
+ stop_after = timer:minutes(15), % Minimum 15 min
+ report_interval = timer:minutes(1),
+ use_sticky_locks = false,
+ spawn_near_branch = false,
+ activity_type = transaction,
+ reuse_history_id = false
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(time,
+ {
+ n_trans = 0,
+ min_n = 0,
+ max_n = 0,
+ acc_time = 0,
+ max_time = 0
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(driver_state,
+ {
+ driver_id,
+ driver_node,
+ seed,
+ n_local_branches,
+ local_branches,
+ tab_config,
+ run_config,
+ history_id,
+ time = #time{},
+ acc_time = #time{},
+ reuse_history_id
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(reporter_state,
+ {
+ driver_pids,
+ starter_pid,
+ n_iters = 0,
+ prev_tps = 0,
+ curr = #time{},
+ acc = #time{},
+ init_micros,
+ prev_micros,
+ run_config
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on each node, table not replicated
+
+config(frag_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {n_branches, length(Nodes)},
+ {n_fragments, length(Nodes)},
+ {replica_nodes, Nodes},
+ {db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on each node, table replicated to two nodes.
+
+config(frag2_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {n_branches, length(Nodes)},
+ {n_fragments, length(Nodes)},
+ {n_replicas, 2},
+ {replica_nodes, Nodes},
+ {db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on this node, table replicated to all nodes.
+
+config(replica_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {db_nodes, Nodes},
+ {driver_nodes, [Local]},
+ {replica_nodes, Nodes},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on this node, table replicated to all nodes.
+
+config(sticky_replica_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {db_nodes, Nodes},
+ {driver_nodes, [node()]},
+ {replica_nodes, Nodes},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {use_sticky_locks, true},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Ten drivers per node, tables replicated to all nodes, lots of branches
+
+config(dist_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 10},
+ {n_branches, 10 * length(Nodes) * 100},
+ {n_accounts_per_branch, 10},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Ten drivers per node, tables replicated to all nodes, single branch
+
+config(conflict_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {db_nodes, Nodes},
+ {driver_nodes, Nodes},
+ {replica_nodes, Nodes},
+ {n_drivers_per_node, 10},
+ {n_branches, 1},
+ {n_accounts_per_branch, 10},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on this node, table replicated to all other nodes.
+
+config(remote_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {db_nodes, Nodes},
+ {driver_nodes, [Local]},
+ {replica_nodes, Remote},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ];
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% One driver on this node, table replicated to two other nodes.
+
+config(remote_frag2_test, ReplicaType) ->
+ Remote = nodes(),
+ Local = node(),
+ Nodes = [Local | Remote],
+ [
+ {n_branches, length(Remote)},
+ {n_fragments, length(Remote)},
+ {n_replicas, 2},
+ {replica_nodes, Remote},
+ {db_nodes, Nodes},
+ {driver_nodes, [Local]},
+ {n_accounts_per_branch, 100},
+ {replica_type, ReplicaType},
+ {stop_after, timer:minutes(1)},
+ {report_interval, timer:seconds(10)},
+ {reuse_history_id, true}
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start(What, ReplicaType) ->
+ spawn_link(?MODULE, start, [config(What, ReplicaType)]).
+
+replica_test(ReplicaType) ->
+ start(replica_test, ReplicaType).
+
+sticky_replica_test(ReplicaType) ->
+ start(sticky_replica_test, ReplicaType).
+
+dist_test(ReplicaType) ->
+ start(dist_test, ReplicaType).
+
+conflict_test(ReplicaType) ->
+ start(conflict_test, ReplicaType).
+
+remote_test(ReplicaType) ->
+ start(remote_test, ReplicaType).
+
+remote_frag2_test(ReplicaType) ->
+ start(remote_frag2_test, ReplicaType).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Args is a list of {Key, Val} tuples where Key is a field name
+%% in either the record tab_config or run_config. Unknown keys are ignored.
+
+start() ->
+ start([]).
+start(Args) ->
+ init(Args),
+ run(Args).
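+
+%% For example, a short local run could be configured like this; every key
+%% below is a field of tab_config or run_config, and the values are only an
+%% illustration:
+%%
+%% mnesia_tpcb:start([{n_branches, 2},
+%% {n_drivers_per_node, 2},
+%% {replica_type, ram_copies},
+%% {stop_after, timer:seconds(30)},
+%% {report_interval, timer:seconds(10)}]).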
+
+list2rec(List, Fields, DefaultTuple) ->
+ [Name|Defaults] = tuple_to_list(DefaultTuple),
+ List2 = list2rec(List, Fields, Defaults, []),
+ list_to_tuple([Name] ++ List2).
+
+list2rec(_List, [], [], Acc) ->
+ Acc;
+list2rec(List, [F|Fields], [D|Defaults], Acc) ->
+ {Val, List2} =
+ case lists:keysearch(F, 1, List) of
+ false ->
+ {D, List};
+ {value, {F, NewVal}} ->
+ {NewVal, lists:keydelete(F, 1, List)}
+ end,
+ list2rec(List2, Fields, Defaults, Acc ++ [Val]).
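+
+%% A small sketch of what list2rec/3 does (hypothetical input):
+%%
+%% list2rec([{n_branches, 5}], record_info(fields, tab_config), #tab_config{})
+%%
+%% returns a #tab_config{} where n_branches is 5 and every other field keeps
+%% its default value; keys that do not name a field are silently dropped.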
+
+stop() ->
+ case whereis(mnesia_tpcb) of
+ undefined ->
+ {error, not_running};
+ Pid ->
+ sync_stop(Pid)
+ end.
+
+sync_stop(Pid) ->
+ Pid ! {self(), stop},
+ receive
+ {Pid, {stopped, Res}} -> Res
+ after timer:minutes(1) ->
+ exit(Pid, kill),
+ {error, brutal_kill}
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Initialization
+
+%% Args is a list of {Key, Val} tuples where Key is a field name
+%% in the record tab_config, unknown keys are ignored.
+
+init(Args) ->
+ TabConfig0 = list2rec(Args, record_info(fields, tab_config), #tab_config{}),
+ TabConfig =
+ if
+ TabConfig0#tab_config.n_fragments =:= 0 ->
+ TabConfig0#tab_config{n_replicas = length(TabConfig0#tab_config.replica_nodes)};
+ true ->
+ TabConfig0
+ end,
+ Tags = record_info(fields, tab_config),
+ Fun = fun(F, Pos) -> {{F, element(Pos, TabConfig)}, Pos + 1} end,
+ {List, _} = lists:mapfoldl(Fun, 2, Tags),
+ io:format("TPC-B: Table config: ~p ~n", [List]),
+
+ DbNodes = TabConfig#tab_config.db_nodes,
+ stop(),
+ if
+ TabConfig#tab_config.use_running_mnesia =:= true ->
+ ignore;
+ true ->
+ rpc:multicall(DbNodes, mnesia, lkill, []),
+ case mnesia:delete_schema(DbNodes) of
+ ok ->
+ case mnesia:create_schema(DbNodes) of
+ ok ->
+ {Replies, BadNodes} =
+ rpc:multicall(DbNodes, mnesia, start, []),
+ case [Res || Res <- Replies, Res =/= ok] of
+ [] when BadNodes =:= [] ->
+ ok;
+ BadRes ->
+ io:format("TPC-B: <ERROR> "
+ "Failed to start ~p: ~p~n",
+ [BadNodes, BadRes]),
+ exit({start_failed, BadRes, BadNodes})
+ end;
+ {error, Reason} ->
+ io:format("TPC-B: <ERROR> "
+ "Failed to create schema on disc: ~p~n",
+ [Reason]),
+ exit({create_schema_failed, Reason})
+ end;
+ {error, Reason} ->
+ io:format("TPC-B: <ERROR> "
+ "Failed to delete schema on disc: ~p~n",
+ [Reason]),
+ exit({delete_schema_failed, Reason})
+ end
+ end,
+ gen_tabs(TabConfig).
+
+gen_tabs(TC) ->
+ create_tab(TC, branch, record_info(fields, branch),
+ undefined),
+ create_tab(TC, account, record_info(fields, account),
+ {branch, #account.branch_id}),
+ create_tab(TC, teller, record_info(fields, teller),
+ {branch, #teller.branch_id}),
+ create_tab(TC, history, record_info(fields, history),
+ {branch, #history.branch_id}),
+
+ NB = TC#tab_config.n_branches,
+ NT = TC#tab_config.n_tellers_per_branch,
+ NA = TC#tab_config.n_accounts_per_branch,
+ io:format("TPC-B: Generating ~p branches a ~p bytes~n",
+ [NB, size(term_to_binary(default_branch(TC)))]),
+ io:format("TPC-B: Generating ~p * ~p tellers a ~p bytes~n",
+ [NB, NT, size(term_to_binary(default_teller(TC)))]),
+ io:format("TPC-B: Generating ~p * ~p accounts a ~p bytes~n",
+ [NB, NA, size(term_to_binary(default_account(TC)))]),
+ io:format("TPC-B: Generating 0 history records a ~p bytes~n",
+ [size(term_to_binary(default_history(TC)))]),
+ gen_branches(TC),
+
+ case verify_tabs() of
+ ok ->
+ ignore;
+ {error, Reason} ->
+ io:format("TPC-B: <ERROR> Inconsistent tables: ~w~n",
+ [Reason]),
+ exit({inconsistent_tables, Reason})
+ end.
+
+create_tab(TC, Name, Attrs, _ForeignKey) when TC#tab_config.n_fragments =:= 0 ->
+ Nodes = TC#tab_config.replica_nodes,
+ Type = TC#tab_config.replica_type,
+ Def = [{Type, Nodes}, {attributes, Attrs}],
+ create_tab(Name, Def);
+create_tab(TC, Name, Attrs, ForeignKey) ->
+ NReplicas = TC#tab_config.n_replicas,
+ NodePool = TC#tab_config.replica_nodes,
+ Type = TC#tab_config.replica_type,
+ NF = TC#tab_config.n_fragments,
+ Props = [{n_fragments, NF},
+ {node_pool, NodePool},
+ {n_copies(Type), NReplicas},
+ {foreign_key, ForeignKey}],
+ Def = [{frag_properties, Props},
+ {attributes, Attrs}],
+ create_tab(Name, Def).
+
+create_tab(Name, Def) ->
+ mnesia:delete_table(Name),
+ case mnesia:create_table(Name, Def) of
+ {atomic, ok} ->
+ ok;
+ {aborted, Reason} ->
+ io:format("TPC-B: <ERROR> failed to create table ~w ~w: ~p~n",
+ [Name, Def, Reason]),
+ exit({create_table_failed, Reason})
+ end.
+
+n_copies(Type) ->
+ case Type of
+ ram_copies -> n_ram_copies;
+ disc_copies -> n_disc_copies;
+ disc_only_copies -> n_disc_only_copies
+ end.
+
+gen_branches(TC) ->
+ First = 0,
+ Last = First + TC#tab_config.n_branches - 1,
+ GenPids = gen_branches(TC, First, Last, []),
+ wait_for_gen(GenPids).
+
+wait_for_gen([]) ->
+ ok;
+wait_for_gen(Pids) ->
+ receive
+ {branch_generated, Pid} -> wait_for_gen(lists:delete(Pid, Pids));
+ Exit ->
+ exit({tpcb_failed, Exit})
+ end.
+
+gen_branches(TC, BranchId, Last, UsedNs) when BranchId =< Last ->
+ UsedNs2 = get_branch_nodes(BranchId, UsedNs),
+ Node = hd(UsedNs2),
+ Pid = spawn_link(Node, ?MODULE, reply_gen_branch,
+ [self(), TC, BranchId]),
+ [Pid | gen_branches(TC, BranchId + 1, Last, UsedNs2)];
+gen_branches(_, _, _, _) ->
+ [].
+
+reply_gen_branch(ReplyTo, TC, BranchId) ->
+ gen_branch(TC, BranchId),
+ ReplyTo ! {branch_generated, self()},
+ unlink(ReplyTo).
+
+%% Returns a new list of nodes with the best node as head
+get_branch_nodes(BranchId, UsedNs) ->
+ WriteNs = table_info({branch, BranchId}, where_to_write),
+ WeightedNs = [{n_duplicates(N, UsedNs, 0), N} || N <- WriteNs],
+ [{_, LeastUsed} | _ ] = lists:sort(WeightedNs),
+ [LeastUsed | UsedNs].
+
+n_duplicates(_N, [], Count) ->
+ Count;
+n_duplicates(N, [N | Tail], Count) ->
+ n_duplicates(N, Tail, Count + 1);
+n_duplicates(N, [_ | Tail], Count) ->
+ n_duplicates(N, Tail, Count).
+
+gen_branch(TC, BranchId) ->
+ A = default_account(TC),
+ NA = TC#tab_config.n_accounts_per_branch,
+ FirstA = BranchId * NA,
+ ArgsA = [FirstA, FirstA + NA - 1, BranchId, A],
+ ok = mnesia:activity(async_dirty, fun gen_accounts/4, ArgsA, mnesia_frag),
+
+ T = default_teller(TC),
+ NT = TC#tab_config.n_tellers_per_branch,
+ FirstT = BranchId * NT,
+ ArgsT = [FirstT, FirstT + NT - 1, BranchId, T],
+ ok = mnesia:activity(async_dirty, fun gen_tellers/4, ArgsT, mnesia_frag),
+
+ B = default_branch(TC),
+ FunB = fun() -> mnesia:write(branch, B#branch{id = BranchId}, write) end,
+ ok = mnesia:activity(sync_dirty, FunB, [], mnesia_frag).
+
+gen_tellers(Id, Last, BranchId, T) when Id =< Last ->
+ mnesia:write(teller, T#teller{id = Id, branch_id=BranchId}, write),
+ gen_tellers(Id + 1, Last, BranchId, T);
+gen_tellers(_, _, _, _) ->
+ ok.
+
+gen_accounts(Id, Last, BranchId, A) when Id =< Last ->
+ mnesia:write(account, A#account{id = Id, branch_id=BranchId}, write),
+ gen_accounts(Id + 1, Last, BranchId, A);
+gen_accounts(_, _, _, _) ->
+ ok.
+
+default_branch(TC) -> #branch{filler = TC#tab_config.branch_filler}.
+default_teller(TC) -> #teller{filler = TC#tab_config.teller_filler}.
+default_account(TC) -> #account{filler = TC#tab_config.account_filler}.
+default_history(_TC) -> #history{}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Run the benchmark
+
+%% Args is a list of {Key, Val} tuples where Key is a field name
+%% in the record run_config, unknown keys are ignored.
+run(Args) ->
+ RunConfig = list2rec(Args, record_info(fields, run_config), #run_config{}),
+ Tags = record_info(fields, run_config),
+ Fun = fun(F, Pos) -> {{F, element(Pos, RunConfig)}, Pos + 1} end,
+ {List, _} = lists:mapfoldl(Fun, 2, Tags),
+ io:format("TPC-B: Run config: ~p ~n", [List]),
+
+ Pid = spawn_link(?MODULE, reporter_init, [self(), RunConfig]),
+ receive
+ {Pid, {stopped, Res}} ->
+ Res; % Stopped by other process
+ Else ->
+ {tpcb_got, Else}
+ after RunConfig#run_config.stop_after ->
+ sync_stop(Pid)
+ end.
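+
+%% If the tables already exist (for instance created by an earlier init/1),
+%% the benchmark can be re-run against the running system without touching
+%% the schema, e.g.:
+%%
+%% mnesia_tpcb:run([{n_drivers_per_node, 4}, {use_running_mnesia, true}]).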
+
+reporter_init(Starter, RC) ->
+ register(mnesia_tpcb, self()),
+ process_flag(trap_exit, true),
+ DbNodes = mnesia:system_info(db_nodes),
+ if
+ RC#run_config.use_running_mnesia =:= true ->
+ ignore;
+ true ->
+ {Replies, BadNodes} =
+ rpc:multicall(DbNodes, mnesia, start, []),
+ case [Res || Res <- Replies, Res =/= ok] of
+ [] when BadNodes =:= [] ->
+ ok;
+ BadRes ->
+ io:format("TPC-B: <ERROR> "
+ "Failed to start ~w: ~p~n",
+ [BadNodes, BadRes]),
+ exit({start_failed, BadRes, BadNodes})
+ end,
+ verify_tabs()
+ end,
+
+ N = table_info(branch, size),
+ NT = table_info(teller, size) div N,
+ NA = table_info(account, size) div N,
+
+ {Type, NF, RepNodes} = table_storage(branch),
+ TC = #tab_config{n_fragments = NF,
+ n_branches = N,
+ n_tellers_per_branch = NT,
+ n_accounts_per_branch = NA,
+ db_nodes = DbNodes,
+ replica_nodes = RepNodes,
+ replica_type = Type
+ },
+ Drivers = start_drivers(RC, TC),
+ Now = now_to_micros(erlang:now()),
+ State = #reporter_state{driver_pids = Drivers,
+ run_config = RC,
+ starter_pid = Starter,
+ init_micros = Now,
+ prev_micros = Now
+ },
+ case catch reporter_loop(State) of
+ {'EXIT', Reason} ->
+ io:format("TPC-B: Abnormal termination: ~p~n", [Reason]),
+ if
+ RC#run_config.use_running_mnesia =:= true ->
+ ignore;
+ true ->
+ rpc:multicall(DbNodes, mnesia, lkill, [])
+ end,
+ unlink(Starter),
+ Starter ! {self(), {stopped, {error, Reason}}}, % To be sure
+ exit(shutdown);
+ {ok, Stopper, State2} ->
+ Time = State2#reporter_state.acc,
+ Res =
+ case verify_tabs() of
+ ok ->
+ {ok, Time};
+ {error, Reason} ->
+ io:format("TPC-B: <ERROR> Inconsistent tables, ~p~n",
+ [{error, Reason}]),
+ {error, Reason}
+ end,
+ if
+ RC#run_config.use_running_mnesia =:= true ->
+ ignore;
+ true ->
+ rpc:multicall(DbNodes, mnesia, stop, [])
+ end,
+ unlink(Starter),
+ Starter ! {self(), {stopped, Res}},
+ if
+ Stopper =/= Starter ->
+ Stopper ! {self(), {stopped, Res}};
+ true ->
+ ignore
+ end,
+ exit(shutdown)
+ end.
+
+table_info(Tab, Item) ->
+ Fun = fun() -> mnesia:table_info(Tab, Item) end,
+ mnesia:activity(sync_dirty, Fun, mnesia_frag).
+
+%% Returns {Storage, NFragments, NReplicas}
+table_storage(Tab) ->
+ case mnesia:table_info(branch, frag_properties) of
+ [] ->
+ NFO = 0,
+ NR = length(mnesia:table_info(Tab, ram_copies)),
+ ND = length(mnesia:table_info(Tab, disc_copies)),
+ NDO = length(mnesia:table_info(Tab, disc_only_copies)),
+ if
+ NR =/= 0 -> {ram_copies, NFO, NR};
+ ND =/= 0 -> {disc_copies, NFO, ND};
+ NDO =/= 0 -> {disc_only_copies, NFO, NDO}
+ end;
+ Props ->
+ {value, NFO} = lists:keysearch(n_fragments, 1, Props),
+ NR = table_info(Tab, n_ram_copies),
+ ND = table_info(Tab, n_disc_copies),
+ NDO = table_info(Tab, n_disc_only_copies),
+ if
+ NR =/= 0 -> {ram_copies, NFO, NR};
+ ND =/= 0 -> {disc_copies, NFO, ND};
+ NDO =/= 0 -> {disc_only_copies, NFO, NDO}
+ end
+ end.
+
+reporter_loop(State) ->
+ RC = State#reporter_state.run_config,
+ receive
+ {From, stop} ->
+ {ok, From, call_drivers(State, stop)};
+ {'EXIT', Pid, Reason} when Pid =:= State#reporter_state.starter_pid ->
+ %% call_drivers(State, stop),
+ exit({starter_died, Pid, Reason})
+ after RC#run_config.report_interval ->
+ Iters = State#reporter_state.n_iters,
+ State2 = State#reporter_state{n_iters = Iters + 1},
+ case call_drivers(State2, report) of
+ State3 when State3#reporter_state.driver_pids =/= [] ->
+ State4 = State3#reporter_state{curr = #time{}},
+ reporter_loop(State4);
+ _ ->
+ exit(drivers_died)
+ end
+ end.
+
+call_drivers(State, Msg) ->
+ Drivers = State#reporter_state.driver_pids,
+ lists:foreach(fun(Pid) -> Pid ! {self(), Msg} end, Drivers),
+ State2 = show_report(calc_reports(Drivers, State)),
+ case Msg =:= stop of
+ true ->
+ Acc = State2#reporter_state.acc,
+ Init = State2#reporter_state.init_micros,
+ show_report(State2#reporter_state{n_iters = 0,
+ curr = Acc,
+ prev_micros = Init});
+ false ->
+ ignore
+ end,
+ State2.
+
+calc_reports([], State) ->
+ State;
+calc_reports([Pid|Drivers], State) ->
+ receive
+ {'EXIT', P, Reason} when P =:= State#reporter_state.starter_pid ->
+ exit({starter_died, P, Reason});
+ {'EXIT', Pid, Reason} ->
+ exit({driver_died, Pid, Reason});
+ {Pid, Time} when is_record(Time, time) ->
+ %% io:format("~w: ~w~n", [Pid, Time]),
+ A = add_time(State#reporter_state.acc, Time),
+ C = add_time(State#reporter_state.curr, Time),
+ State2 = State#reporter_state{acc = A, curr = C},
+ calc_reports(Drivers, State2)
+ end.
+
+add_time(Acc, New) ->
+ Acc#time{n_trans = New#time.n_trans + Acc#time.n_trans,
+ min_n = lists:min([New#time.n_trans, Acc#time.min_n] -- [0]),
+ max_n = lists:max([New#time.n_trans, Acc#time.max_n]),
+ acc_time = New#time.acc_time + Acc#time.acc_time,
+ max_time = lists:max([New#time.max_time, Acc#time.max_time])}.
+
+-define(AVOID_DIV_ZERO(_What_), try (_What_) catch _:_ -> 0 end).
+
+show_report(State) ->
+ Now = now_to_micros(erlang:now()),
+ Iters = State#reporter_state.n_iters,
+ Time = State#reporter_state.curr,
+ Max = Time#time.max_time,
+ N = Time#time.n_trans,
+ Avg = ?AVOID_DIV_ZERO(Time#time.acc_time div N),
+ AliveN = length(State#reporter_state.driver_pids),
+ Tps = ?AVOID_DIV_ZERO((?SECOND * AliveN) div Avg),
+ PrevTps= State#reporter_state.prev_tps,
+ {DiffSign, DiffTps} = signed_diff(Iters, Tps, PrevTps),
+ Unfairness = ?AVOID_DIV_ZERO(Time#time.max_n / Time#time.min_n),
+ BruttoAvg = ?AVOID_DIV_ZERO((Now - State#reporter_state.prev_micros) div N),
+%% io:format("n_iters=~p, n_trans=~p, n_drivers=~p, avg=~p, now=~p, prev=~p~n",
+%% [Iters, N, AliveN, BruttoAvg, Now, State#reporter_state.prev_micros]),
+ BruttoTps = ?AVOID_DIV_ZERO(?SECOND div BruttoAvg),
+ case Iters > 0 of
+ true ->
+ io:format("TPC-B: ~p iter ~s~p diff ~p (~p) tps ~p avg micros ~p max micros ~p unfairness~n",
+ [Iters, DiffSign, DiffTps, Tps, BruttoTps, Avg, Max, Unfairness]);
+ false ->
+ io:format("TPC-B: ~p (~p) transactions per second, "
+ "duration of longest transaction was ~p milliseconds~n",
+ [Tps, BruttoTps, Max div 1000])
+ end,
+ State#reporter_state{prev_tps = Tps, prev_micros = Now}.
+
+signed_diff(Iters, Curr, Prev) ->
+ case Iters > 1 of
+ true -> sign(Curr - Prev);
+ false -> sign(0)
+ end.
+
+sign(N) when N > 0 -> {"+", N};
+sign(N) -> {"", N}.
+
+now_to_micros({Mega, Secs, Micros}) ->
+ DT = calendar:now_to_datetime({Mega, Secs, 0}),
+ S = calendar:datetime_to_gregorian_seconds(DT),
+ (S * ?SECOND) + Micros.
+
+start_drivers(RC, TC) ->
+ LastHistoryId = table_info(history, size),
+ Reuse = RC#run_config.reuse_history_id,
+ DS = #driver_state{tab_config = TC,
+ run_config = RC,
+ n_local_branches = 0,
+ local_branches = [],
+ history_id = LastHistoryId,
+ reuse_history_id = Reuse},
+ Nodes = RC#run_config.driver_nodes,
+ NB = TC#tab_config.n_branches,
+ First = 0,
+ AllBranches = lists:seq(First, First + NB - 1),
+ ND = RC#run_config.n_drivers_per_node,
+ Spawn = fun(Spec) ->
+ Node = Spec#driver_state.driver_node,
+ spawn_link(Node, ?MODULE, driver_init, [Spec, AllBranches])
+ end,
+ Specs = [DS#driver_state{driver_id = Id, driver_node = N}
+ || N <- Nodes,
+ Id <- lists:seq(1, ND)],
+ Specs2 = lists:sort(lists:flatten(Specs)),
+ {Specs3, OrphanBranches} = alloc_local_branches(AllBranches, Specs2, []),
+ case length(OrphanBranches) of
+ N when N =< 10 ->
+ io:format("TPC-B: Orphan branches: ~p~n", [OrphanBranches]);
+ N ->
+ io:format("TPC-B: Orphan branches: ~p~n", [N])
+ end,
+ [Spawn(Spec) || Spec <- Specs3].
+
+alloc_local_branches([BranchId | Tail], Specs, OrphanBranches) ->
+ Nodes = table_info({branch, BranchId}, where_to_write),
+ LocalSpecs = [DS || DS <- Specs,
+ lists:member(DS#driver_state.driver_node, Nodes)],
+ case lists:keysort(#driver_state.n_local_branches, LocalSpecs) of
+ [] ->
+ alloc_local_branches(Tail, Specs, [BranchId | OrphanBranches]);
+ [DS | _] ->
+ LocalNB = DS#driver_state.n_local_branches + 1,
+ LocalBranches = [BranchId | DS#driver_state.local_branches],
+ DS2 = DS#driver_state{n_local_branches = LocalNB,
+ local_branches = LocalBranches},
+ Specs2 = Specs -- [DS],
+ Specs3 = [DS2 | Specs2],
+ alloc_local_branches(Tail, Specs3, OrphanBranches)
+ end;
+alloc_local_branches([], Specs, OrphanBranches) ->
+ {Specs, OrphanBranches}.
+
+driver_init(DS, AllBranches) ->
+ Seed = erlang:now(),
+ DS2 =
+ if
+ DS#driver_state.n_local_branches =:= 0 ->
+ DS#driver_state{seed = Seed,
+ n_local_branches = length(AllBranches),
+ local_branches = AllBranches};
+ true ->
+ DS#driver_state{seed = Seed}
+ end,
+ io:format("TPC-B: Driver ~p started as ~p on node ~p with ~p local branches~n",
+ [DS2#driver_state.driver_id, self(), node(), DS2#driver_state.n_local_branches]),
+ driver_loop(DS2).
+
+driver_loop(DS) ->
+ receive
+ {From, report} ->
+ From ! {self(), DS#driver_state.time},
+ Acc = add_time(DS#driver_state.time, DS#driver_state.acc_time),
+ DS2 = DS#driver_state{time=#time{}, acc_time = Acc}, % Reset timer
+ DS3 = calc_trans(DS2),
+ driver_loop(DS3);
+ {From, stop} ->
+ Acc = add_time(DS#driver_state.time, DS#driver_state.acc_time),
+ io:format("TPC-B: Driver ~p (~p) on node ~p stopped: ~w~n",
+ [DS#driver_state.driver_id, self(), node(self()), Acc]),
+ From ! {self(), DS#driver_state.time},
+ unlink(From),
+ exit(stopped)
+ after 0 ->
+ DS2 = calc_trans(DS),
+ driver_loop(DS2)
+ end.
+
+calc_trans(DS) ->
+ {Micros, DS2} = time_trans(DS),
+ Time = DS2#driver_state.time,
+ Time2 = Time#time{n_trans = Time#time.n_trans + 1,
+ acc_time = Time#time.acc_time + Micros,
+ max_time = lists:max([Micros, Time#time.max_time])
+ },
+ case DS#driver_state.reuse_history_id of
+ false ->
+ HistoryId = DS#driver_state.history_id + 1,
+ DS2#driver_state{time=Time2, history_id = HistoryId};
+ true ->
+ DS2#driver_state{time=Time2}
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Generate teller_id, account_id and delta
+%% Time the TPC-B transaction
+time_trans(DS) ->
+ OldSeed = get(random_seed), % Avoid interference with Mnesia
+ put(random_seed, DS#driver_state.seed),
+ Random = random:uniform(),
+ NewSeed = get(random_seed),
+ case OldSeed of
+ undefined -> erase(random_seed);
+ _ -> put(random_seed, OldSeed)
+ end,
+
+ TC = DS#driver_state.tab_config,
+ RC = DS#driver_state.run_config,
+ {Branchid, Args} = random_to_args(Random, DS),
+ {Fun, Mod} = trans_type(TC, RC),
+ {Time, Res} = timer:tc(?MODULE, real_trans, [RC, Branchid, Fun, Args, Mod]),
+
+ case Res of
+ AccountBal when is_integer(AccountBal) ->
+ {Time, DS#driver_state{seed = NewSeed}};
+ Other ->
+ exit({crash, Other, Args, Random, DS})
+ end.
+
+random_to_args(Random, DS) ->
+ DriverId = DS#driver_state.driver_id,
+ TC = DS#driver_state.tab_config,
+ HistoryId = DS#driver_state.history_id,
+ Delta = trunc(Random * 1999998) - 999999, % -999999 <= Delta <= +999999
+
+ Branches = DS#driver_state.local_branches,
+ NB = DS#driver_state.n_local_branches,
+ NT = TC#tab_config.n_tellers_per_branch,
+ NA = TC#tab_config.n_accounts_per_branch,
+ Tmp = trunc(Random * NB * NT),
+ BranchPos = (Tmp div NT) + 1,
+ BranchId =
+ case TC#tab_config.n_fragments of
+ 0 -> BranchPos - 1;
+ _ -> lists:nth(BranchPos, Branches)
+ end,
+ RelativeTellerId = Tmp div NT,
+ TellerId = (BranchId * NT) + RelativeTellerId,
+ {AccountBranchId, AccountId} =
+ if
+ Random >= 0.85, NB > 1 ->
+ %% Pick from a remote account
+ TmpAccountId= trunc(Random * (NB - 1) * NA),
+ TmpAccountBranchId = TmpAccountId div NA,
+ if
+ TmpAccountBranchId =:= BranchId ->
+ {TmpAccountBranchId + 1, TmpAccountId + NA};
+ true ->
+ {TmpAccountBranchId, TmpAccountId}
+ end;
+ true ->
+ %% Pick from a local account
+ RelativeAccountId = trunc(Random * NA),
+ TmpAccountId = (BranchId * NA) + RelativeAccountId,
+ {BranchId, TmpAccountId}
+ end,
+
+ {BranchId, [DriverId, BranchId, TellerId, AccountBranchId, AccountId, HistoryId, Delta]}.
+
+real_trans(RC, BranchId, Fun, Args, Mod) ->
+ Type = RC#run_config.activity_type,
+ case RC#run_config.spawn_near_branch of
+ false ->
+ mnesia:activity(Type, Fun, Args, Mod);
+ true ->
+ Node = table_info({branch, BranchId}, where_to_read),
+ case rpc:call(Node, mnesia, activity, [Type, Fun, Args, Mod]) of
+ {badrpc, Reason} -> exit(Reason);
+ Other -> Other
+ end
+ end.
+
+trans_type(TC, RC) ->
+ if
+ TC#tab_config.n_fragments =:= 0,
+ RC#run_config.use_sticky_locks =:= false ->
+ {fun add_delta/7, mnesia};
+ TC#tab_config.n_fragments =:= 0,
+ RC#run_config.use_sticky_locks =:= true ->
+ {fun sticky_add_delta/7, mnesia};
+ TC#tab_config.n_fragments > 0,
+ RC#run_config.use_sticky_locks =:= false ->
+ {fun frag_add_delta/7, mnesia_frag}
+ end.
+
+%%
+%% Runs the TPC-B defined transaction and returns NewAccountBalance
+%%
+
+add_delta(DriverId, BranchId, TellerId, _AccountBranchId, AccountId, HistoryId, Delta) ->
+ %% Grab write lock already when the record is read
+
+ %% Add delta to branch balance
+ [B] = mnesia:read(branch, BranchId, write),
+ NewB = B#branch{balance = B#branch.balance + Delta},
+ ok = mnesia:write(branch, NewB, write),
+
+ %% Add delta to teller balance
+ [T] = mnesia:read(teller, TellerId, write),
+ NewT = T#teller{balance = T#teller.balance + Delta},
+ ok = mnesia:write(teller, NewT, write),
+
+ %% Add delta to account balance
+ [A] = mnesia:read(account, AccountId, write),
+ NewA = A#account{balance = A#account.balance + Delta},
+ ok = mnesia:write(account, NewA, write),
+
+ %% Append to history log
+ History = #history{history_id = {DriverId, HistoryId},
+ account_id = AccountId,
+ teller_id = TellerId,
+ branch_id = BranchId,
+ amount = Delta
+ },
+ ok = mnesia:write(history, History, write),
+
+ %% Return account balance
+ NewA#account.balance.
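+
+%% For reference, the drivers never call add_delta/7 directly; real_trans/5
+%% wraps it in a Mnesia activity, roughly:
+%%
+%% mnesia:activity(transaction, fun add_delta/7,
+%% [DriverId, BranchId, TellerId, AccountBranchId,
+%% AccountId, HistoryId, Delta], mnesia)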
+
+sticky_add_delta(DriverId, BranchId, TellerId, _AccountBranchId, AccountId, HistoryId, Delta) ->
+ %% Grab an ordinary read lock when the record is read
+ %% Grab a sticky write lock when the record is written
+ %% This transaction would benefit from an early sticky_write lock at read
+
+ %% Add delta to branch balance
+ [B] = mnesia:read(branch, BranchId, read),
+ NewB = B#branch{balance = B#branch.balance + Delta},
+ ok = mnesia:write(branch, NewB, sticky_write),
+
+ %% Add delta to teller balance
+ [T] = mnesia:read(teller, TellerId, read),
+ NewT = T#teller{balance = T#teller.balance + Delta},
+ ok = mnesia:write(teller, NewT, sticky_write),
+
+ %% Add delta to account balance
+ [A] = mnesia:read(account, AccountId, read),
+ NewA = A#account{balance = A#account.balance + Delta},
+ ok = mnesia:write(account, NewA, sticky_write),
+
+ %% Append to history log
+ History = #history{history_id = {DriverId, HistoryId},
+ account_id = AccountId,
+ teller_id = TellerId,
+ branch_id = BranchId,
+ amount = Delta
+ },
+ ok = mnesia:write(history, History, sticky_write),
+
+ %% Return account balance
+ NewA#account.balance.
+
+frag_add_delta(DriverId, BranchId, TellerId, AccountBranchId, AccountId, HistoryId, Delta) ->
+ %% Access fragmented table
+ %% Grab write lock already when the record is read
+
+ %% Add delta to branch balance
+ [B] = mnesia:read(branch, BranchId, write),
+ NewB = B#branch{balance = B#branch.balance + Delta},
+ ok = mnesia:write(NewB),
+
+ %% Add delta to teller balance
+ [T] = mnesia:read({teller, BranchId}, TellerId, write),
+ NewT = T#teller{balance = T#teller.balance + Delta},
+ ok = mnesia:write(NewT),
+
+ %% Add delta to account balance
+ %%io:format("frag_add_delta(~p): ~p\n", [node(), {account, BranchId, AccountId}]),
+ [A] = mnesia:read({account, AccountBranchId}, AccountId, write),
+ NewA = A#account{balance = A#account.balance + Delta},
+ ok = mnesia:write(NewA),
+
+ %% Append to history log
+ History = #history{history_id = {DriverId, HistoryId},
+ account_id = AccountId,
+ teller_id = TellerId,
+ branch_id = BranchId,
+ amount = Delta
+ },
+ ok = mnesia:write(History),
+
+ %% Return account balance
+ NewA#account.balance.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Verify table consistency
+
+verify_tabs() ->
+ Nodes = mnesia:system_info(running_db_nodes),
+ case lists:member(node(), Nodes) of
+ true ->
+ Tabs = [branch, teller, account, history],
+ io:format("TPC-B: Verifying tables: ~w~n", [Tabs]),
+ rpc:multicall(Nodes, mnesia, wait_for_tables, [Tabs, infinity]),
+
+ Fun = fun() ->
+ mnesia:write_lock_table(branch),
+ mnesia:write_lock_table(teller),
+ mnesia:write_lock_table(account),
+ mnesia:write_lock_table(history),
+ {Res, BadNodes} =
+ rpc:multicall(Nodes, ?MODULE, count_balance, []),
+ check_balance(Res, BadNodes)
+ end,
+ case mnesia:transaction(Fun) of
+ {atomic, Res} -> Res;
+ {aborted, Reason} -> {error, Reason}
+ end;
+ false ->
+ {error, "Must be initiated from a running db_node"}
+ end.
+
+%% Returns a list of #summary{} records, one per table
+%% Assumes that no updates are performed
+
+-record(summary, {table, node, balance, size}).
+
+count_balance() ->
+ [count_balance(branch, #branch.balance),
+ count_balance(teller, #teller.balance),
+ count_balance(account, #account.balance)].
+
+count_balance(Tab, BalPos) ->
+ Frags = table_info(Tab, frag_names),
+ count_balance(Tab, Frags, 0, 0, BalPos).
+
+count_balance(Tab, [Frag | Frags], Bal, Size, BalPos) ->
+ First = mnesia:dirty_first(Frag),
+ {Bal2, Size2} = count_frag_balance(Frag, First, Bal, Size, BalPos),
+ count_balance(Tab, Frags, Bal2, Size2, BalPos);
+count_balance(Tab, [], Bal, Size, _BalPos) ->
+ #summary{table = Tab, node = node(), balance = Bal, size = Size}.
+
+count_frag_balance(_Frag, '$end_of_table', Bal, Size, _BalPos) ->
+ {Bal, Size};
+count_frag_balance(Frag, Key, Bal, Size, BalPos) ->
+ [Record] = mnesia:dirty_read({Frag, Key}),
+ Bal2 = Bal + element(BalPos, Record),
+ Next = mnesia:dirty_next(Frag, Key),
+ count_frag_balance(Frag, Next, Bal2, Size + 1, BalPos).
+
+check_balance([], []) ->
+ mnesia:abort({"No balance"});
+check_balance(Summaries, []) ->
+ [One | Rest] = lists:flatten(Summaries),
+ Balance = One#summary.balance,
+ %% Size = One#summary.size,
+ case [S || S <- Rest, S#summary.balance =/= Balance] of
+ [] ->
+ ok;
+ BadSummaries ->
+ mnesia:abort({"Bad balance", One, BadSummaries})
+ end;
+check_balance(_, BadNodes) ->
+ mnesia:abort({"Bad nodes", BadNodes}).
diff --git a/lib/mnesia/test/mnesia_trans_access_test.erl b/lib/mnesia/test/mnesia_trans_access_test.erl
new file mode 100644
index 0000000000..c67382e694
--- /dev/null
+++ b/lib/mnesia/test/mnesia_trans_access_test.erl
@@ -0,0 +1,1254 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+-module(mnesia_trans_access_test).
+-author('[email protected]').
+-compile([export_all]).
+-include("mnesia_test_lib.hrl").
+
+init_per_testcase(Func, Conf) ->
+ mnesia_test_lib:init_per_testcase(Func, Conf).
+
+fin_per_testcase(Func, Conf) ->
+ mnesia_test_lib:fin_per_testcase(Func, Conf).
+
+-define(receive_messages(Msgs), mnesia_recovery_test:receive_messages(Msgs, ?FILE, ?LINE)).
+
+% First, some debug logging
+-define(dgb, true).
+-ifdef(dgb).
+-define(dl(X, Y), ?verbose("**TRACING: " ++ X ++ "**~n", Y)).
+-else.
+-define(dl(X, Y), ok).
+-endif.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+all(doc) ->
+ ["Evil access of records in the scope of transactions",
+ "Invoke all functions in the API and try to cover all legal uses",
+ "cases as well the illegal dito. This is a complement to the",
+ "other more explicit test cases."];
+all(suite) ->
+ [
+ write, read, wread, delete, delete_object,
+ match_object, select, select14, all_keys,
+ transaction, nested_activities,
+ index_tabs, index_lifecycle
+ ].
+
+%% Write records
+
+write(suite) -> [];
+write(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = write,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:write([]) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 2}) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:write({foo, 2}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 1, 2}) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:write({Tab, 1, 2})),
+ ?verify_mnesia(Nodes, []).
+
+%% Read records
+
+read(suite) -> [];
+read(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = read,
+ Schema = [{name, Tab}, {type, bag}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ OneRec = {Tab, 1, 2},
+ TwoRec = {Tab, 1, 3},
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:read([]) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:read({Tab}) end)),
+ ?match({aborted, {bad_type, _}}
+ , mnesia:transaction(fun() -> mnesia:read(OneRec) end)),
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(TwoRec) end)),
+ ?match({atomic, [OneRec, TwoRec]},
+ mnesia:transaction(fun() -> mnesia:read({Tab, 1}) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:read({Tab, 1})),
+ ?verify_mnesia(Nodes, []).
+
+%% Read records and set write lock
+
+wread(suite) -> [];
+wread(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = wread,
+ Schema = [{name, Tab}, {type, set}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ OneRec = {Tab, 1, 2},
+ TwoRec = {Tab, 1, 3},
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:wread([]) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:wread({Tab}) end)),
+ ?match({aborted, {bad_type, _}}
+ , mnesia:transaction(fun() -> mnesia:wread(OneRec) end)),
+
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:wread({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:wread({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(TwoRec) end)),
+ ?match({atomic, [TwoRec]},
+ mnesia:transaction(fun() -> mnesia:wread({Tab, 1}) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:wread({Tab, 1})),
+ ?verify_mnesia(Nodes, []).
+
+%% Delete record
+
+delete(suite) -> [];
+delete(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = delete,
+ Schema = [{name, Tab}, {type, bag}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:delete([]) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:delete({Tab}) end)),
+ ?match({aborted, {bad_type, _}}
+ , mnesia:transaction(fun() -> mnesia:delete({Tab, 1, 2}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 1, 2}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 1, 2}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 1, 2}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete({Tab, 1}) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:delete({Tab, 1})),
+ ?verify_mnesia(Nodes, []).
+
+%% Delete matching record
+
+delete_object(suite) -> [];
+delete_object(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = delete_object,
+ Schema = [{name, Tab}, {type, bag}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ OneRec = {Tab, 1, 2},
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:delete_object([]) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:delete_object({Tab}) end)),
+ ?match({aborted, {bad_type, _}},
+ mnesia:transaction(fun() -> mnesia:delete_object({Tab, 1}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete_object(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete_object(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete_object(OneRec) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:delete_object(OneRec)),
+
+ ?match({aborted, {bad_type, Tab, _}},
+ mnesia:transaction(fun() -> mnesia:delete_object({Tab, {['_']}, 21}) end)),
+ ?match({aborted, {bad_type, Tab, _}},
+ mnesia:transaction(fun() -> mnesia:delete_object({Tab, {['$5']}, 21}) end)),
+
+ ?verify_mnesia(Nodes, []).
+
+%% Read matching records
+
+match_object(suite) -> [];
+match_object(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = match,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ OneRec = {Tab, 1, 2},
+ OnePat = {Tab, '$1', 2},
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:match_object(OnePat) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:match_object(OnePat) end)),
+
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:match_object({foo, '$1', 2}) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:match_object({[], '$1', 2}) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:match_object(OnePat)),
+ ?verify_mnesia(Nodes, []).
+
+%% select
+select(suite) -> [];
+select(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = match,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ OneRec = {Tab, 1, 2},
+ TwoRec = {Tab, 2, 3},
+ OnePat = [{{Tab, '$1', 2}, [], ['$_']}],
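+ %% Editorial note: a match specification is a list of {MatchHead, Guards, Result}
+ %% triples; '$_' as the result returns each whole matching record.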
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:select(Tab, OnePat) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(TwoRec) end)),
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:select(Tab, OnePat) end)),
+
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:select(Tab, {match, '$1', 2}) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:select(Tab, [{'_', [], '$1'}]) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:select(Tab, OnePat)),
+ ?verify_mnesia(Nodes, []).
+
+
+%% more select
+select14(suite) -> [];
+select14(Config) when is_list(Config) ->
+ [Node1,Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab1 = select14_ets,
+ Tab2 = select14_dets,
+ Tab3 = select14_remote,
+ Tab4 = select14_remote_dets,
+ Schemas = [[{name, Tab1}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ [{name, Tab2}, {attributes, [k, v]}, {disc_only_copies, [Node1]}],
+ [{name, Tab3}, {attributes, [k, v]}, {ram_copies, [Node2]}],
+ [{name, Tab4}, {attributes, [k, v]}, {disc_only_copies, [Node2]}]],
+ [?match({atomic, ok}, mnesia:create_table(Schema)) || Schema <- Schemas],
+
+ %% Some Helpers
+ Trans = fun(Fun) -> mnesia:transaction(Fun) end,
+ LoopHelp = fun('$end_of_table',_) -> [];
+ ({Recs,Cont},Fun) ->
+ Sel = mnesia:select(Cont),
+ Recs ++ Fun(Sel, Fun)
+ end,
+ Loop = fun(Table,Pattern) ->
+ Sel = mnesia:select(Table, Pattern, 1, read),
+ Res = LoopHelp(Sel,LoopHelp),
+ case mnesia:table_info(Table, type) of
+ ordered_set -> Res;
+ _ -> lists:sort(Res)
+ end
+ end,
+ Test =
+ fun(Tab) ->
+ OneRec = {Tab, 1, 2},
+ TwoRec = {Tab, 2, 3},
+ OnePat = [{{Tab, '$1', 2}, [], ['$_']}],
+ All = [OneRec,TwoRec],
+ AllPat = [{'_', [], ['$_']}],
+
+ ?match({atomic, []}, Trans(fun() -> Loop(Tab, OnePat) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(TwoRec) end)),
+ ?match({atomic, [OneRec]}, Trans(fun() -> Loop(Tab, OnePat) end)),
+ ?match({atomic, All}, Trans(fun() -> Loop(Tab, AllPat) end)),
+
+ {atomic,{_, Cont}} = Trans(fun() -> mnesia:select(Tab, OnePat, 1, read) end),
+ ?match({aborted, wrong_transaction}, Trans(fun() -> mnesia:select(Cont) end)),
+
+ ?match({aborted, _}, Trans(fun() -> mnesia:select(Tab, {match, '$1', 2},1,read) end)),
+ ?match({aborted, _}, Trans(fun() -> mnesia:select(Tab, [{'_', [], '$1'}],1,read) end)),
+ ?match({aborted, _}, Trans(fun() -> mnesia:select(sune) end)),
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:select(Tab, OnePat,1,read)),
+ ?match({aborted, {badarg,sune}},
+ Trans(fun() -> mnesia:select(sune) end))
+ end,
+ Test(Tab1),
+ Test(Tab2),
+ Test(Tab3),
+ Test(Tab4),
+ ?verify_mnesia(Nodes, []).
+
+
+%% Pick all keys from table
+
+all_keys(suite) ->[];
+all_keys(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = all_keys,
+ Schema = [{name, Tab}, {type, bag}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ Write = fun() -> mnesia:write({Tab, 14, 4}) end,
+ AllKeys = fun() -> mnesia:all_keys(Tab) end,
+
+ ?match({atomic, []}, mnesia:transaction(AllKeys)),
+
+ ?match({atomic, ok}, mnesia:transaction(Write)),
+ ?match({atomic, [14]}, mnesia:transaction(AllKeys)),
+
+ ?match({atomic, ok}, mnesia:transaction(Write)),
+ ?match({atomic, [14]}, mnesia:transaction(AllKeys)),
+
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:all_keys(foo) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:all_keys([]) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:all_keys(Tab)),
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Use and misuse transactions
+
+transaction(suite) -> [];
+transaction(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ ?match({atomic, ali_baba}, mnesia:transaction(fun() -> ali_baba end)),
+ ?match({aborted, _}, mnesia:transaction(no_fun)),
+ ?match({aborted, _}, mnesia:transaction(?MODULE, no_fun, [foo])),
+
+ {success, [A, B, C, D, E, F, G, H]} =
+ ?start_activities(lists:duplicate(8, Node1)),
+ ?start_transactions([A, B, C, D, E, F, G, H]),
+
+ A ! fun() -> mnesia:abort(abort_bad_trans) end,
+ ?match_receive({A, {aborted, abort_bad_trans}}),
+
+ B ! fun() -> erlang:error(exit_here) end,
+ ?match_receive({B, {aborted, _}}),
+
+ C ! fun() -> throw(throw_bad_trans) end,
+ ?match_receive({C, {aborted, {throw, throw_bad_trans}}}),
+
+ D ! fun() -> exit(exit_bad_trans) end,
+ ?match_receive({D, {aborted, exit_bad_trans}}),
+
+ E ! fun() -> exit(normal) end,
+ ?match_receive({E, {aborted, normal}}),
+
+ F ! fun() -> exit(abnormal) end,
+ ?match_receive({F, {aborted, abnormal}}),
+
+ G ! fun() -> exit(G, abnormal) end,
+ ?match_receive({'EXIT', G, abnormal}),
+
+ H ! fun() -> exit(H, kill) end,
+ ?match_receive({'EXIT', H, killed}),
+
+ ?match({atomic, ali_baba},
+ mnesia:transaction(fun() -> ali_baba end, infinity)),
+ ?match({atomic, ali_baba}, mnesia:transaction(fun() -> ali_baba end, 1)),
+ ?match({atomic, ali_baba}, mnesia:transaction(fun() -> ali_baba end, 0)),
+ ?match({aborted, Reason8} when element(1, Reason8) == badarg, mnesia:transaction(fun() -> ali_baba end, -1)),
+ ?match({aborted, Reason1} when element(1, Reason1) == badarg, mnesia:transaction(fun() -> ali_baba end, foo)),
+ Fun = fun() ->
+ ?match(true, mnesia:is_transaction()),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> ?match(true, mnesia:is_transaction()),ok end)), ok end,
+ ?match({atomic, ok}, mnesia:transaction(Fun)),
+ ?verify_mnesia(Nodes, []).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+nested_activities(suite) ->
+ [
+ basic_nested,
+ nested_transactions,
+ mix_of_nested_activities
+ ].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% ensure that nested transactions behave correctly
+%% We create a particular table that is used by this test only
+-record(ntab, {a, b}).
+basic_nested(doc) -> ["Test the basic functionality of nested transactions"];
+basic_nested(suite) -> [];
+basic_nested(Config) when is_list(Config) ->
+ Nodes = ?acquire_nodes(3, Config),
+ Args = [{ram_copies, Nodes},
+ {attributes, record_info(fields, ntab)}],
+ ?match({atomic, ok}, mnesia:create_table(ntab, Args)),
+ do_nested(top),
+ case mnesia_test_lib:diskless(Config) of
+ false ->
+ lists:foreach(fun(N) ->
+ ?match({atomic, ok},
+ mnesia:change_table_copy_type(ntab, N, disc_only_copies))
+ end, Nodes),
+ do_nested(top);
+ true ->
+ skip
+ end,
+ ?verify_mnesia(Nodes, []).
+
+do_nested(How) ->
+ F1 = fun() ->
+ mnesia:write(#ntab{a= 1}),
+ mnesia:write(#ntab{a= 2})
+ end,
+ F2 = fun() ->
+ mnesia:read({ntab, 1})
+ end,
+ ?match({atomic, ok}, mnesia:transaction(F1)),
+ ?match({atomic, _}, mnesia:transaction(F2)),
+
+ ?match({atomic, {aborted, _}},
+ mnesia:transaction(fun() -> n_f1(),
+ mnesia:transaction(fun() -> n_f2() end)
+ end)),
+
+ ?match({atomic, {aborted, _}},
+ mnesia:transaction(fun() -> n_f1(),
+ mnesia:transaction(fun() -> n_f3() end)
+ end)),
+ ?match({atomic, {atomic, [#ntab{a = 5}]}},
+ mnesia:transaction(fun() -> mnesia:write(#ntab{a = 5}),
+ mnesia:transaction(fun() -> n_f4() end)
+ end)),
+ Cyclic = fun() -> mnesia:abort({cyclic,a,a,a,a,a}) end, %% Ugly
+ NodeNotR = fun() -> mnesia:abort({node_not_running, testNode}) end,
+
+ TestAbort = fun(Fun) ->
+ case get(restart_counter) of
+ undefined ->
+ put(restart_counter, 1),
+ Fun();
+ _ ->
+ erase(restart_counter),
+ ok
+ end
+ end,
+
+ ?match({atomic,{atomic,ok}},
+ mnesia:transaction(fun()->mnesia:transaction(TestAbort,
+ [Cyclic])end)),
+
+ ?match({atomic,{atomic,ok}},
+ mnesia:transaction(fun()->mnesia:transaction(TestAbort,
+ [NodeNotR])end)),
+
+ %% Now exercise the transaction restart handling with concurrent processes
+ case How of
+ top ->
+ Pids = [spawn(?MODULE, do_nested, [{spawned, self()}]),
+ spawn(?MODULE, do_nested, [{spawned, self()}]),
+ spawn(?MODULE, do_nested, [{spawned, self()}]),
+ spawn(?MODULE, do_nested, [{spawned, self()}])],
+ ?match({info, _, _}, mnesia_tm:get_info(2000)),
+ lists:foreach(fun(P) -> receive
+ {P, ok} -> ok
+ end
+ end, Pids),
+ ?match([], [Tab || Tab <- ets:all(), mnesia_trans_store == ets:info(Tab, name)]);
+
+ {spawned, Pid} ->
+ ?match({info, _, _}, mnesia_tm:get_info(2000)),
+ Pid ! {self(), ok},
+ exit(normal)
+ end.
+
+
+n_f1() ->
+ mnesia:read({ntab, 1}),
+ mnesia:write(#ntab{a = 3}).
+
+n_f2() ->
+ mnesia:write(#ntab{a = 4}),
+ erlang:error(exit_here).
+
+n_f3() ->
+ mnesia:write(#ntab{a = 4}),
+ throw(funky).
+
+n_f4() ->
+ mnesia:read({ntab, 5}).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+nested_transactions(doc) ->
+ ["Verify that nested_transactions are handled as expected"];
+nested_transactions(suite) ->
+ [nested_trans_both_ok,
+ nested_trans_child_dies,
+ nested_trans_parent_dies,
+ nested_trans_both_dies].
+
+nested_trans_both_ok(suite) -> [];
+nested_trans_both_ok(Config) when is_list(Config) ->
+ nested_transactions(Config, ok, ok).
+
+nested_trans_child_dies(suite) -> [];
+nested_trans_child_dies(Config) when is_list(Config) ->
+ nested_transactions(Config, abort, ok).
+
+nested_trans_parent_dies(suite) -> [];
+nested_trans_parent_dies(Config) when is_list(Config) ->
+ nested_transactions(Config, ok, abort).
+
+nested_trans_both_dies(suite) -> [];
+nested_trans_both_dies(Config) when is_list(Config) ->
+ nested_transactions(Config, abort, abort).
+
+nested_transactions(Config, Child, Father) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab = nested_trans,
+
+ Def =
+ case mnesia_test_lib:diskless(Config) of
+ true ->
+ [{name, Tab}, {ram_copies, Nodes}];
+ false ->
+ [{name, Tab}, {ram_copies, [Node1]},
+ {disc_copies, [Node2]}, {disc_only_copies, [Node3]}]
+ end,
+
+ ?match({atomic, ok}, mnesia:create_table(Def)),
+ ?match(ok, mnesia:dirty_write({Tab, father, not_updated})),
+ ?match(ok, mnesia:dirty_write({Tab, child, not_updated})),
+
+ ChildOk = fun() -> mnesia:write({Tab, child, updated}) end,
+ ChildAbort = fun() ->
+ mnesia:write({Tab, child, updated}),
+ erlang:error(exit_here)
+ end,
+
+ Child_Fun = % Depending on test case
+ case Child of
+ ok -> ChildOk;
+ abort -> ChildAbort
+ end,
+
+ FatherOk = fun() -> mnesia:transaction(Child_Fun),
+ mnesia:write({Tab, father, updated})
+ end,
+
+ FatherAbort = fun() -> mnesia:transaction(Child_Fun),
+ mnesia:write({Tab, father, updated}),
+ erlang:error(exit_here)
+ end,
+
+ {FatherRes, ChildRes} = % Depending on test case
+ case Father of
+ ok -> ?match({atomic, ok}, mnesia:transaction(FatherOk)),
+ case Child of
+ ok -> {[{Tab, father, updated}], [{Tab, child, updated}]};
+ _ -> {[{Tab, father, updated}], [{Tab, child, not_updated}]}
+ end;
+ abort -> ?match({aborted, _}, mnesia:transaction(FatherAbort)),
+ {[{Tab, father, not_updated}], [{Tab, child, not_updated}]}
+ end,
+
+ %% Synchronize things!!
+ ?match({atomic, ok}, mnesia:sync_transaction(fun() -> mnesia:write({Tab, sync, sync}) end)),
+
+ ?match(ChildRes, rpc:call(Node1, mnesia, dirty_read, [{Tab, child}])),
+ ?match(ChildRes, rpc:call(Node2, mnesia, dirty_read, [{Tab, child}])),
+ ?match(ChildRes, rpc:call(Node3, mnesia, dirty_read, [{Tab, child}])),
+
+ ?match(FatherRes, rpc:call(Node1, mnesia, dirty_read, [{Tab, father}])),
+ ?match(FatherRes, rpc:call(Node2, mnesia, dirty_read, [{Tab, father}])),
+ ?match(FatherRes, rpc:call(Node3, mnesia, dirty_read, [{Tab, father}])),
+ ?verify_mnesia(Nodes, []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+mix_of_nested_activities(doc) ->
+ ["Verify that dirty operations in a transaction are handled like ",
+ "normal transactions"];
+mix_of_nested_activities(suite) -> [];
+mix_of_nested_activities(Config) when is_list(Config) ->
+ [Node1, Node2, Node3] = Nodes = ?acquire_nodes(3, Config),
+ Tab = tab,
+
+ Def =
+ case mnesia_test_lib:diskless(Config) of
+ true -> [{ram_copies, Nodes}];
+ false ->
+ [{ram_copies, [Node1]},
+ {disc_copies, [Node2]},
+ {disc_only_copies, [Node3]}]
+ end,
+
+ ?match({atomic, ok}, mnesia:create_table(Tab, [{type,bag}|Def])),
+ Activities = [transaction, sync_transaction,
+ ets, async_dirty, sync_dirty],
+ %% Make a test for all 5^5 = 3125 combinations
+ Tests = [[A,B,C,D,E] ||
+ A <- Activities,
+ B <- Activities,
+ C <- Activities,
+ D <- Activities,
+ E <- Activities],
+ Foreach =
+ fun(Test,No) ->
+ Result = lists:reverse(Test),
+ ?match({No,Result},{No,catch apply_op({Tab,No},Test)}),
+ No+1
+ end,
+ lists:foldl(Foreach, 0, Tests),
+ ?verify_mnesia(Nodes, []).
+
+apply_op(Oid,[Type]) ->
+ check_res(Type,mnesia:Type(fun() -> [Type|read_op(Oid)] end));
+apply_op(Oid = {Tab,Key},[Type|Next]) ->
+ check_res(Type,mnesia:Type(fun() ->
+ Prev = read_op(Oid),
+ mnesia:write({Tab,Key,[Type|Prev]}),
+ apply_op(Oid,Next)
+ end)).
+
+check_res(transaction, {atomic,Res}) ->
+ Res;
+check_res(sync_transaction, {atomic,Res}) ->
+ Res;
+check_res(async_dirty, Res) when is_list(Res) ->
+ Res;
+check_res(sync_dirty, Res) when is_list(Res) ->
+ Res;
+check_res(ets, Res) when is_list(Res) ->
+ Res;
+check_res(Type,Res) ->
+ ?match(bug,{Type,Res}).
+
+read_op(Oid) ->
+ case lists:reverse(mnesia:read(Oid)) of
+ [] -> [];
+ [{_,_,Ops}|_] ->
+ Ops
+ end.
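+%% Editorial sketch (not part of the original test): for a combination such as
+%% [transaction, ets], apply_op/2 nests one activity per list element, writing
+%% the accumulated type list under the key on the way down, e.g.
+%%
+%%   apply_op({tab, 0}, [transaction, ets])
+%%       %% outer: mnesia:transaction/1 writes {tab, 0, [transaction]}
+%%       %% inner: mnesia:ets/1 returns [ets | read_op({tab, 0})]
+%%
+%% so the expected value checked by the Foreach fun above is the reversed
+%% activity list, here [ets, transaction].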
+
+index_tabs(suite) ->
+ [
+ index_match_object,
+ index_read,
+ index_update,
+ index_write
+ ].
+
+%% Read matching records by using an index
+
+index_match_object(suite) -> [];
+index_match_object(Config) when is_list(Config) ->
+ [Node1, Node2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = index_match_object,
+ Schema = [{name, Tab}, {attributes, [k, v, e]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = 3,
+ BadValPos = ValPos + 2,
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:index_match_object({Tab, '$1', 2}, ValPos) end)),
+ OneRec = {Tab, {1, 1}, 2, {1, 1}},
+ OnePat = {Tab, '$1', 2, '_'},
+ BadPat = {Tab, '$1', '$2', '_'}, %% See ref guide
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+
+ Imatch = fun(Patt, Pos) ->
+ mnesia:transaction(fun() -> lists:sort(mnesia:index_match_object(Patt, Pos)) end)
+ end,
+ ?match({atomic, [OneRec]}, Imatch(OnePat, ValPos)),
+ ?match({aborted, _}, Imatch(OnePat, BadValPos)),
+ ?match({aborted, _}, Imatch({foo, '$1', 2, '_'}, ValPos)),
+ ?match({aborted, _}, Imatch({[], '$1', 2, '_'}, ValPos)),
+ ?match({aborted, _}, Imatch(BadPat, ValPos)),
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:index_match_object(OnePat, ValPos)),
+
+ Another = {Tab, {3,1}, 2, {4,4}},
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Another) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, {4, 4}, 3, {4, 4}}) end)),
+
+ ?match({atomic, [OneRec]}, Imatch({Tab, {1,1}, 2, {1,1}}, ValPos)),
+ ?match({atomic, [OneRec]}, Imatch({Tab, {1,1}, 2, '$1'}, ValPos)),
+ ?match({atomic, [OneRec]}, Imatch({Tab, '$1', 2, {1,1}}, ValPos)),
+ ?match({atomic, [OneRec]}, Imatch({Tab, '$1', 2, '$1'}, ValPos)),
+ ?match({atomic, [OneRec]}, Imatch({Tab, {1, '$1'}, 2, '_'}, ValPos)),
+ ?match({atomic, [OneRec]}, Imatch({Tab, {'$2', '$1'}, 2, {'_', '$1'}}, ValPos)),
+ ?match({atomic, [OneRec, Another]}, Imatch({Tab, '_', 2, '_'}, ValPos)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 4, 5, {7, 4}}) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write({Tab, 7, 5, {7, 5}}) end)),
+
+ ?match({atomic, [{Tab, 4, 5, {7, 4}}]}, Imatch({Tab, '$1', 5, {'_', '$1'}}, ValPos)),
+
+ ?match({atomic, [OneRec]}, rpc:call(Node2, mnesia, transaction,
+ [fun() ->
+ lists:sort(mnesia:index_match_object({Tab, {1,1}, 2,
+ {1,1}}, ValPos))
+ end])),
+ ?verify_mnesia(Nodes, []).
+
+%% Read records by using an index
+
+index_read(suite) -> [];
+index_read(Config) when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = index_read,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = 3,
+ BadValPos = ValPos + 1,
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ OneRec = {Tab, 1, 2},
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(OneRec) end)),
+ ?match({atomic, [OneRec]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, BadValPos) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:index_read(foo, 2, ValPos) end)),
+ ?match({aborted, _},
+ mnesia:transaction(fun() -> mnesia:index_read([], 2, ValPos) end)),
+
+ ?match({'EXIT', {aborted, no_transaction}}, mnesia:index_read(Tab, 2, ValPos)),
+ ?verify_mnesia(Nodes, []).
+
+index_update(suite) -> [index_update_set, index_update_bag];
+index_update(doc) -> ["See Ticket OTP-2083, verifies that a table with an index is "
+ "updated in the correct way, i.e. the index finds the correct "
+ "records after an update"].
+index_update_set(suite) -> [];
+index_update_set(Config)when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = index_test,
+ Schema = [{name, Tab}, {attributes, [k, v1, v2, v3]}, {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = v1,
+ ValPos2 = v3,
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Pat1 = {Tab, '$1', 2, '$2', '$3'},
+ Pat2 = {Tab, '$1', '$2', '$3', '$4'},
+
+ Rec1 = {Tab, 1, 2, 3, 4},
+ Rec2 = {Tab, 2, 2, 13, 14},
+ Rec3 = {Tab, 1, 12, 13, 14},
+ Rec4 = {Tab, 4, 2, 13, 14},
+
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ ?match({atomic, [Rec1]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec2) end)),
+ {atomic, R1} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R1)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec3) end)),
+ {atomic, R2} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec2], lists:sort(R2)),
+ ?match({atomic, [Rec2]},
+ mnesia:transaction(fun() -> mnesia:index_match_object(Pat1, ValPos) end)),
+
+ {atomic, R3} = mnesia:transaction(fun() -> mnesia:match_object(Pat2) end),
+ ?match([Rec3, Rec2], lists:sort(R3)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ {atomic, R4} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec2, Rec4], lists:sort(R4)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:delete({Tab, 4}) end)),
+ ?match({atomic, [Rec2]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+
+ ?match({atomic, ok}, mnesia:del_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos2)),
+
+ {atomic, R5} = mnesia:transaction(fun() -> mnesia:match_object(Pat2) end),
+ ?match([Rec3, Rec2, Rec4], lists:sort(R5)),
+
+ {atomic, R6} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec2, Rec4], lists:sort(R6)),
+
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end)),
+ {atomic, R7} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec3, Rec2, Rec4], lists:sort(R7)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ {atomic, R8} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R8)),
+ ?match({atomic, [Rec1]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end)),
+ {atomic, R9} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec2, Rec4], lists:sort(R9)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec2) end)),
+ {atomic, R10} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec4], lists:sort(R10)),
+ ?match({atomic, [Rec1]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end)),
+ ?match({atomic, [Rec4]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete({Tab, 4}) end)),
+ {atomic, R11} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1], lists:sort(R11)),
+ ?match({atomic, [Rec1]},mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end)),
+ ?match({atomic, []},mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end)),
+
+ ?verify_mnesia(Nodes, []).
+
+index_update_bag(suite) -> [];
+index_update_bag(Config)when is_list(Config) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = index_test,
+ Schema = [{name, Tab},
+ {type, bag},
+ {attributes, [k, v1, v2, v3]},
+ {ram_copies, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = v1,
+ ValPos2 = v3,
+
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+
+ Pat1 = {Tab, '$1', 2, '$2', '$3'},
+ Pat2 = {Tab, '$1', '$2', '$3', '$4'},
+
+ Rec1 = {Tab, 1, 2, 3, 4},
+ Rec2 = {Tab, 2, 2, 13, 14},
+ Rec3 = {Tab, 1, 12, 13, 14},
+ Rec4 = {Tab, 4, 2, 13, 4},
+ Rec5 = {Tab, 1, 2, 234, 14},
+
+ %% Simple Index
+ ?match({atomic, []},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ ?match({atomic, [Rec1]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec2) end)),
+ {atomic, R1} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R1)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec3) end)),
+ {atomic, R2} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R2)),
+
+ {atomic, R3} = mnesia:transaction(fun() -> mnesia:index_match_object(Pat1, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R3)),
+
+ {atomic, R4} = mnesia:transaction(fun() -> mnesia:match_object(Pat2) end),
+ ?match([Rec1, Rec3, Rec2], lists:sort(R4)),
+
+ ?match({atomic, ok},
+ mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ {atomic, R5} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R5)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete({Tab, 4}) end)),
+ {atomic, R6} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R6)),
+
+ %% OTP-6587 Needs some whitebox testing to see that the index table is cleaned correctly
+
+ [IPos] = mnesia_lib:val({Tab,index}),
+ ITab = mnesia_lib:val({index_test,{index, IPos}}),
+ io:format("~n Index ~p @ ~p => ~p ~n~n",[IPos,ITab, ets:tab2list(ITab)]),
+ ?match([{2,1},{2,2},{12,1}], ets:tab2list(ITab)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec5) end)),
+ {atomic, R60} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1,Rec5,Rec2], lists:sort(R60)),
+
+ ?match([{2,1},{2,2},{12,1}], ets:tab2list(ITab)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec3) end)),
+ {atomic, R61} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1,Rec5,Rec2], lists:sort(R61)),
+ {atomic, R62} = mnesia:transaction(fun() -> mnesia:index_read(Tab,12, ValPos) end),
+ ?match([], lists:sort(R62)),
+ ?match([{2,1},{2,2}], ets:tab2list(ITab)),
+
+ %% reset for rest of testcase
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec3) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec5) end)),
+ {atomic, R6} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2], lists:sort(R6)),
+ %% OTP-6587
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec1) end)),
+ ?match({atomic, [Rec2]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end)),
+ {atomic, R7} = mnesia:transaction(fun() -> mnesia:match_object(Pat2) end),
+ ?match([Rec3, Rec2], lists:sort(R7)),
+
+ %% Two indexes
+ ?match({atomic, ok}, mnesia:del_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec4) end)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos2)),
+
+ {atomic, R8} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec2, Rec4], lists:sort(R8)),
+
+ {atomic, R9} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end),
+ ?match([Rec1, Rec4], lists:sort(R9)),
+ {atomic, R10} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec3, Rec2], lists:sort(R10)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec5) end)),
+ {atomic, R11} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec1, Rec5, Rec2, Rec4], lists:sort(R11)),
+ {atomic, R12} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end),
+ ?match([Rec1, Rec4], lists:sort(R12)),
+ {atomic, R13} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec5, Rec3, Rec2], lists:sort(R13)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec1) end)),
+ {atomic, R14} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec5, Rec2, Rec4], lists:sort(R14)),
+ ?match({atomic, [Rec4]},
+ mnesia:transaction(fun() -> mnesia:index_read(Tab, 4, ValPos2) end)),
+ {atomic, R15} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec5, Rec3, Rec2], lists:sort(R15)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete_object(Rec5) end)),
+ {atomic, R16} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec2, Rec4], lists:sort(R16)),
+ ?match({atomic, [Rec4]}, mnesia:transaction(fun()->mnesia:index_read(Tab, 4, ValPos2) end)),
+ {atomic, R17} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec3, Rec2], lists:sort(R17)),
+
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:write(Rec1) end)),
+ ?match({atomic, ok}, mnesia:transaction(fun() -> mnesia:delete({Tab, 1}) end)),
+ {atomic, R18} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 2, ValPos) end),
+ ?match([Rec2, Rec4], lists:sort(R18)),
+ ?match({atomic, [Rec4]}, mnesia:transaction(fun()->mnesia:index_read(Tab, 4, ValPos2) end)),
+ {atomic, R19} = mnesia:transaction(fun() -> mnesia:index_read(Tab, 14, ValPos2) end),
+ ?match([Rec2], lists:sort(R19)),
+
+ ?verify_mnesia(Nodes, []).
+
+
+index_write(suite) -> [];
+index_write(doc) -> ["See ticket OTP-8072"];
+index_write(Config)when is_list(Config) ->
+ Nodes = ?acquire_nodes(1, Config),
+ mnesia:create_table(a, [{index, [val]}]),
+ mnesia:create_table(counter, []),
+
+ CreateIfNonExist =
+ fun(Index) ->
+ case mnesia:index_read(a, Index, 3) of
+ [] ->
+ Id = mnesia:dirty_update_counter(counter, id, 1),
+ New = {a, Id, Index},
+ mnesia:write(New),
+ New;
+ [Found] ->
+ Found
+ end
+ end,
+
+ Trans = fun(A) ->
+ mnesia:transaction(CreateIfNonExist, [A])
+ %% This works better most of the time
+ %% And it is allowed to fail since it's dirty
+ %% mnesia:async_dirty(CreateIfNonExist, [A])
+ end,
+
+ Self = self(),
+ Update = fun() ->
+ Res = lists:map(Trans, lists:seq(1,10)),
+ Self ! {self(), Res}
+ end,
+
+ Pids = [spawn(Update) || _ <- lists:seq(1,5)],
+
+ Gather = fun(Pid, Acc) -> receive {Pid, Res} -> [Res|Acc] end end,
+ Results = lists:foldl(Gather, [], Pids),
+ Expected = hd(Results),
+ Check = fun(Res) -> ?match(Expected, Res) end,
+ lists:foreach(Check, Results),
+ ?verify_mnesia(Nodes, []).
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Add and drop indexes
+
+index_lifecycle(suite) ->
+ [
+ add_table_index_ram,
+ add_table_index_disc,
+ add_table_index_disc_only,
+ create_live_table_index_ram,
+ create_live_table_index_disc,
+ create_live_table_index_disc_only,
+ del_table_index_ram,
+ del_table_index_disc,
+ del_table_index_disc_only,
+ idx_schema_changes
+ ].
+
+add_table_index_ram(suite) -> [];
+add_table_index_ram(Config) when is_list(Config) ->
+ add_table_index(Config, ram_copies).
+
+add_table_index_disc(suite) -> [];
+add_table_index_disc(Config) when is_list(Config) ->
+ add_table_index(Config, disc_copies).
+
+add_table_index_disc_only(suite) -> [];
+add_table_index_disc_only(Config) when is_list(Config) ->
+ add_table_index(Config, disc_only_copies).
+
+%% Add table index
+
+add_table_index(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = add_table_index,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = 3,
+ BadValPos = ValPos + 1,
+ ?match({aborted, Reason41 } when element(1, Reason41) == bad_type,
+ mnesia:add_table_index(Tab, BadValPos)),
+ ?match({aborted,Reason42 } when element(1, Reason42) == bad_type,
+ mnesia:add_table_index(Tab, 2)),
+ ?match({aborted, Reason43 } when element(1, Reason43) == bad_type,
+ mnesia:add_table_index(Tab, 1)),
+ ?match({aborted, Reason44 } when element(1, Reason44) == bad_type,
+ mnesia:add_table_index(Tab, 0)),
+ ?match({aborted, Reason45 } when element(1, Reason45) == bad_type,
+ mnesia:add_table_index(Tab, -1)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({aborted, Reason46 } when element(1, Reason46) == already_exists,
+ mnesia:add_table_index(Tab, ValPos)),
+
+ NestedFun = fun() ->
+ ?match({aborted, nested_transaction},
+ mnesia:add_table_index(Tab, ValPos)),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(NestedFun)),
+ ?verify_mnesia(Nodes, []).
+
+create_live_table_index_ram(suite) -> [];
+create_live_table_index_ram(Config) when is_list(Config) ->
+ create_live_table_index(Config, ram_copies).
+
+create_live_table_index_disc(suite) -> [];
+create_live_table_index_disc(Config) when is_list(Config) ->
+ create_live_table_index(Config, disc_copies).
+
+create_live_table_index_disc_only(suite) -> [];
+create_live_table_index_disc_only(Config) when is_list(Config) ->
+ create_live_table_index(Config, disc_only_copies).
+
+create_live_table_index(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = create_live_table_index,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = 3,
+ mnesia:dirty_write({Tab, 1, 2}),
+
+ Fun = fun() ->
+ ?match(ok, mnesia:write({Tab, 2, 2})),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(Fun)),
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({atomic, [{Tab, 1, 2},{Tab, 2, 2}]},
+ mnesia:transaction(fun() -> lists:sort(mnesia:index_read(Tab, 2, ValPos))
+ end)),
+ ?verify_mnesia(Nodes, []).
+
+%% Drop table index
+
+del_table_index_ram(suite) ->[];
+del_table_index_ram(Config) when is_list(Config) ->
+ del_table_index(Config, ram_copies).
+
+del_table_index_disc(suite) ->[];
+del_table_index_disc(Config) when is_list(Config) ->
+ del_table_index(Config, disc_copies).
+
+del_table_index_disc_only(suite) ->[];
+del_table_index_disc_only(Config) when is_list(Config) ->
+ del_table_index(Config, disc_only_copies).
+
+del_table_index(Config, Storage) ->
+ [Node1] = Nodes = ?acquire_nodes(1, Config),
+ Tab = del_table_index,
+ Schema = [{name, Tab}, {attributes, [k, v]}, {Storage, [Node1]}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+ ValPos = 3,
+ BadValPos = ValPos + 1,
+ ?match({atomic, ok}, mnesia:add_table_index(Tab, ValPos)),
+ ?match({aborted,Reason} when element(1, Reason) == no_exists,
+ mnesia:del_table_index(Tab, BadValPos)),
+ ?match({atomic, ok}, mnesia:del_table_index(Tab, ValPos)),
+
+ ?match({aborted,Reason1} when element(1, Reason1) == no_exists,
+ mnesia:del_table_index(Tab, ValPos)),
+ NestedFun =
+ fun() ->
+ ?match({aborted, nested_transaction},
+ mnesia:del_table_index(Tab, ValPos)),
+ ok
+ end,
+ ?match({atomic, ok}, mnesia:transaction(NestedFun)),
+ ?verify_mnesia(Nodes, []).
+
+idx_schema_changes(suite) -> [idx_schema_changes_ram,
+ idx_schema_changes_disc,
+ idx_schema_changes_disc_only];
+idx_schema_changes(doc) ->
+ ["Tests that index tables are handled correctly when schema changes.",
+ "For example when a replica is deleted or inserted",
+ "TICKET OTP-2XXX (ELVIRA)"].
+
+idx_schema_changes_ram(suite) -> [];
+idx_schema_changes_ram(Config) when is_list(Config) ->
+ idx_schema_changes(Config, ram_copies).
+idx_schema_changes_disc(suite) -> [];
+idx_schema_changes_disc(Config) when is_list(Config) ->
+ idx_schema_changes(Config, disc_copies).
+idx_schema_changes_disc_only(suite) -> [];
+idx_schema_changes_disc_only(Config) when is_list(Config) ->
+ idx_schema_changes(Config, disc_only_copies).
+
+idx_schema_changes(Config, Storage) ->
+ [N1, N2] = Nodes = ?acquire_nodes(2, Config),
+ Tab = index_schema_changes,
+ Idx = 3,
+ Schema = [{name, Tab}, {index, [Idx]}, {attributes, [k, v]}, {Storage, Nodes}],
+ ?match({atomic, ok}, mnesia:create_table(Schema)),
+
+ {Storage1, Storage2} =
+ case Storage of
+ disc_only_copies ->
+ {ram_copies, disc_copies};
+ disc_copies ->
+ {disc_only_copies, ram_copies};
+ ram_copies ->
+ {disc_copies, disc_only_copies}
+ end,
+
+ Write = fun(N) ->
+ mnesia:write({Tab, N, N+50})
+ end,
+
+ [mnesia:sync_transaction(Write, [N]) || N <- lists:seq(1, 10)],
+ ?match([{Tab, 1, 51}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 51, Idx])),
+ ?match([{Tab, 1, 51}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 51, Idx])),
+
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, N1, Storage1)),
+
+ ?match({atomic, ok}, rpc:call(N1, mnesia, sync_transaction, [Write, [17]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, sync_transaction, [Write, [18]])),
+
+ ?match([{Tab, 17, 67}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 67, Idx])),
+ ?match([{Tab, 18, 68}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 68, Idx])),
+
+ ?match({atomic, ok}, mnesia:del_table_copy(Tab, N1)),
+ ?match({atomic, ok}, rpc:call(N1, mnesia, sync_transaction, [Write, [11]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, sync_transaction, [Write, [12]])),
+
+ ?match([{Tab, 11, 61}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 61, Idx])),
+ ?match([{Tab, 12, 62}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 62, Idx])),
+
+ ?match({atomic, ok}, mnesia:move_table_copy(Tab, N2, N1)),
+ ?match({atomic, ok}, rpc:call(N1, mnesia, sync_transaction, [Write, [19]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, sync_transaction, [Write, [20]])),
+
+ ?match([{Tab, 19, 69}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 69, Idx])),
+ ?match([{Tab, 20, 70}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 70, Idx])),
+
+ ?match({atomic, ok}, mnesia:add_table_copy(Tab, N2, Storage)),
+ ?match({atomic, ok}, rpc:call(N1, mnesia, sync_transaction, [Write, [13]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, sync_transaction, [Write, [14]])),
+
+ ?match([{Tab, 13, 63}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 63, Idx])),
+ ?match([{Tab, 14, 64}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 64, Idx])),
+
+ ?match({atomic, ok}, mnesia:change_table_copy_type(Tab, N2, Storage2)),
+
+ ?match({atomic, ok}, rpc:call(N1, mnesia, sync_transaction, [Write, [15]])),
+ ?match({atomic, ok}, rpc:call(N2, mnesia, sync_transaction, [Write, [16]])),
+
+ ?match([{Tab, 15, 65}], rpc:call(N2, mnesia, dirty_index_read, [Tab, 65, Idx])),
+ ?match([{Tab, 16, 66}], rpc:call(N1, mnesia, dirty_index_read, [Tab, 66, Idx])),
+
+ ?verify_mnesia(Nodes, []).
diff --git a/lib/mnesia/test/mt b/lib/mnesia/test/mt
new file mode 100755
index 0000000000..25243f1149
--- /dev/null
+++ b/lib/mnesia/test/mt
@@ -0,0 +1,60 @@
+#! /bin/sh -f
+# ``The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved via the world wide web at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+# Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+# AB. All Rights Reserved.''
+#
+# $Id$
+#
+#
+# Author: Hakan Mattsson <[email protected]>
+# Purpose: Simplified execution of the test suite
+#
+# Usage: mt <args to erlang startup script>
+
+#top=".."
+top="$ERL_TOP/lib/mnesia"
+h=`hostname`
+p="-pa $top/examples -pa $top/ebin -pa $top/test -mnesia_test_verbose true"
+log=test_log$$
+latest=test_log_latest
+args=${1+"$@"}
+erlcmd="erl -sname a $p $args -mnesia_test_timeout"
+erlcmd1="erl -sname a1 $p $args"
+erlcmd2="erl -sname a2 $p $args"
+
+xterm -geometry 70x20+0+550 -T a1 -e $erlcmd1 &
+xterm -geometry 70x20+450+550 -T a2 -e $erlcmd2 &
+
+rm "$latest" 2>/dev/null
+ln -s "$log" "$latest"
+touch "$log"
+
+echo "$erlcmd1"
+echo ""
+echo "$erlcmd2"
+echo ""
+echo "$erlcmd"
+echo ""
+echo "Give the following command in order to see the outcome from node a@$h"":"
+echo ""
+echo " less test_log$$"
+
+ostype=`uname -s`
+if [ "$ostype" = "SunOS" ] ; then
+ /usr/openwin/bin/xterm -geometry 145x40+0+0 -T a -l -lf "$log" -e $erlcmd &
+else
+ xterm -geometry 145x40+0+0 -T a -e script -f -c "$erlcmd" "$log" &
+fi
+tail -f "$log" | egrep 'Eval|<>ERROR|NYI'
+
diff --git a/lib/mnesia/test/mt.erl b/lib/mnesia/test/mt.erl
new file mode 100644
index 0000000000..f69c4a11fd
--- /dev/null
+++ b/lib/mnesia/test/mt.erl
@@ -0,0 +1,262 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%%% Author: Hakan Mattsson [email protected]
+%%% Purpose: Nice shortcuts intended for testing of Mnesia
+%%%
+%%% See the mnesia_SUITE module about the structure of
+%%% the test suite.
+%%%
+%%% See the mnesia_test_lib module about the test case execution.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-module(mt).
+-author('[email protected]').
+-export([
+ t/0, t/1, t/2, t/3, % Run test cases
+ loop/1, loop/2, loop/3, % loop test cases
+ doc/0, doc/1, % Generate test case doc
+ struct/0, struct/1, % View test suite struct
+ shutdown/0, ping/0, start_nodes/0, % Node admin
+ read_config/0, write_config/1 % Config admin
+ ]).
+
+-include("mnesia_test_lib.hrl").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Aliases for the (sub) test suites
+alias(all) -> mnesia_SUITE;
+alias(atomicity) -> mnesia_atomicity_test;
+alias(backup) -> mnesia_evil_backup;
+alias(config) -> mnesia_config_test;
+alias(consistency) -> mnesia_consistency_test;
+alias(dirty) -> mnesia_dirty_access_test;
+alias(durability) -> mnesia_durability_test;
+alias(evil) -> mnesia_evil_coverage_test;
+alias(qlc) -> mnesia_qlc_test;
+alias(examples) -> mnesia_examples_test;
+alias(frag) -> mnesia_frag_test;
+alias(heavy) -> {mnesia_SUITE, heavy};
+alias(install) -> mnesia_install_test;
+alias(isolation) -> mnesia_isolation_test;
+alias(light) -> {mnesia_SUITE, light};
+alias(measure) -> mnesia_measure_test;
+alias(medium) -> {mnesia_SUITE, medium};
+alias(nice) -> mnesia_nice_coverage_test;
+alias(recover) -> mnesia_recover_test;
+alias(recovery) -> mnesia_recovery_test;
+alias(registry) -> mnesia_registry_test;
+alias(suite) -> mnesia_SUITE;
+alias(trans) -> mnesia_trans_access_test;
+alias(Other) -> Other.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Resolves the name of test suites and test cases
+%% according to the alias definitions. Single atoms
+%% are assumed to be the name of a test suite.
+resolve(Suite0) when is_atom(Suite0) ->
+ case alias(Suite0) of
+ Suite when is_atom(Suite) ->
+ {Suite, all};
+ {Suite, Case} ->
+ {Suite, Case}
+ end;
+resolve({Suite0, Case}) when is_atom(Suite0), is_atom(Case) ->
+ case alias(Suite0) of
+ Suite when is_atom(Suite) ->
+ {Suite, Case};
+ {Suite, Case2} ->
+ {Suite, Case2}
+ end;
+resolve(List) when is_list(List) ->
+ [resolve(Case) || Case <- List].
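+%% Editorial example (not part of the module): with the aliases above,
+%% resolve(evil) returns {mnesia_evil_coverage_test, all}, and
+%% resolve({dirty, SomeCase}) returns {mnesia_dirty_access_test, SomeCase}.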
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Run one or more test cases
+
+%% Run the default test case with default config
+t() ->
+ t(read_test_case()).
+
+%% Resolve the test case name and run the test case.
+%% The test case is noted as the default test case
+%% and the outcome of the tests is written to a file.
+t(silly) ->
+ mnesia_install_test:silly();
+t(diskless) ->
+ %% Run the default test case with default config,
+ %% but diskless
+ t(read_test_case(), diskless);
+t(Case) ->
+ %% Use the default config
+ t(Case, read_config()).
+
+t(Case, Config) when Config == diskless ->
+ %% Run the test case with default config, but diskless
+ Config2 = [{diskless, true} | read_config()],
+ t(Case, Config2);
+t(Mod, Fun) when is_atom(Mod), is_atom(Fun) ->
+ %% Run the test case with default config
+ t({Mod, Fun}, read_config());
+t(RawCase, Config) when is_list(Config) ->
+ %% Resolve the test case name and run the test case
+ Case = resolve(RawCase),
+ write_test_case(Case),
+ Res = mnesia_test_lib:test(Case, Config),
+ append_test_case_info(Case, Res).
+
+t(Mod, Fun, Config) when Config == diskless ->
+ t({Mod, Fun}, diskless).
+
+config_fname() ->
+ "mnesia_test_case_config".
+
+%% Read default config file
+read_config() ->
+ Fname = config_fname(),
+ mnesia_test_lib:log("Consulting file ~s...~n", [Fname]),
+ case file:consult(Fname) of
+ {ok, Config} ->
+ mnesia_test_lib:log("Read config ~w~n", [Config]),
+ Config;
+ _Error ->
+ Config = mnesia_test_lib:default_config(),
+ mnesia_test_lib:log("<>WARNING<> Using default config: ~w~n", [Config]),
+ Config
+ end.
+
+%% Write new default config file
+write_config(Config) when is_list(Config) ->
+ Fname = config_fname(),
+ {ok, Fd} = file:open(Fname, write),
+ write_list(Fd, Config),
+ file:close(Fd).
+
+write_list(Fd, [H | T]) ->
+ ok = io:format(Fd, "~p.~n",[H]),
+ write_list(Fd, T);
+write_list(_, []) ->
+ ok.
+
+test_case_fname() ->
+ "mnesia_test_case_info".
+
+%% Read name of test case
+read_test_case() ->
+ Fname = test_case_fname(),
+ case file:open(Fname, [read]) of
+ {ok, Fd} ->
+ Res = io:read(Fd, []),
+ file:close(Fd),
+ case Res of
+ {ok, TestCase} ->
+ mnesia_test_lib:log("Using test case ~w from file ~s~n",
+ [TestCase, Fname]),
+ TestCase;
+ {error, _} ->
+ default_test_case(Fname)
+ end;
+ {error, _} ->
+ default_test_case(Fname)
+ end.
+
+default_test_case(Fname) ->
+ TestCase = all,
+ mnesia_test_lib:log("<>WARNING<> Cannot read file ~s, "
+ "using default test case: ~w~n",
+ [Fname, TestCase]),
+ TestCase.
+
+write_test_case(TestCase) ->
+ Fname = test_case_fname(),
+ {ok, Fd} = file:open(Fname, write),
+ ok = io:format(Fd, "~p.~n",[TestCase]),
+ file:close(Fd).
+
+append_test_case_info(TestCase, TestCaseInfo) ->
+ Fname = test_case_fname(),
+ {ok, Fd} = file:open(Fname, [read, write]),
+ ok = io:format(Fd, "~p.~n",[TestCase]),
+ ok = io:format(Fd, "~p.~n",[TestCaseInfo]),
+ file:close(Fd),
+ TestCaseInfo.
+
+%% Generate HTML pages from the test case structure
+doc() ->
+ doc(suite).
+
+doc(Case) ->
+ mnesia_test_lib:doc(resolve(Case)).
+
+%% Display the test case structure
+struct() ->
+ struct(suite).
+
+struct(Case) ->
+ mnesia_test_lib:struct([resolve(Case)]).
+
+%% Shutdown all nodes with erlang:halt/0
+shutdown() ->
+ mnesia_test_lib:shutdown().
+
+%% Ping all nodes in config spec
+ping() ->
+ Config = read_config(),
+ Nodes = mnesia_test_lib:select_nodes(all, Config, ?FILE, ?LINE),
+ [{N, net_adm:ping(N)} || N <- Nodes].
+
+%% Start all nodes in the config spec as slave nodes
+start_nodes() ->
+ Config = read_config(),
+ Nodes = mnesia_test_lib:select_nodes(all, Config, ?FILE, ?LINE),
+ mnesia_test_lib:init_nodes(Nodes, ?FILE, ?LINE),
+ ping().
+
+%% Loop one test case/suite until it fails
+
+loop(Case) ->
+ loop_1(Case,-1,read_config()).
+
+loop(M,F) when is_atom(F) ->
+ loop_1({M,F},-1,read_config());
+loop(Case,N) when is_integer(N) ->
+ loop_1(Case, N,read_config()).
+
+loop(M,F,N) when is_integer(N) ->
+ loop_1({M,F},N,read_config()).
+
+loop_1(Case,N,Config) when N /= 0 ->
+ io:format("Loop test ~p ~n", [abs(N)]),
+ case ok_result(Res = t(Case,Config)) of
+ true ->
+ loop_1(Case,N-1,Config);
+ error ->
+ Res
+ end;
+loop_1(_,_,_) ->
+ ok.
+
+ok_result([{_T,{ok,_,_}}|R]) ->
+ ok_result(R);
+ok_result([{_T,{TC,List}}|R]) when is_tuple(TC), is_list(List) ->
+ ok_result(List) andalso ok_result(R);
+ok_result([]) -> true;
+ok_result(_) -> error.
diff --git a/lib/public_key/asn1/Makefile b/lib/public_key/asn1/Makefile
index fbea701be9..94abec083c 100644
--- a/lib/public_key/asn1/Makefile
+++ b/lib/public_key/asn1/Makefile
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2008-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2008-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
@@ -40,7 +40,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/public_key-$(VSN)
ASN_TOP = OTP-PUB-KEY
ASN_MODULES = PKIX1Explicit88 PKIX1Implicit88 PKIX1Algorithms88 \
- PKIXAttributeCertificate OTP-PKIX
+ PKIXAttributeCertificate PKCS-1 PKCS-3 OTP-PKIX
ASN_ASNS = $(ASN_MODULES:%=%.asn1)
ASN_ERLS = $(ASN_TOP).erl
ASN_HRLS = $(ASN_TOP).hrl
@@ -110,4 +110,6 @@ OTP-PUB-KEY.asn1db: PKIX1Algorithms88.asn1 \
PKIX1Explicit88.asn1 \
PKIX1Implicit88.asn1 \
PKIXAttributeCertificate.asn1 \
+ PKCS-1.asn1\
+ PKCS-3.asn1\
OTP-PKIX.asn1
diff --git a/lib/public_key/asn1/OTP-PUB-KEY.set.asn b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
index 2f9ccd6b0e..5c76d13115 100644
--- a/lib/public_key/asn1/OTP-PUB-KEY.set.asn
+++ b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
@@ -4,4 +4,5 @@ PKIX1Implicit88.asn1
PKIXAttributeCertificate.asn1
PKIX1Algorithms88.asn1
PKCS-1.asn1
+PKCS-3.asn1
DSS.asn1
diff --git a/lib/public_key/asn1/PKCS-3.asn1 b/lib/public_key/asn1/PKCS-3.asn1
new file mode 100644
index 0000000000..64180b3a85
--- /dev/null
+++ b/lib/public_key/asn1/PKCS-3.asn1
@@ -0,0 +1,21 @@
+PKCS-3 {
+ iso(1) member-body(2) us(840) rsadsi(113549)
+ pkcs(1) 3
+}
+
+DEFINITIONS EXPLICIT TAGS ::=
+
+BEGIN
+
+pkcs-3 OBJECT IDENTIFIER ::=
+ { iso(1) member-body(2) us(840) rsadsi(113549)
+ pkcs(1) 3 }
+
+dhKeyAgreement OBJECT IDENTIFIER ::= { pkcs-3 1 }
+
+DHParameter ::= SEQUENCE {
+ prime INTEGER, -- p
+ base INTEGER, -- g
+ privateValueLength INTEGER OPTIONAL }
+
+END \ No newline at end of file
diff --git a/lib/public_key/doc/src/notes.xml b/lib/public_key/doc/src/notes.xml
index a5fbc81093..33a424f432 100644
--- a/lib/public_key/doc/src/notes.xml
+++ b/lib/public_key/doc/src/notes.xml
@@ -33,6 +33,27 @@
<rev>A</rev>
<file>notes.xml</file>
</header>
+<section><title>Public_Key 0.6</title>
+
+<section><title>Improvements and New Features</title>
+<list>
+ <item>
+ <p>
+ Support for Diffie-Hellman. ssl-3.11 requires
+ public_key-0.6.</p>
+ <p>
+ Own Id: OTP-7046</p>
+ </item>
+ <item>
+ <p>
+ Moved extended key usage test for ssl values to ssl.</p>
+ <p>
+ Own Id: OTP-8553 Aux Id: seq11541, OTP-8554 </p>
+ </item>
+</list>
+</section>
+
+</section>
<section><title>Public_Key 0.5</title>
diff --git a/lib/public_key/src/pubkey_cert.erl b/lib/public_key/src/pubkey_cert.erl
index 0ccc74799c..8f7dfa8352 100644
--- a/lib/public_key/src/pubkey_cert.erl
+++ b/lib/public_key/src/pubkey_cert.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -455,24 +455,6 @@ validate_extensions([#'Extension'{extnID = ?'id-ce-keyUsage',
AccErr)
end;
-validate_extensions([#'Extension'{extnID = ?'id-ce-extKeyUsage',
- extnValue = KeyUse,
- critical = true} | Rest],
- #path_validation_state{} = ValidationState,
- ExistBasicCon, SelfSigned, UnknownExtensions, Verify,
- AccErr0) ->
- case is_valid_extkey_usage(KeyUse) of
- true ->
- validate_extensions(Rest, ValidationState, ExistBasicCon,
- SelfSigned, UnknownExtensions,
- Verify, AccErr0);
- false ->
- AccErr =
- not_valid({bad_cert, invalid_ext_key_usage}, Verify, AccErr0),
- validate_extensions(Rest, ValidationState, ExistBasicCon,
- SelfSigned, UnknownExtensions, Verify, AccErr)
- end;
-
validate_extensions([#'Extension'{extnID = ?'id-ce-subjectAltName',
extnValue = Names} | Rest],
ValidationState, ExistBasicCon,
@@ -590,13 +572,6 @@ validate_extensions([Extension | Rest], ValidationState,
is_valid_key_usage(KeyUse, Use) ->
lists:member(Use, KeyUse).
-is_valid_extkey_usage(?'id-kp-clientAuth') ->
- true;
-is_valid_extkey_usage(?'id-kp-serverAuth') ->
- true;
-is_valid_extkey_usage(_) ->
- false.
-
validate_subject_alt_names([]) ->
true;
validate_subject_alt_names([AltName | Rest]) ->
diff --git a/lib/public_key/src/pubkey_crypto.erl b/lib/public_key/src/pubkey_crypto.erl
index fe4e97fcc5..4ab655e977 100644
--- a/lib/public_key/src/pubkey_crypto.erl
+++ b/lib/public_key/src/pubkey_crypto.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -26,7 +26,7 @@
-export([encrypt_public/3, decrypt_private/3,
encrypt_private/3, decrypt_public/3,
- sign/2, sign/3, verify/5]).
+ sign/2, sign/3, verify/5, gen_key/2]).
-define(UINT32(X), X:32/unsigned-big-integer).
@@ -44,10 +44,14 @@
%%
%% Description: Public key encrypts PlainText.
%%--------------------------------------------------------------------
-encrypt_public(PlainText, #'RSAPublicKey'{modulus=N,publicExponent=E},Padding) ->
- crypto:rsa_public_encrypt(PlainText, [crypto:mpint(E),crypto:mpint(N)],Padding);
-encrypt_public(PlainText, #'RSAPrivateKey'{modulus=N,publicExponent=E},Padding) ->
- crypto:rsa_public_encrypt(PlainText, [crypto:mpint(E),crypto:mpint(N)],Padding).
+encrypt_public(PlainText, #'RSAPublicKey'{modulus=N,publicExponent=E},
+ Padding) ->
+ crypto:rsa_public_encrypt(PlainText, [crypto:mpint(E),crypto:mpint(N)],
+ Padding);
+encrypt_public(PlainText, #'RSAPrivateKey'{modulus=N,publicExponent=E},
+ Padding) ->
+ crypto:rsa_public_encrypt(PlainText, [crypto:mpint(E),crypto:mpint(N)],
+ Padding).
encrypt_private(PlainText, #'RSAPrivateKey'{modulus = N,
publicExponent = E,
@@ -67,15 +71,20 @@ encrypt_private(PlainText, #'RSAPrivateKey'{modulus = N,
%% Description: Uses private key to decrypt public key encrypted data.
%%--------------------------------------------------------------------
decrypt_private(CipherText,
- #'RSAPrivateKey'{modulus = N,publicExponent = E,privateExponent = D},
+ #'RSAPrivateKey'{modulus = N,publicExponent = E,
+ privateExponent = D},
Padding) ->
crypto:rsa_private_decrypt(CipherText,
- [crypto:mpint(E), crypto:mpint(N),crypto:mpint(D)],
- Padding).
-decrypt_public(CipherText, #'RSAPublicKey'{modulus = N, publicExponent = E}, Padding) ->
- crypto:rsa_public_decrypt(CipherText,[crypto:mpint(E), crypto:mpint(N)], Padding);
-decrypt_public(CipherText, #'RSAPrivateKey'{modulus = N, publicExponent = E}, Padding) ->
- crypto:rsa_public_decrypt(CipherText,[crypto:mpint(E), crypto:mpint(N)], Padding).
+ [crypto:mpint(E), crypto:mpint(N),
+ crypto:mpint(D)], Padding).
+decrypt_public(CipherText, #'RSAPublicKey'{modulus = N, publicExponent = E},
+ Padding) ->
+ crypto:rsa_public_decrypt(CipherText,[crypto:mpint(E), crypto:mpint(N)],
+ Padding);
+decrypt_public(CipherText, #'RSAPrivateKey'{modulus = N, publicExponent = E},
+ Padding) ->
+ crypto:rsa_public_decrypt(CipherText,[crypto:mpint(E), crypto:mpint(N)],
+ Padding).
%%--------------------------------------------------------------------
%% Function: sign(PlainText, Key) ->
@@ -125,10 +134,24 @@ verify(sha, PlainText, Signature, Key, #'Dss-Parms'{p = P, q = Q, g = G}) ->
[crypto:mpint(P), crypto:mpint(Q),
crypto:mpint(G), crypto:mpint(Key)]).
+
+%%--------------------------------------------------------------------
+%% Function: gen_key(Type, Params) ->
+%% Type = diffie_hellman
+%% Params = [P,G] | [Y, P, G]
+%% Description: Generates keys.
+%% -----------------------------------------------------------------
+gen_key(diffie_hellman, [Y, P, G]) ->
+ crypto:dh_generate_key(crypto:mpint(Y), [crypto:mpint(P),
+ crypto:mpint(G)]);
+gen_key(diffie_hellman, [P, G]) ->
+ crypto:dh_generate_key([crypto:mpint(P), crypto:mpint(G)]).
+
+%%% TODO: Support rsa, dss key_gen
+
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-
sized_binary(Binary) when is_binary(Binary) ->
Size = size(Binary),
<<?UINT32(Size), Binary/binary>>;
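A minimal usage sketch of the gen_key/2 clause added above, assuming the two-element [P, G] form; the prime and generator below are toy values, not a real Diffie-Hellman group, and the return shape simply follows crypto:dh_generate_key/1.

    %% Illustrative only: P and G are toy values, not a usable DH group.
    P = 23,
    G = 5,
    %% Returns {PublicKey, PrivateKey} as produced by crypto:dh_generate_key/1.
    {_Pub, _Priv} = pubkey_crypto:gen_key(diffie_hellman, [P, G]).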
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index abd46fa00e..9fc17b6f73 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -155,7 +155,7 @@ unhex(S) ->
unhex(S, []).
unhex("", Acc) ->
- lists:reverse(Acc);
+ list_to_binary(lists:reverse(Acc));
unhex([D1, D2 | Rest], Acc) ->
unhex(Rest, [erlang:list_to_integer([D1, D2], 16) | Acc]).
diff --git a/lib/public_key/src/public_key.appup.src b/lib/public_key/src/public_key.appup.src
index ee0f9a3cc1..46e5ecca33 100644
--- a/lib/public_key/src/public_key.appup.src
+++ b/lib/public_key/src/public_key.appup.src
@@ -1,18 +1,40 @@
%% -*- erlang -*-
{"%VSN%",
[
+ {"0.5",
+ [
+ {update, public_key, soft, soft_purge, soft_purge, []},
+ {update, pubkey_crypto, soft, soft_purge, soft_purge, []},
+ {update, pubkey_pem, soft, soft_purge, soft_purge, []},
+ {update, pubkey_cert, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"0.4",
[
{update, public_key, soft, soft_purge, soft_purge, []},
- {update, pubkey_cert_records, soft, soft_purge, soft_purge, []}
+ {update, pubkey_cert_records, soft, soft_purge, soft_purge, []},
+ {update, pubkey_crypto, soft, soft_purge, soft_purge, []},
+ {update, pubkey_pem, soft, soft_purge, soft_purge, []},
+ {update, pubkey_cert, soft, soft_purge, soft_purge, []}
]
}
],
[
+ {"0.5",
+ [
+ {update, public_key, soft, soft_purge, soft_purge, []},
+ {update, pubkey_crypto, soft, soft_purge, soft_purge, []},
+ {update, pubkey_pem, soft, soft_purge, soft_purge, []},
+ {update, pubkey_cert, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"0.4",
[
{update, public_key, soft, soft_purge, soft_purge, []},
- {update, pubkey_cert_records, soft, soft_purge, soft_purge, []}
+ {update, pubkey_cert_records, soft, soft_purge, soft_purge, []},
+ {update, pubkey_crypto, soft, soft_purge, soft_purge, []},
+ {update, pubkey_pem, soft, soft_purge, soft_purge, []},
+ {update, pubkey_cert, soft, soft_purge, soft_purge, []}
]
}
]}.
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 52c695523f..157e76bb21 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -23,13 +23,12 @@
-include("public_key.hrl").
--export([decode_private_key/1, decode_private_key/2,
+-export([decode_private_key/1, decode_private_key/2, decode_dhparams/1,
decrypt_private/2, decrypt_private/3, encrypt_public/2,
encrypt_public/3, decrypt_public/2, decrypt_public/3,
- encrypt_private/2, encrypt_private/3,
- sign/2, sign/3,
+ encrypt_private/2, encrypt_private/3, gen_key/1, sign/2, sign/3,
verify_signature/3, verify_signature/4, verify_signature/5,
- pem_to_der/1, pem_to_der/2,
+ pem_to_der/1, pem_to_der/2, der_to_pem/2,
pkix_decode_cert/2, pkix_encode_cert/1, pkix_transform/2,
pkix_is_self_signed/1, pkix_is_fixed_dh_cert/1,
pkix_issuer_id/2,
@@ -62,6 +61,21 @@ decode_private_key(KeyInfo = {dsa_private_key, _, _}, Password) ->
DerEncoded = pubkey_pem:decode_key(KeyInfo, Password),
'OTP-PUB-KEY':decode('DSAPrivateKey', DerEncoded).
+
+%%--------------------------------------------------------------------
+%% Function: decode_dhparams(DhParamsInfo) ->
+%% {ok, DhParams} | {error, Reason}
+%%
+%% DhParamsInfo = {Type, der_bin(), ChipherInfo} - as returned from
+%% pem_to_der/[1,2] for DH parameters.
+%% Type = dh_params
+%% ChipherInfo = opaque() | no_encryption
+%%
+%% Description: Decodes asn1 der encoded DH parameters.
+%%--------------------------------------------------------------------
+decode_dhparams({dh_params, DerEncoded, not_encrypted}) ->
+ 'OTP-PUB-KEY':decode('DHParameter', DerEncoded).
+
%%--------------------------------------------------------------------
%% Function: decrypt_private(CipherText, Key) ->
%% decrypt_private(CipherText, Key, Options) -> PlainTex
@@ -109,6 +123,18 @@ encrypt_private(PlainText, Key, Options) ->
pubkey_crypto:encrypt_private(PlainText, Key, Padding).
%%--------------------------------------------------------------------
+%% Function: gen_key(Params) -> Keys
+%%
+%% Params = #'DomainParameters'{} - Currently only supported option
+%% Keys = {PublicDHKey = integer(), PrivateDHKey = integer()}
+%%
+%% Description: Generates keys. Currently supports Diffie-Hellman keys.
+%%--------------------------------------------------------------------
+gen_key(#'DHParameter'{prime = P, base = G}) when is_integer(P),
+ is_integer(G) ->
+ pubkey_crypto:gen_key(diffie_hellman, [P, G]).
+
+%%--------------------------------------------------------------------
%% Function: pem_to_der(CertSource) ->
%% pem_to_der(CertSource, Password) -> {ok, [Entry]} |
%% {error, Reason}
@@ -116,7 +142,6 @@ encrypt_private(PlainText, Key, Options) ->
%% CertSource = File | CertData
%% CertData = binary()
%% File = path()
-%% Password = string()
%% Entry = {entry_type(), der_bin(), ChipherInfo}
%% ChipherInfo = opague() | no_encryption
%% der_bin() = binary()
@@ -127,7 +152,9 @@ encrypt_private(PlainText, Key, Options) ->
%% entries as asn1 der encoded entities. Currently supported entry
%% types are certificates, certificate requests, rsa private keys and
%% dsa private keys. In the case of a key entry ChipherInfo will be
-%% used by decode_private_key/2 if the key is protected by a password.
+%% private keys and Diffie-Hellman parameters. In the case of a key
+%% entry ChipherInfo will be used by decode_private_key/2 if the key
+%% is protected by a password.
%%--------------------------------------------------------------------
pem_to_der(CertSource) ->
pem_to_der(CertSource, no_passwd).
@@ -137,6 +164,9 @@ pem_to_der(File, Password) when is_list(File) ->
pem_to_der(PemBin, Password) when is_binary(PemBin) ->
pubkey_pem:decode(PemBin, Password).
+der_to_pem(File, TypeDerList) ->
+ pubkey_pem:write_file(File, TypeDerList).
+
%%--------------------------------------------------------------------
%% Function: pkix_decode_cert(BerCert, Type) -> {ok, Cert} | {error, Reason}
%%
@@ -288,9 +318,10 @@ sign(Msg, #'RSAPrivateKey'{} = Key) when is_binary(Msg) ->
sign(Msg, #'DSAPrivateKey'{} = Key) when is_binary(Msg) ->
pubkey_crypto:sign(Msg, Key);
-sign(#'OTPTBSCertificate'{signature = SigAlg} = TBSCert, Key) ->
+sign(#'OTPTBSCertificate'{signature = #'SignatureAlgorithm'{algorithm = Alg}
+ = SigAlg} = TBSCert, Key) ->
Msg = pubkey_cert_records:encode_tbs_cert(TBSCert),
- DigestType = pubkey_cert:digest_type(SigAlg),
+ DigestType = pubkey_cert:digest_type(Alg),
Signature = pubkey_crypto:sign(DigestType, Msg, Key),
Cert = #'OTPCertificate'{tbsCertificate= TBSCert,
signatureAlgorithm = SigAlg,
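A hedged end-to-end sketch of the entry points added to public_key above (pem_to_der/1, decode_dhparams/1, gen_key/1 and der_to_pem/2); the file names are hypothetical, the file is assumed to hold a single DH parameters entry, and error handling is omitted.

    %% Sketch: read DH parameters from a PEM file, generate a key pair from
    %% them, and write the entries back out. File names are placeholders.
    {ok, Entries} = public_key:pem_to_der("dhparams.pem"),
    [{dh_params, _Der, not_encrypted} = Entry | _] = Entries,
    {ok, DHParams} = public_key:decode_dhparams(Entry),
    %% gen_key/1 delegates to pubkey_crypto:gen_key(diffie_hellman, [P, G]).
    {_PubKey, _PrivKey} = public_key:gen_key(DHParams),
    %% der_to_pem/2 is a thin wrapper around pubkey_pem:write_file/2.
    public_key:der_to_pem("dhparams_copy.pem", Entries).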
diff --git a/lib/public_key/vsn.mk b/lib/public_key/vsn.mk
index f4d5281e94..da1465d538 100644
--- a/lib/public_key/vsn.mk
+++ b/lib/public_key/vsn.mk
@@ -1,6 +1,7 @@
-PUBLIC_KEY_VSN = 0.5
-
-TICKETS = OTP-8372
+PUBLIC_KEY_VSN = 0.6
+TICKETS = OTP-7046 \
+ OTP-8553
+#TICKETS_0.5 = OTP-8372
#TICKETS_0.4 = OTP-8250
#TICKETS_0.3 = OTP-8100 OTP-8142
#TICKETS_0.2 = OTP-7860
diff --git a/lib/sasl/doc/src/notes.xml b/lib/sasl/doc/src/notes.xml
index 390b353386..e528af2522 100644
--- a/lib/sasl/doc/src/notes.xml
+++ b/lib/sasl/doc/src/notes.xml
@@ -30,6 +30,49 @@
</header>
<p>This document describes the changes made to the SASL application.</p>
+<section><title>SASL 2.1.9.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>In R13B04 sys:get_status was modified to invoke
+ format_status/2 in the callback module if the module
+ exports that function. This resulted in a change to the
+ term returned from calling sys:get_status on the
+ supervisor module, since supervisor is a gen_server and
+ gen_server exports format_status. The sasl
+ release_handler_1 module had a dependency on the
+ pre-R13B04 term returned by sys:get_status when invoked
+ on a supervisor, so the R13B04 change broke that
+ dependency.</p>
+          <p>This problem has been fixed by changing
+ release_handler_1 to handle both the pre-R13B04 and
+ R13B04 terms that sys:get_status can return from a
+ supervisor.</p>
+ <p>
+ Own Id: OTP-8619 Aux Id: seq11570 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SASL 2.1.9.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>Use an infinity timeout in all calls to
+ <c>gen_server:call()</c> in the <c>sasl</c>
+ application.</p>
+ <p>
+ Own Id: OTP-8506 Aux Id: seq11509 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SASL 2.1.9</title>
<section><title>Improvements and New Features</title>
diff --git a/lib/sasl/src/overload.erl b/lib/sasl/src/overload.erl
index 3a9a51e8bf..5a4782efff 100644
--- a/lib/sasl/src/overload.erl
+++ b/lib/sasl/src/overload.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(overload).
@@ -71,7 +71,7 @@ init([]) ->
%% establish a call.
%% Returns: accept | reject
%%-----------------------------------------------------------------
-request() -> gen_server:call(overload, request).
+request() -> call(request).
%%-----------------------------------------------------------------
%% Func: set_config_data/2
@@ -82,13 +82,19 @@ request() -> gen_server:call(overload, request).
%% documented at all.
%%-----------------------------------------------------------------
set_config_data(MaxIntensity, Weight) ->
- gen_server:call(overload, {set_config_data, MaxIntensity, Weight}).
+ call({set_config_data, MaxIntensity, Weight}).
%%-----------------------------------------------------------------
%% Func: get_overload_info/0
%% Returns: A list of tagged items: TotalIntensity, AcceptIntensity,
%% MaxIntensity, Weight, TotalRequests, AcceptedRequests.
%%-----------------------------------------------------------------
-get_overload_info() -> gen_server:call(overload, get_overload_info).
+get_overload_info() -> call(get_overload_info).
+
+%%-----------------------------------------------------------------
+%% call(Request) -> Term
+%%-----------------------------------------------------------------
+call(Req) ->
+ gen_server:call(overload, Req, infinity).
%%%-----------------------------------------------------------------
%%% Callback functions from gen_server
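The same call/1 wrapper recurs in rb, release_handler and si_sasl_supp below; a generic sketch of the pattern follows, with my_server as a placeholder registered name, to show why the explicit infinity timeout matters.

    %% Without the third argument, gen_server:call/2 uses a default 5000 ms
    %% timeout and the caller exits with {timeout, _} if the server is slow.
    %% Funnelling every API function through one helper applies the infinity
    %% timeout uniformly across the module.
    request() -> call(request).

    call(Req) ->
        gen_server:call(my_server, Req, infinity).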
diff --git a/lib/sasl/src/rb.erl b/lib/sasl/src/rb.erl
index 332a99c6f9..38e486b7a7 100644
--- a/lib/sasl/src/rb.erl
+++ b/lib/sasl/src/rb.erl
@@ -53,35 +53,35 @@ start_link(Options) ->
gen_server:start_link({local, rb_server}, rb, Options, []).
stop() ->
- gen_server:call(rb_server, stop),
+ call(stop),
supervisor:delete_child(sasl_sup, rb_server).
rescan() -> rescan([]).
rescan(Options) ->
- gen_server:call(rb_server, {rescan, Options}, infinity).
+ call({rescan, Options}).
list() -> list(all).
-list(Type) -> gen_server:call(rb_server, {list, Type}, infinity).
+list(Type) -> call({list, Type}).
show() ->
- gen_server:call(rb_server, show, infinity).
+ call(show).
show(Number) when is_integer(Number) ->
- gen_server:call(rb_server, {show_number, Number}, infinity);
+ call({show_number, Number});
show(Type) when is_atom(Type) ->
- gen_server:call(rb_server, {show_type, Type}, infinity).
+ call({show_type, Type}).
-grep(RegExp) -> gen_server:call(rb_server, {grep, RegExp}, infinity).
+grep(RegExp) -> call({grep, RegExp}).
filter(Filters) when is_list(Filters) ->
- gen_server:call(rb_server, {filter, Filters}, infinity).
+ call({filter, Filters}).
filter(Filters, FDates) when is_list(Filters) andalso is_tuple(FDates) ->
- gen_server:call(rb_server, {filter, {Filters, FDates}}, infinity).
+ call({filter, {Filters, FDates}}).
-start_log(FileName) -> gen_server:call(rb_server, {start_log, FileName}).
+start_log(FileName) -> call({start_log, FileName}).
-stop_log() -> gen_server:call(rb_server, stop_log).
+stop_log() -> call(stop_log).
h() -> help().
help() ->
@@ -122,6 +122,13 @@ help() ->
%%-----------------------------------------------------------------
%% Internal functions.
%%-----------------------------------------------------------------
+
+%%-----------------------------------------------------------------
+%% call(Request) -> Term
+%%-----------------------------------------------------------------
+call(Req) ->
+ gen_server:call(rb_server, Req, infinity).
+
%%-----------------------------------------------------------------
%% MAKE SURE THESE TWO FUNCTIONS ARE UPDATED!
%%-----------------------------------------------------------------
diff --git a/lib/sasl/src/release_handler.erl b/lib/sasl/src/release_handler.erl
index 42c3d9dd4b..4c43277848 100644
--- a/lib/sasl/src/release_handler.erl
+++ b/lib/sasl/src/release_handler.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(release_handler).
@@ -144,7 +144,7 @@ start_link() ->
%% exit_reason()
%%-----------------------------------------------------------------
unpack_release(ReleaseName) ->
- gen_server:call(release_handler, {unpack_release, ReleaseName}, infinity).
+ call({unpack_release, ReleaseName}).
%%-----------------------------------------------------------------
%% Purpose: Checks the relup script for the specified version.
@@ -157,7 +157,7 @@ unpack_release(ReleaseName) ->
%% exit_reason()
%%-----------------------------------------------------------------
check_install_release(Vsn) ->
- gen_server:call(release_handler, {check_install_release, Vsn}, infinity).
+ call({check_install_release, Vsn}).
%%-----------------------------------------------------------------
@@ -172,16 +172,13 @@ check_install_release(Vsn) ->
%% exit_reason()
%%-----------------------------------------------------------------
install_release(Vsn) ->
- gen_server:call(release_handler,
- {install_release, Vsn, restart, []},
- infinity).
+ call({install_release, Vsn, restart, []}).
+
install_release(Vsn, Opt) ->
case check_install_options(Opt, restart, []) of
{ok, ErrorAction, InstallOpt} ->
- gen_server:call(release_handler,
- {install_release, Vsn, ErrorAction, InstallOpt},
- infinity);
+ call({install_release, Vsn, ErrorAction, InstallOpt});
Error ->
Error
end.
@@ -222,13 +219,13 @@ check_timeout(_Else) -> false.
%% exit_reason()
%%-----------------------------------------------------------------
make_permanent(Vsn) ->
- gen_server:call(release_handler, {make_permanent, Vsn}, infinity).
+ call({make_permanent, Vsn}).
%%-----------------------------------------------------------------
%% Purpose: Reboots the system from an old release.
%%-----------------------------------------------------------------
reboot_old_release(Vsn) ->
- gen_server:call(release_handler, {reboot_old_release, Vsn}, infinity).
+ call({reboot_old_release, Vsn}).
%%-----------------------------------------------------------------
%% Purpose: Deletes all files and directories used by the release
@@ -238,7 +235,7 @@ reboot_old_release(Vsn) ->
%% Reason = {permanent, Vsn} |
%%-----------------------------------------------------------------
remove_release(Vsn) ->
- gen_server:call(release_handler, {remove_release, Vsn}, infinity).
+ call({remove_release, Vsn}).
%%-----------------------------------------------------------------
%% Args: RelFile = string()
@@ -257,7 +254,7 @@ remove_release(Vsn) ->
%% Returns: ok | {error, Reason}
%%-----------------------------------------------------------------
set_unpacked(RelFile, LibDirs) ->
- gen_server:call(release_handler, {set_unpacked, RelFile, LibDirs}).
+ call({set_unpacked, RelFile, LibDirs}).
%%-----------------------------------------------------------------
%% Args: Vsn = string()
@@ -267,7 +264,7 @@ set_unpacked(RelFile, LibDirs) ->
%% Returns: ok | {error, Reason}
%%-----------------------------------------------------------------
set_removed(Vsn) ->
- gen_server:call(release_handler, {set_removed, Vsn}).
+ call({set_removed, Vsn}).
%%-----------------------------------------------------------------
%% Purpose: Makes it possible to install the start.boot,
@@ -278,14 +275,14 @@ set_removed(Vsn) ->
%% Returns: ok | {error, {no_such_release, Vsn}}
%%-----------------------------------------------------------------
install_file(Vsn, File) when is_list(File) ->
- gen_server:call(release_handler, {install_file, File, Vsn}).
+ call({install_file, File, Vsn}).
%%-----------------------------------------------------------------
%% Returns: [{Name, Vsn, [LibName], Status}]
%% Status = unpacked | current | permanent | old
%%-----------------------------------------------------------------
which_releases() ->
- gen_server:call(release_handler, which_releases).
+ call(which_releases).
%%-----------------------------------------------------------------
%% check_script(Script, LibDirs) -> ok | {error, Reason}
@@ -468,11 +465,11 @@ read_appspec(App, Dir) ->
throw(Reason)
end.
-
-
-
-
-
+%%-----------------------------------------------------------------
+%% call(Request) -> Term
+%%-----------------------------------------------------------------
+call(Req) ->
+ gen_server:call(release_handler, Req, infinity).
%%-----------------------------------------------------------------
diff --git a/lib/sasl/src/release_handler_1.erl b/lib/sasl/src/release_handler_1.erl
index e3e3caba99..9c0edf4e99 100644
--- a/lib/sasl/src/release_handler_1.erl
+++ b/lib/sasl/src/release_handler_1.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(release_handler_1).
@@ -554,7 +554,13 @@ get_supervisor_module(SupPid) ->
get_supervisor_module1(SupPid) ->
{status, _Pid, {module, _Mod},
[_PDict, _SysState, _Parent, _Dbg, Misc]} = sys:get_status(SupPid),
- [_Name, State, _Type, _Time] = Misc,
+ %% supervisor Misc field changed at R13B04, handle old and new variants here
+ State = case Misc of
+ [_Name, State1, _Type, _Time] ->
+ State1;
+ [_Header, _Data, {data, [{"State", State2}]}] ->
+ State2
+ end,
%% Cannot use #supervisor_state{module = Module} = State.
{ok, element(#supervisor_state.module, State)}.
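A hedged shell-level illustration of the two Misc shapes that the added case clause distinguishes; kernel_sup is used only as a convenient running supervisor to probe, and the exact contents of State depend on the supervisor's internal record.

    %% sys:get_status/1 on a supervisor returns
    %% {status, Pid, {module, Mod}, [PDict, SysState, Parent, Dbg, Misc]}.
    {status, _Pid, {module, _Mod},
     [_PDict, _SysState, _Parent, _Dbg, Misc]} = sys:get_status(kernel_sup),
    case Misc of
        [_Name, State1, _Type, _Time] ->                 %% pre-R13B04 shape
            State1;
        [_Header, _Data, {data, [{"State", State2}]}] -> %% R13B04 and later
            State2
    end.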
diff --git a/lib/sasl/src/si_sasl_supp.erl b/lib/sasl/src/si_sasl_supp.erl
index 52dbed2e00..9c96d11c28 100644
--- a/lib/sasl/src/si_sasl_supp.erl
+++ b/lib/sasl/src/si_sasl_supp.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(si_sasl_supp).
@@ -57,13 +57,13 @@
h() -> print_help().
help() -> print_help().
-si_exec(Fun, Args) -> gen_server:call(si_server, {si_exec, Fun, Args}).
+si_exec(Fun, Args) -> call({si_exec, Fun, Args}).
start_log(FileName) ->
- gen_server:call(si_server, {start_log, FileName}).
+ call({start_log, FileName}).
stop_log() ->
- gen_server:call(si_server, stop_log).
+ call(stop_log).
abbrevs() ->
io:format("~p", [process_abbrevs()]).
@@ -123,7 +123,7 @@ start_link(_Options) ->
gen_server:start_link({local, si_server}, si_sasl_supp, [], []).
stop() ->
- gen_server:call(si_server, stop),
+ call(stop),
supervisor:delete_child(sasl_sup, si_server).
@@ -208,6 +208,12 @@ open_log_file(FileName) ->
%% 3. Code
%%--------------------------------------------------
+%%-----------------------------------------------------------------
+%% call(Request) -> Term
+%%-----------------------------------------------------------------
+call(Req) ->
+ gen_server:call(si_server, Req, infinity).
+
%%--------------------------------------------------
%% Makes a Pid of almost anything.
%% Returns: Pid|{error, Reason}
diff --git a/lib/sasl/vsn.mk b/lib/sasl/vsn.mk
index c73301687a..d01a9bc4f1 100644
--- a/lib/sasl/vsn.mk
+++ b/lib/sasl/vsn.mk
@@ -1 +1 @@
-SASL_VSN = 2.1.9
+SASL_VSN = 2.1.9.2
diff --git a/lib/snmp/doc/src/notes.xml b/lib/snmp/doc/src/notes.xml
index 8939fbd6df..d5d6605b64 100644
--- a/lib/snmp/doc/src/notes.xml
+++ b/lib/snmp/doc/src/notes.xml
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>SNMP Release Notes</title>
@@ -33,6 +33,226 @@
</header>
<section>
+ <title>SNMP Development Toolkit 4.17.1</title>
+ <p>Version 4.17.1 supports code replacement in runtime from/to
+ version 4.17, 4.16.2, 4.16.1, 4.16, 4.15, 4.14 and 4.13.5.</p>
+
+ <section>
+ <title>Improvements and new features</title>
+ <p>-</p>
+ </section>
+
+ <section>
+ <title>Reported Fixed Bugs and Malfunctions</title>
+ <list type="bulleted">
+ <item>
+ <p>When the function FilterMod:accept_recv/2
+ returned false the SNMP agent stopped collecting messages from UDP.</p>
+ <p>Own Id: OTP-8761</p>
+ </item>
+ </list>
+ </section>
+
+ <section>
+ <title>Incompatibilities</title>
+ <p>-</p>
+ </section>
+ </section> <!-- 4.17.1 -->
+
+
+ <section>
+ <title>SNMP Development Toolkit 4.17</title>
+ <p>Version 4.17 supports code replacement in runtime from/to
+ version 4.16.2, 4.16.1, 4.16, 4.15, 4.14 and 4.13.5.</p>
+
+ <section>
+ <title>Improvements and new features</title>
+ <!--
+ <p>-</p>
+ -->
+ <list type="bulleted">
+ <item>
+ <p>[agent] Added very basic support for multiple SNMPv3
+ EngineIDs in a single agent. See
+ <seealso marker="snmpa#send_notification">send_notification/7</seealso>,
+ <seealso marker="snmpa_mpd#process_packet">process_packet/7</seealso>,
+ <seealso marker="snmpa_mpd#generate_response_msg">generate_response_msg/6</seealso> or
+ <seealso marker="snmpa_mpd#generate_msg">generate_msg/6</seealso>
+ for more info. </p>
+
+ <p>Own Id: OTP-8478</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Reported Fixed Bugs and Malfunctions</title>
+ <p>-</p>
+
+ <!--
+ <list type="bulleted">
+ <item>
+ <p>The config utility
+ (<seealso marker="snmp#config">snmp:config/0</seealso>)
+ generated a default notify.conf
+         with a bad name for the standard trap entry (was "stadard trap",
+ but should have been "standard trap"). This has been corrected. </p>
+ <p>Kenji Rikitake</p>
+ <p>Own Id: OTP-8433</p>
+ </item>
+
+ </list>
+ -->
+
+ </section>
+
+ <section>
+ <title>Incompatibilities</title>
+ <p>-</p>
+ </section>
+ </section> <!-- 4.17 -->
+
+
+ <section>
+ <title>SNMP Development Toolkit 4.16.2</title>
+ <p>Version 4.16.2 supports code replacement in runtime from/to
+ version 4.16.1, 4.16, 4.15, 4.14 and 4.13.5.</p>
+
+ <section>
+ <title>Improvements and new features</title>
+ <!--
+ <p>-</p>
+ -->
+ <list type="bulleted">
+ <item>
+ <p>[compiler] The SMI specifies that a table row OID should be
+ named: { &lt;tableIdentifier&gt; "1" }. </p>
+ <p>A new option has been introduced,
+ <seealso marker="snmpc#compiler_opts">relaxed_row_name_assign_check</seealso>,
+            that allows for a more liberal numbering scheme.</p>
+ <p>Own Id: OTP-8574</p>
+ </item>
+
+ <item>
+ <p>[agent|manager] Changes to make snmp (forward) compatible with
+ the new version of the crypto application (released in R14).
+ As of R14, crypto is implemented using NIFs. Also,
+ the API is more strict. </p>
+ <p>Own Id: OTP-8594</p>
+ </item>
+
+ <item>
+          <p>[agent] Changed the default value for the MIB server cache.
+          Auto GC is now on by default. </p>
+ <p>Own Id: OTP-8648</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Reported Fixed Bugs and Malfunctions</title>
+ <!--
+ <p>-</p>
+ -->
+
+ <list type="bulleted">
+ <item>
+ <p>Encode/decode of Counter64 values larger than
+ 16#7fffffffffffffff (9223372036854775807) failed. </p>
+ <p>Own Id: OTP-8563</p>
+ </item>
+
+ <item>
+ <p>[compiler] Fails to compile non-contiguous BITS. </p>
+ <p>Per Hedeland</p>
+ <p>Own Id: OTP-8595</p>
+ </item>
+
+ <item>
+          <p>[manager] Race condition causing the manager server process to
+          crash. Unregistering an agent while traffic (set/get-operations)
+          is ongoing could cause a crash in the manager server process
+          (race condition). </p>
+ <p>Own Id: OTP-8646</p>
+ <p>Aux Id: Seq 11585</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Incompatibilities</title>
+ <p>-</p>
+ </section>
+ </section> <!-- 4.16.2 -->
+
+
+ <section>
+ <title>SNMP Development Toolkit 4.16.1</title>
+ <p>Version 4.16.1 supports code replacement in runtime from/to
+ version 4.16, 4.15, 4.14 and 4.13.5.</p>
+
+ <section>
+ <title>Improvements and new features</title>
+ <p>-</p>
+ <!--
+ <list type="bulleted">
+ <item>
+ <p>[agent|manager] Entries in the audit trail log can now be
+ augmented by a sequence number. </p>
+ <p>This is enabled by the <c>seqno</c> option, which is part of the
+ <seealso marker="snmp_config#audit_trail_log">Audit Trail Log</seealso>
+ config option. </p>
+ <p>See the
+ <seealso marker="snmp_app#configuration_params">reference manual</seealso>
+ or the
+ <seealso marker="snmp_config#configuration_params">Configuring the application</seealso>
+ chapter of the User's Guide for further info. </p>
+
+ <p>Own Id: OTP-8395</p>
+ </item>
+
+ </list>
+ -->
+
+ </section>
+
+ <section>
+ <title>Reported Fixed Bugs and Malfunctions</title>
+ <!--
+ <p>-</p>
+ -->
+
+ <list type="bulleted">
+ <item>
+ <p>[manager] Fixed an upgrade/downgrade problem. </p>
+ <p>Upgrade/downgrade from/to 4.13.5 did not work for the net-if
+ process. This has now been fixed. </p>
+ <p>Own Id: OTP-8481</p>
+ </item>
+
+ <item>
+ <p>[agent] A minor mnesia related performance improvement. </p>
+ <p>Own Id: OTP-8480</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Incompatibilities</title>
+ <p>-</p>
+ </section>
+ </section> <!-- 4.16.1 -->
+
+
+ <section>
<title>SNMP Development Toolkit 4.16</title>
<p>Version 4.16 supports code replacement in runtime from/to
version 4.15, 4.14 and 4.13.5.</p>
diff --git a/lib/snmp/doc/src/notes_history.xml b/lib/snmp/doc/src/notes_history.xml
index 8739400773..934df87866 100644
--- a/lib/snmp/doc/src/notes_history.xml
+++ b/lib/snmp/doc/src/notes_history.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>SNMP Release Notes history</title>
@@ -1732,7 +1732,7 @@
version 4.1. </p>
<p>When performing a downgrade, make sure the verbosity of the
manager server process is silence, or else the process will crash
- (due to a bug in version 4.0.4) and be restarted by it's
+ (due to a bug in version 4.0.4) and be restarted by its
supervisor.</p>
<section>
@@ -1773,7 +1773,7 @@
version 4.0.4. </p>
<p>When performing a downgrade, make sure the verbosity of the
manager server process is silence, or else the process will crash
- (due to a bug in version 4.0.4) and be restarted by it's
+ (due to a bug in version 4.0.4) and be restarted by its
supervisor.</p>
<section>
@@ -1827,8 +1827,8 @@
</item>
<item>
<p>[manager] The server process contained a bug that caused it
- to crash, if it received an exit message from it's gct (GC timer)
- process and it's verbosity was <em>log</em> or higher. </p>
+ to crash, if it received an exit message from its gct (GC timer)
+ process and its verbosity was <em>log</em> or higher. </p>
<p>This also effects the application downgrade. </p>
<p>Own Id: OTP-5306</p>
</item>
diff --git a/lib/snmp/doc/src/snmp_app.xml b/lib/snmp/doc/src/snmp_app.xml
index e5a725d720..694e619da1 100644
--- a/lib/snmp/doc/src/snmp_app.xml
+++ b/lib/snmp/doc/src/snmp_app.xml
@@ -80,7 +80,7 @@
<!-- The info below is also found in the snmp_config.xml file -->
- <p>Each snmp component has it's own set of configuration parameters,
+ <p>Each snmp component has its own set of configuration parameters,
even though some of the types are common to both components. </p>
<pre>
@@ -346,7 +346,7 @@
<p>Defines if the mib server shall perform cache gc automatically or
leave it to the user (see
<seealso marker="snmpa#gc_mibs_cache">gc_mibs_cache/0,1,2,3</seealso>). </p>
- <p>Default is <c>false</c>.</p>
+ <p>Default is <c>true</c>.</p>
</item>
<tag><c><![CDATA[mibs_cache_age() = integer() > 0 <optional>]]></c></tag>
@@ -693,7 +693,7 @@
<p>Specifies if and how the audit trail log shall be repaired
when opened. Unless this parameter has the value <c>snmp_repair</c>
it is sent to <c>disk_log</c>. If, on the other hand, the value is
- <c>snmp_repair</c>, snmp attempts to handle certain faults on it's
+ <c>snmp_repair</c>, snmp attempts to handle certain faults on its
own. And even if it cannot repair the file, it does not truncate it
directly, but instead <em>moves it aside</em> for later off-line
analysis.</p>
diff --git a/lib/snmp/doc/src/snmp_config.xml b/lib/snmp/doc/src/snmp_config.xml
index 6fc9cc49f2..769b908adc 100644
--- a/lib/snmp/doc/src/snmp_config.xml
+++ b/lib/snmp/doc/src/snmp_config.xml
@@ -343,7 +343,7 @@
<p>Defines if the mib server shall perform cache gc automatically or
leave it to the user (see
<seealso marker="snmpa#gc_mibs_cache">gc_mibs_cache/0,1,2,3</seealso>). </p>
- <p>Default is <c>false</c>.</p>
+ <p>Default is <c>true</c>.</p>
</item>
<tag><c><![CDATA[mibs_cache_age() = integer() > 0 <optional>]]></c></tag>
@@ -704,7 +704,7 @@
<p>Specifies if and how the audit trail log shall be repaired
when opened. Unless this parameter has the value <c>snmp_repair</c>
it is sent to <c>disk_log</c>. If, on the other hand, the value is
- <c>snmp_repair</c>, snmp attempts to handle certain faults on it's
+ <c>snmp_repair</c>, snmp attempts to handle certain faults on its
own. And even if it cannot repair the file, it does not truncate it
directly, but instead <em>moves it aside</em> for later off-line
analysis.</p>
diff --git a/lib/snmp/doc/src/snmpa.xml b/lib/snmp/doc/src/snmpa.xml
index b3661ae9b0..1be6abe6dd 100644
--- a/lib/snmp/doc/src/snmpa.xml
+++ b/lib/snmp/doc/src/snmpa.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>snmpa</title>
@@ -648,6 +648,20 @@ notification_delivery_info() = #snmpa_notification_delivery_info{}
<desc>
<p>Disable the mib server cache. </p>
+ <marker id="which_mibs_cache_size"></marker>
+ </desc>
+ </func>
+
+ <func>
+ <name>which_mibs_cache_size() -> void()</name>
+ <name>which_mibs_cache_size(Agent) -> void()</name>
+ <fsummary>The size of the mib server cache</fsummary>
+ <type>
+ <v>Agent = pid() | atom()</v>
+ </type>
+ <desc>
+        <p>Retrieve the size of the mib server cache. </p>
+
<marker id="gc_mibs_cache"></marker>
</desc>
</func>
@@ -867,6 +881,7 @@ snmp_agent:register_subagent(SA1,[1,2,3], SA2).
<name>send_notification(Agent, Notification, Receiver, Varbinds)</name>
<name>send_notification(Agent, Notification, Receiver, NotifyName, Varbinds)</name>
<name>send_notification(Agent, Notification, Receiver, NotifyName, ContextName, Varbinds) -> void() </name>
+ <name>send_notification(Agent, Notification, Receiver, NotifyName, ContextName, Varbinds, LocalEngineID) -> void() </name>
<fsummary>Send a notification</fsummary>
<type>
<v>Agent = pid() | atom()</v>
@@ -888,6 +903,7 @@ snmp_agent:register_subagent(SA1,[1,2,3], SA2).
<v>OID = oid()</v>
<v>Value = term()</v>
<v>RowIndex = [int()]</v>
+ <v>LocalEngineID = string()</v>
</type>
<desc>
<p>Sends the notification <c>Notification</c> to the
@@ -1027,6 +1043,7 @@ snmp_agent:register_subagent(SA1,[1,2,3], SA2).
<item><c>{?sysLocation_instance, "upstairs"}</c> (provided
that the generated <c>.hrl</c> file is included)</item>
</list>
+
<p>If a variable in the notification is a table element, the
<c>RowIndex</c> for the element must be given in the
<c>Varbinds</c> list. In this case, the OBJECT IDENTIFIER sent
@@ -1034,15 +1051,27 @@ snmp_agent:register_subagent(SA1,[1,2,3], SA2).
element. This OBJECT IDENTIFIER could be used in a get
operation later.
</p>
+
<p>This function is asynchronous, and does not return any
information. If an error occurs, <c>user_err/2</c> of the error
report module is called and the notification is discarded.
</p>
+ <note>
+ <p>Note that the use of the LocalEngineID argument is only intended
+ for special cases, if the agent is to "emulate" multiple EngineIDs!
+ By default, the agent uses the value of <c>SnmpEngineID</c>
+ (see SNMP-FRAMEWORK-MIB). </p>
+ </note>
+
+<!--
<marker id="send_trap"></marker>
+-->
+ <marker id="discovery"></marker>
</desc>
</func>
+<!--
<func>
<name>send_trap(Agent,Trap,Community)</name>
<name>send_trap(Agent,Trap,Community,Varbinds) -> void()</name>
@@ -1114,6 +1143,7 @@ snmp_agent:register_subagent(SA1,[1,2,3], SA2).
<marker id="discovery"></marker>
</desc>
</func>
+-->
<func>
<name>discovery(TargetName, Notification) -> {ok, ManagerEngineID} | {error, Reason}</name>
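A hedged sketch of the send_notification/7 variant documented above; the notification name and engine ID string are made-up values.

    %% Emit a notification under an alternate (local) engine ID.
    LocalEngineID = "remote engine 1",
    snmpa:send_notification(snmp_master_agent, myNotification, no_receiver,
                            "", "", [], LocalEngineID).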
diff --git a/lib/snmp/doc/src/snmpa_mpd.xml b/lib/snmp/doc/src/snmpa_mpd.xml
index ea5bde8956..202e6b5661 100644
--- a/lib/snmp/doc/src/snmpa_mpd.xml
+++ b/lib/snmp/doc/src/snmpa_mpd.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1999</year><year>2009</year>
+ <year>1999</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>snmpa_mpd</title>
@@ -63,15 +63,19 @@
</func>
<func>
- <name>process_packet(Packet, TDomain, TAddress, State) -> {ok, Vsn, Pdu, PduMS, ACMData} | {discarded, Reason} | {discovery, DiscoPacket}</name>
+ <name>process_packet(Packet, TDomain, TAddress, State, NoteStore, Log) -> {ok, Vsn, Pdu, PduMS, ACMData} | {discarded, Reason} | {discovery, DiscoPacket}</name>
+ <name>process_packet(Packet, TDomain, TAddress, LocalEngineID, State, NoteStore, Log) -> {ok, Vsn, Pdu, PduMS, ACMData} | {discarded, Reason} | {discovery, DiscoPacket}</name>
<fsummary>Process a packet received from the network</fsummary>
<type>
<v>Packet = binary()</v>
<v>TDomain = snmpUDPDomain</v>
<v>TAddress = {Ip, Udp}</v>
+ <v>LocalEngineID = string()</v>
<v>Ip = {integer(), integer(), integer(), integer()}</v>
<v>Udp = integer()</v>
<v>State = mpd_state()</v>
+ <v>NoteStore = pid()</v>
+ <v>Log = snmp_log()</v>
<v>Vsn = 'version-1' | 'version-2' | 'version-3'</v>
<v>Pdu = #pdu</v>
<v>PduMs = integer()</v>
@@ -84,18 +88,27 @@
decryption as necessary. The return values should be passed the
agent.</p>
+ <note>
+ <p>Note that the use of the LocalEngineID argument is only intended
+ for special cases, if the agent is to "emulate" multiple EngineIDs!
+ By default, the agent uses the value of <c>SnmpEngineID</c>
+ (see SNMP-FRAMEWORK-MIB). </p>
+ </note>
+
<marker id="generate_response_msg"></marker>
</desc>
</func>
<func>
- <name>generate_response_msg(Vsn, RePdu, Type, ACMData) -> {ok, Packet} | {discarded, Reason}</name>
+ <name>generate_response_msg(Vsn, RePdu, Type, ACMData, Log) -> {ok, Packet} | {discarded, Reason}</name>
+ <name>generate_response_msg(Vsn, RePdu, Type, ACMData, LocalEngineID, Log) -> {ok, Packet} | {discarded, Reason}</name>
<fsummary>Generate a response packet to be sent to the network</fsummary>
<type>
<v>Vsn = 'version-1' | 'version-2' | 'version-3'</v>
<v>RePdu = #pdu</v>
<v>Type = atom()</v>
<v>ACMData = acm_data()</v>
+ <v>LocalEngineID = string()</v>
<v>Packet = binary()</v>
</type>
<desc>
@@ -103,17 +116,27 @@
network. <c>Type</c> is the <c>#pdu.type</c> of the original
request.</p>
+ <note>
+ <p>Note that the use of the LocalEngineID argument is only intended
+ for special cases, if the agent is to "emulate" multiple EngineIDs!
+ By default, the agent uses the value of <c>SnmpEngineID</c>
+ (see SNMP-FRAMEWORK-MIB). </p>
+ </note>
+
<marker id="generate_msg"></marker>
</desc>
</func>
<func>
- <name>generate_msg(Vsn, Pdu, MsgData, To) -> {ok, PacketsAndAddresses} | {discarded, Reason}</name>
+ <name>generate_msg(Vsn, NoteStore, Pdu, MsgData, To) -> {ok, PacketsAndAddresses} | {discarded, Reason}</name>
+ <name>generate_msg(Vsn, NoteStore, Pdu, MsgData, LocalEngineID, To) -> {ok, PacketsAndAddresses} | {discarded, Reason}</name>
<fsummary>Generate a request message to be sent to the network</fsummary>
<type>
<v>Vsn = 'version-1' | 'version-2' | 'version-3'</v>
+ <v>NoteStore = pid()</v>
<v>Pdu = #pdu</v>
<v>MsgData = msg_data()</v>
+ <v>LocalEngineID = string()</v>
<v>To = [dest_addrs()]</v>
<v>PacketsAndAddresses = [{TDomain, TAddress, Packet}]</v>
<v>TDomain = snmpUDPDomain</v>
@@ -136,6 +159,13 @@
also received from the requests mentioned above.
</p>
+ <note>
+ <p>Note that the use of the LocalEngineID argument is only intended
+ for special cases, if the agent is to "emulate" multiple EngineIDs!
+ By default, the agent uses the value of <c>SnmpEngineID</c>
+ (see SNMP-FRAMEWORK-MIB). </p>
+ </note>
+
<marker id="discarded_pdu"></marker>
</desc>
</func>
diff --git a/lib/snmp/doc/src/snmpa_network_interface_filter.xml b/lib/snmp/doc/src/snmpa_network_interface_filter.xml
index d625fd3e4a..10419517dd 100644
--- a/lib/snmp/doc/src/snmpa_network_interface_filter.xml
+++ b/lib/snmp/doc/src/snmpa_network_interface_filter.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2007</year><year>2009</year>
+ <year>2007</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>snmpa_network_interface_filter</title>
@@ -153,7 +153,7 @@ pdu_type() = 'get-request' | 'get-next-request' | 'get-response' | 'set-request'
<p>For the message to be discarded all together, the function
<em>must</em> return <em>false</em>. </p>
<p>Note that it is possible for this function to filter out targets
- (but <em>not</em> add it's own) by returning an updated
+ (but <em>not</em> add its own) by returning an updated
<c>Targets</c> list (<c>NewTargets</c>). </p>
</desc>
</func>
diff --git a/lib/snmp/doc/src/snmpc.xml b/lib/snmp/doc/src/snmpc.xml
index 48d63d6c91..fbd0950c69 100644
--- a/lib/snmp/doc/src/snmpc.xml
+++ b/lib/snmp/doc/src/snmpc.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>snmpc</title>
@@ -47,9 +47,10 @@
<type>
<v>File = string()</v>
<v>Options = [opt()]</v>
- <v>opt() = db() | deprecated() | description() | reference() | group_check() | i() | il() | imports() | module() | module_identity() | outdir() | no_defs() | verbosity() | warnings()</v>
+ <v>opt() = db() | relaxed_row_name_assign_check() | deprecated() | description() | reference() | group_check() | i() | il() | imports() | module() | module_identity() | outdir() | no_defs() | verbosity() | warnings()</v>
<v>db() = {db, volatile|persistent|mnesia}</v>
<v>deprecated() = {deprecated, bool()}</v>
+ <v>relaxed_row_name_assign_check() = relaxed_row_name_assign_check</v>
<v>description() = description</v>
<v>reference() = reference</v>
<v>group_check() = {group_check, bool()}</v>
@@ -71,75 +72,104 @@
compiled file <c>BinFileName</c> is called
<c><![CDATA[<File>.bin]]></c>. </p>
<list type="bulleted">
- <item>The option <c>db</c> specifies which database should
- be used for the default instrumentation. Default is
- <c>volatile</c>.
+ <item>
+ <p>The option <c>db</c> specifies which database should
+ be used for the default instrumentation. </p>
+ <p>Default is <c>volatile</c>. </p>
+ </item>
+ <item>
+ <p>The option <c>deprecated</c> specifies if a deprecated
+ definition should be kept or not. If the option is
+ false the MIB compiler will ignore all deprecated
+ definitions. </p>
+ <p>Default is <c>true</c>. </p>
</item>
- <item>The option <c>deprecated</c> specifies if a deprecated
- definition should be kept or not. If the option is
- false the MIB compiler will ignore all deprecated
- definitions. Default is <c>true</c>.
+ <item>
+ <p>The option <c>relaxed_row_name_assign_check</c>, if present,
+ specifies that the row name assign check shall not be done
+ strictly according to the SMI (which allows only the value 1).
+            With this option, all values greater than zero are allowed
+ (>= 1). This means that the error will be converted to a
+ warning. </p>
+ <p>By default it is not included, but if this option is present
+ it will be. </p>
</item>
- <item>The option <c>description</c> specifies if the text
- of the DESCRIPTION field will be included or not. By default
- it is not included, but if this option is present it will
- be.
+ <item>
+ <p>The option <c>description</c> specifies if the text
+ of the DESCRIPTION field will be included or not. </p>
+ <p>By default it is not included, but if this option is
+ present it will be. </p>
</item>
- <item>The option <c>reference</c> specifies if the text
- of the REFERENCE field, when found in a table definition,
- will be included or not. By default
- it is not included, but if this option is present it will
- be. The reference text will be placed in the allocList field
- of the mib-entry record (#me{}) for the table.
+ <item>
+ <p>The option <c>reference</c> specifies if the text
+ of the REFERENCE field, when found in a table definition,
+ will be included or not. </p>
+ <p>By default it is not included, but if this option is present
+ it will be. The reference text will be placed in the allocList
+ field of the mib-entry record (#me{}) for the table. </p>
</item>
- <item>The option <c>group_check</c> specifies whether the
- mib compiler should check the OBJECT-GROUP macro and
- the NOTIFICATION-GROUP macro for correctness or not.
- Default is <c>true</c>.
+ <item>
+ <p>The option <c>group_check</c> specifies whether the
+ mib compiler should check the OBJECT-GROUP macro and
+ the NOTIFICATION-GROUP macro for correctness or not. </p>
+ <p>Default is <c>true</c>. </p>
</item>
- <item>The option <c>i</c> specifies the path to search for
- imported (compiled) MIB files. The directories should be
- strings with a trailing directory delimiter. Default is
- <c>["./"]</c>.
+ <item>
+ <p>The option <c>i</c> specifies the path to search for
+ imported (compiled) MIB files. The directories should be
+ strings with a trailing directory delimiter. </p>
+ <p>Default is <c>["./"]</c>. </p>
</item>
- <item>The option <c>il</c> (include_lib) also specifies a
- list of directories to search for imported MIBs. It
- assumes that the first element in the directory name
- corresponds to an OTP application. The compiler will find
- the current installed version. For example, the value
- ["snmp/mibs/"] will be replaced by ["snmp-3.1.1/mibs/"]
- (or what the current version may be in the system). The
- current directory and the <c><![CDATA[<snmp-home>/priv/mibs/]]></c>
- are always listed last in the include path.
+ <item>
+ <p>The option <c>il</c> (include_lib) also specifies a
+ list of directories to search for imported MIBs. It
+ assumes that the first element in the directory name
+ corresponds to an OTP application. The compiler will find
+ the current installed version. For example, the value
+ ["snmp/mibs/"] will be replaced by ["snmp-3.1.1/mibs/"]
+ (or what the current version may be in the system). The
+ current directory and the
+ <c><![CDATA[<snmp-home>/priv/mibs/]]></c>
+ are always listed last in the include path. </p>
</item>
- <item>The option <c>imports</c>, if present, specifies that the
- IMPORT statement of the MIB shall be included in the compiled mib.
+ <item>
+ <p>The option <c>imports</c>, if present, specifies that
+ the IMPORT statement of the MIB shall be included in the
+ compiled mib. </p>
</item>
- <item>The option <c>module</c>, if present, specifies the
- name of a module which implements all instrumentation
- functions for the MIB. The name of all instrumentation
- functions must be the same as the corresponding managed
- object it implements.
+ <item>
+ <p>The option <c>module</c>, if present, specifies the
+ name of a module which implements all instrumentation
+ functions for the MIB. </p>
+ <p>The name of all instrumentation
+ functions must be the same as the corresponding managed
+ object it implements. </p>
</item>
- <item>The option <c>module_identity</c>, if present, specifies
- that the info part of the MODULE-IDENTITY statement of the MIB
- shall be included in the compiled mib.
+ <item>
+ <p>The option <c>module_identity</c>, if present, specifies
+ that the info part of the MODULE-IDENTITY statement of the MIB
+ shall be included in the compiled mib. </p>
</item>
- <item>The option <c>no_defs</c>, if present, specifies
- that if a managed object does not have an instrumentation
- function, the default instrumentation function should NOT
- be used, instead this is reported as an error, and the
- compilation aborts.
+ <item>
+ <p>The option <c>no_defs</c>, if present, specifies
+ that if a managed object does not have an instrumentation
+ function, the default instrumentation function should NOT
+ be used, instead this is reported as an error, and the
+ compilation aborts. </p>
</item>
- <item>The option <c>verbosity</c> specifies the verbosity of
- the SNMP mib compiler. I.e. if warning, info, log, debug
- and trace messages shall be shown. Default is <c>silence</c>.
- Note that if the option <c>warnings</c> is <c>true</c> and the
- option <c>verbosity</c> is <c>silence</c>, warning messages will
- still be shown.
+ <item>
+ <p>The option <c>verbosity</c> specifies the verbosity of
+          the SNMP mib compiler, i.e. whether warning, info, log, debug
+ and trace messages shall be shown. </p>
+ <p>Default is <c>silence</c>. </p>
+ <p>Note that if the option <c>warnings</c> is <c>true</c> and the
+ option <c>verbosity</c> is <c>silence</c>, warning messages will
+ still be shown. </p>
</item>
- <item>The option <c>warnings</c> specifies whether warning
- messages should be shown. Default is <c>true</c>.
+ <item>
+ <p>The option <c>warnings</c> specifies whether warning
+ messages should be shown. </p>
+ <p>Default is <c>true</c>. </p>
</item>
</list>
<p>The MIB compiler understands both SMIv1 and SMIv2 MIBs. It
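A hedged example of compiling a MIB with the new relaxed_row_name_assign_check option alongside a few of the options described above; the MIB file name and include path are placeholders, and the {ok, BinFileName} match assumes the return shape given in the function signature.

    %% Accept row OIDs assigned { <tableIdentifier> N } with N >= 1,
    %% instead of insisting on the strict SMI value 1.
    {ok, _BinFileName} =
        snmpc:compile("MY-MIB.mib",
                      [{db, volatile},
                       {i, ["./mibs/"]},
                       relaxed_row_name_assign_check,
                       {verbosity, silence},
                       {warnings, true}]).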
diff --git a/lib/snmp/doc/src/snmpm.xml b/lib/snmp/doc/src/snmpm.xml
index 9e7ac75daf..1ee391d9ba 100644
--- a/lib/snmp/doc/src/snmpm.xml
+++ b/lib/snmp/doc/src/snmpm.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>snmpm</title>
@@ -920,7 +920,7 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1
<v>Oids = [oid()]</v>
</type>
<desc>
- <p>Transform a alias-name to it's oid.</p>
+          <p>Transform an alias-name to its oid.</p>
<p>Note that an alias-name is only unique within the mib, so
when loading several mib's into a manager, there might be
several instances of the same aliasname.</p>
@@ -938,7 +938,7 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1
<v>Reason = term()</v>
</type>
<desc>
- <p>Transform a oid to it's aliasname.</p>
+          <p>Transform an oid to its aliasname.</p>
<marker id="oid_to_type"></marker>
</desc>
diff --git a/lib/snmp/src/agent/snmpa.erl b/lib/snmp/src/agent/snmpa.erl
index a113bba3a7..87b191caed 100644
--- a/lib/snmp/src/agent/snmpa.erl
+++ b/lib/snmp/src/agent/snmpa.erl
@@ -47,6 +47,7 @@
mib_of/1, mib_of/2,
me_of/1, me_of/2,
invalidate_mibs_cache/0, invalidate_mibs_cache/1,
+ which_mibs_cache_size/0, which_mibs_cache_size/1,
enable_mibs_cache/0, enable_mibs_cache/1,
disable_mibs_cache/0, disable_mibs_cache/1,
gc_mibs_cache/0, gc_mibs_cache/1, gc_mibs_cache/2, gc_mibs_cache/3,
@@ -60,7 +61,7 @@
register_subagent/3, unregister_subagent/2,
send_notification/3, send_notification/4, send_notification/5,
- send_notification/6,
+ send_notification/6, send_notification/7,
send_trap/3, send_trap/4,
discovery/2, discovery/3, discovery/4, discovery/5, discovery/6,
@@ -302,6 +303,13 @@ invalidate_mibs_cache(Agent) ->
snmpa_agent:invalidate_mibs_cache(Agent).
+which_mibs_cache_size() ->
+ which_mibs_cache_size(snmp_master_agent).
+
+which_mibs_cache_size(Agent) ->
+ snmpa_agent:which_mibs_cache_size(Agent).
+
+
enable_mibs_cache() ->
enable_mibs_cache(snmp_master_agent).
@@ -415,14 +423,23 @@ send_notification(Agent, Notification, Recv, Varbinds) ->
send_notification(Agent, Notification, Recv, NotifyName, Varbinds) ->
send_notification(Agent, Notification, Recv, NotifyName, "", Varbinds).
-send_notification(Agent, Notification, Recv,
- NotifyName, ContextName, Varbinds)
+send_notification(Agent, Notification, Recv, NotifyName,
+ ContextName, Varbinds)
when (is_list(NotifyName) andalso
is_list(ContextName) andalso
is_list(Varbinds)) ->
snmpa_agent:send_trap(Agent, Notification, NotifyName,
ContextName, Recv, Varbinds).
+send_notification(Agent, Notification, Recv,
+ NotifyName, ContextName, Varbinds, LocalEngineID)
+ when (is_list(NotifyName) andalso
+ is_list(ContextName) andalso
+ is_list(Varbinds) andalso
+ is_list(LocalEngineID)) ->
+ snmpa_agent:send_trap(Agent, Notification, NotifyName,
+ ContextName, Recv, Varbinds, LocalEngineID).
+
%% Kept for backwards compatibility
send_trap(Agent, Trap, Community) ->
send_notification(Agent, Trap, no_receiver, Community, "", []).
diff --git a/lib/snmp/src/agent/snmpa_agent.erl b/lib/snmp/src/agent/snmpa_agent.erl
index fb04fca632..f70885b2ec 100644
--- a/lib/snmp/src/agent/snmpa_agent.erl
+++ b/lib/snmp/src/agent/snmpa_agent.erl
@@ -30,7 +30,7 @@
-export([subagent_set/2,
load_mibs/2, unload_mibs/2, which_mibs/1, whereis_mib/2, info/1,
register_subagent/3, unregister_subagent/2,
- send_trap/6,
+ send_trap/6, send_trap/7,
register_notification_filter/5,
unregister_notification_filter/2,
which_notification_filter/1,
@@ -48,6 +48,7 @@
get/2, get/3, get_next/2, get_next/3]).
-export([mib_of/1, mib_of/2, me_of/1, me_of/2,
invalidate_mibs_cache/1,
+ which_mibs_cache_size/1,
enable_mibs_cache/1, disable_mibs_cache/1,
gc_mibs_cache/1, gc_mibs_cache/2, gc_mibs_cache/3,
enable_mibs_cache_autogc/1, disable_mibs_cache_autogc/1,
@@ -64,7 +65,7 @@
%% Internal exports
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3, tr_var/2, tr_varbind/1,
- handle_pdu/7, worker/2, worker_loop/1, do_send_trap/6]).
+ handle_pdu/7, worker/2, worker_loop/1, do_send_trap/7]).
-ifndef(default_verbosity).
-define(default_verbosity,silence).
@@ -245,6 +246,10 @@ disable_mibs_cache(Agent) ->
call(Agent, {mibs_cache_request, disable_cache}).
+which_mibs_cache_size(Agent) ->
+ call(Agent, {mibs_cache_request, cache_size}).
+
+
enable_mibs_cache_autogc(Agent) ->
call(Agent, {mibs_cache_request, enable_autogc}).
@@ -524,14 +529,15 @@ which_notification_filter(Agent) ->
send_trap(Agent, Trap, NotifyName, CtxName, Recv, Varbinds) ->
?d("send_trap -> entry with"
- "~n self(): ~p"
- "~n Agent: ~p [~p]"
- "~n Trap: ~p"
- "~n NotifyName: ~p"
- "~n CtxName: ~p"
- "~n Recv: ~p"
- "~n Varbinds: ~p",
- [self(), Agent, wis(Agent), Trap, NotifyName, CtxName, Recv, Varbinds]),
+ "~n self(): ~p"
+ "~n Agent: ~p [~p]"
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n CtxName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p",
+ [self(), Agent, wis(Agent),
+ Trap, NotifyName, CtxName, Recv, Varbinds]),
Msg = {send_trap, Trap, NotifyName, CtxName, Recv, Varbinds},
case (wis(Agent) =:= self()) of
false ->
@@ -540,6 +546,27 @@ send_trap(Agent, Trap, NotifyName, CtxName, Recv, Varbinds) ->
Agent ! Msg
end.
+send_trap(Agent, Trap, NotifyName, CtxName, Recv, Varbinds, LocalEngineID) ->
+ ?d("send_trap -> entry with"
+ "~n self(): ~p"
+ "~n Agent: ~p [~p]"
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n CtxName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p"
+ "~n LocalEngineID: ~p",
+ [self(), Agent, wis(Agent),
+ Trap, NotifyName, CtxName, Recv, Varbinds, LocalEngineID]),
+ Msg =
+ {send_trap, Trap, NotifyName, CtxName, Recv, Varbinds, LocalEngineID},
+ case (wis(Agent) =:= self()) of
+ false ->
+ call(Agent, Msg);
+ true ->
+ Agent ! Msg
+ end.
+
%% -- Discovery functions --
@@ -626,6 +653,7 @@ wis(Pid) when is_pid(Pid) ->
wis(Atom) when is_atom(Atom) ->
whereis(Atom).
+
forward_trap(Agent, TrapRecord, NotifyName, CtxName, Recv, Varbinds) ->
Agent ! {forward_trap, TrapRecord, NotifyName, CtxName, Recv, Varbinds}.
@@ -719,14 +747,15 @@ handle_info(worker_available, S) ->
handle_info({send_trap, Trap, NotifyName, ContextName, Recv, Varbinds}, S) ->
?vlog("[handle_info] send trap request:"
- "~n Trap: ~p"
- "~n NotifyName: ~p"
- "~n ContextName: ~p"
- "~n Recv: ~p"
- "~n Varbinds: ~p",
- [Trap,NotifyName,ContextName,Recv,Varbinds]),
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p",
+ [Trap, NotifyName, ContextName, Recv, Varbinds]),
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
case catch handle_send_trap(S, Trap, NotifyName, ContextName,
- Recv, Varbinds) of
+ Recv, Varbinds, LocalEngineID) of
{ok, NewS} ->
{noreply, NewS};
{'EXIT', R} ->
@@ -736,17 +765,39 @@ handle_info({send_trap, Trap, NotifyName, ContextName, Recv, Varbinds}, S) ->
{noreply, S}
end;
-handle_info({forward_trap, TrapRecord, NotifyName, ContextName,
- Recv, Varbinds},S) ->
+handle_info({send_trap, Trap, NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID}, S) ->
+ ?vlog("[handle_info] send trap request:"
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p"
+ "~n LocalEngineID: ~p",
+ [Trap, NotifyName, ContextName, Recv, Varbinds, LocalEngineID]),
+ case catch handle_send_trap(S, Trap, NotifyName, ContextName,
+ Recv, Varbinds, LocalEngineID) of
+ {ok, NewS} ->
+ {noreply, NewS};
+ {'EXIT', R} ->
+ ?vinfo("Trap not sent:~n ~p", [R]),
+ {noreply, S};
+ _ ->
+ {noreply, S}
+ end;
+
+handle_info({forward_trap, TrapRecord, NotifyName, ContextName,
+ Recv, Varbinds}, S) ->
?vlog("[handle_info] forward trap request:"
- "~n TrapRecord: ~p"
- "~n NotifyName: ~p"
- "~n ContextName: ~p"
- "~n Recv: ~p"
- "~n Varbinds: ~p",
- [TrapRecord,NotifyName,ContextName,Recv,Varbinds]),
+ "~n TrapRecord: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p",
+ [TrapRecord, NotifyName, ContextName, Recv, Varbinds]),
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
case (catch maybe_send_trap(S, TrapRecord, NotifyName, ContextName,
- Recv, Varbinds)) of
+ Recv, Varbinds, LocalEngineID)) of
{ok, NewS} ->
{noreply, NewS};
{'EXIT', R} ->
@@ -856,17 +907,52 @@ handle_call(restart_set_worker, _From, #state{set_worker = Pid} = S) ->
ok
end,
{reply, ok, S};
+
handle_call({send_trap, Trap, NotifyName, ContextName, Recv, Varbinds},
_From, S) ->
?vlog("[handle_call] send trap request:"
- "~n Trap: ~p"
- "~n NotifyName: ~p"
- "~n ContextName: ~p"
- "~n Recv: ~p"
- "~n Varbinds: ~p",
- [Trap,NotifyName,ContextName,Recv,Varbinds]),
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p",
+ [Trap, NotifyName, ContextName, Recv, Varbinds]),
+ LocalEngineID =
+ case S#state.type of
+ master_agent ->
+ ?DEFAULT_LOCAL_ENGINE_ID;
+ _ ->
+ %% subagent -
+ %% we don't need this, eventually the trap sent request
+ %% will reach the master-agent and then it will look up
+ %% the proper engine id.
+ ignore
+ end,
+ case (catch handle_send_trap(S, Trap, NotifyName, ContextName,
+ Recv, Varbinds, LocalEngineID)) of
+ {ok, NewS} ->
+ {reply, ok, NewS};
+ {'EXIT', Reason} ->
+ ?vinfo("Trap not sent:~n ~p", [Reason]),
+ {reply, {error, {send_failed, Reason}}, S};
+ _ ->
+ ?vinfo("Trap not sent", []),
+ {reply, {error, send_failed}, S}
+ end;
+
+handle_call({send_trap, Trap, NotifyName,
+ ContextName, Recv, Varbinds, LocalEngineID},
+ _From, S) ->
+ ?vlog("[handle_call] send trap request:"
+ "~n Trap: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n Recv: ~p"
+ "~n Varbinds: ~p"
+ "~n LocalEngineID: ~p",
+ [Trap, NotifyName, ContextName, Recv, Varbinds, LocalEngineID]),
case (catch handle_send_trap(S, Trap, NotifyName, ContextName,
- Recv, Varbinds)) of
+ Recv, Varbinds, LocalEngineID)) of
{ok, NewS} ->
{reply, ok, NewS};
{'EXIT', Reason} ->
@@ -876,8 +962,10 @@ handle_call({send_trap, Trap, NotifyName, ContextName, Recv, Varbinds},
?vinfo("Trap not sent", []),
{reply, {error, send_failed}, S}
end;
+
handle_call({discovery,
- TargetName, Notification, ContextName, Vbs, DiscoHandler, ExtraInfo},
+ TargetName, Notification, ContextName, Vbs, DiscoHandler,
+ ExtraInfo},
From,
#state{disco = undefined} = S) ->
?vlog("[handle_call] initiate discovery process:"
@@ -1219,6 +1307,8 @@ handle_mibs_cache_request(MibServer, Req) ->
snmpa_mib:gc_cache(MibServer, Age);
{gc_cache, Age, GcLimit} ->
snmpa_mib:gc_cache(MibServer, Age, GcLimit);
+ cache_size ->
+ snmpa_mib:which_cache_size(MibServer);
enable_cache ->
snmpa_mib:enable_cache(MibServer);
disable_cache ->
@@ -1432,17 +1522,20 @@ spawn_thread(Vsn, Pdu, PduMS, ACMData, Address, Extra) ->
Args = [Vsn, Pdu, PduMS, ACMData, Address, Extra, Dict],
proc_lib:spawn_link(?MODULE, handle_pdu, Args).
-spawn_trap_thread(TrapRec, NotifyName, ContextName, Recv, V) ->
+spawn_trap_thread(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID) ->
Dict = get(),
proc_lib:spawn_link(?MODULE, do_send_trap,
- [TrapRec, NotifyName, ContextName, Recv, V, Dict]).
+ [TrapRec, NotifyName, ContextName,
+ Recv, Vbs, LocalEngineID, Dict]).
-do_send_trap(TrapRec, NotifyName, ContextName, Recv, V, Dict) ->
+do_send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID, Dict) ->
lists:foreach(fun({Key, Val}) -> put(Key, Val) end, Dict),
put(sname,trap_sender_short_name(get(sname))),
?vlog("starting",[]),
- snmpa_trap:send_trap(TrapRec, NotifyName, ContextName, Recv, V,
- get(net_if)).
+ snmpa_trap:send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID, get(net_if)).
worker(Master, Dict) ->
lists:foreach(fun({Key, Val}) -> put(Key, Val) end, Dict),
@@ -1457,17 +1550,22 @@ worker_loop(Master) ->
handle_pdu(Vsn, Pdu, PduMS, ACMData, Address, Extra),
Master ! worker_available;
- %% Old style message
- {MibView, Vsn, Pdu, PduMS, ACMData, AgentData, Extra} ->
- ?vtrace("worker_loop -> received (old) request", []),
- do_handle_pdu(MibView, Vsn, Pdu, PduMS, ACMData, AgentData, Extra),
+ %% We don't trap exits!
+ {TrapRec, NotifyName, ContextName, Recv, Vbs} ->
+ ?vtrace("worker_loop -> send trap:"
+ "~n ~p", [TrapRec]),
+ snmpa_trap:send_trap(TrapRec, NotifyName,
+ ContextName, Recv, Vbs, get(net_if)),
Master ! worker_available;
- {TrapRec, NotifyName, ContextName, Recv, V} -> % We don't trap exits!
+ %% We don't trap exits!
+ {send_trap,
+ TrapRec, NotifyName, ContextName, Recv, Vbs, LocalEngineID} ->
?vtrace("worker_loop -> send trap:"
"~n ~p", [TrapRec]),
snmpa_trap:send_trap(TrapRec, NotifyName,
- ContextName, Recv, V, get(net_if)),
+ ContextName, Recv, Vbs, LocalEngineID,
+ get(net_if)),
Master ! worker_available;
{verbosity, Verbosity} ->
@@ -1616,13 +1714,15 @@ handle_acm_error(Vsn, Reason, Pdu, ACMData, Address, Extra) ->
end.
-handle_send_trap(S, TrapName, NotifyName, ContextName, Recv, Varbinds) ->
+handle_send_trap(S, TrapName, NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID) ->
?vtrace("handle_send_trap -> entry with"
- "~n S#state.type: ~p"
- "~n TrapName: ~p"
- "~n NotifyName: ~p"
- "~n ContextName: ~p",
- [S#state.type, TrapName, NotifyName, ContextName]),
+ "~n S#state.type: ~p"
+ "~n TrapName: ~p"
+ "~n NotifyName: ~p"
+ "~n ContextName: ~p"
+ "~n LocalEngineID: ~p",
+ [S#state.type, TrapName, NotifyName, ContextName, LocalEngineID]),
case snmpa_trap:construct_trap(TrapName, Varbinds) of
{ok, TrapRecord, VarList} ->
?vtrace("handle_send_trap -> construction complete: "
@@ -1639,7 +1739,8 @@ handle_send_trap(S, TrapName, NotifyName, ContextName, Recv, Varbinds) ->
?vtrace("handle_send_trap -> "
"[master] handle send trap",[]),
maybe_send_trap(S, TrapRecord, NotifyName,
- ContextName, Recv, VarList)
+ ContextName, Recv, VarList,
+ LocalEngineID)
end;
error ->
error
@@ -1676,7 +1777,8 @@ maybe_forward_trap(#state{parent = Parent, nfilters = NFs} = S,
maybe_send_trap(#state{nfilters = NFs} = S,
- TrapRec, NotifyName, ContextName, Recv, Varbinds) ->
+ TrapRec, NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID) ->
?vtrace("maybe_send_trap -> entry with"
"~n NFs: ~p", [NFs]),
case filter_notification(NFs, [], TrapRec) of
@@ -1693,39 +1795,45 @@ maybe_send_trap(#state{nfilters = NFs} = S,
?vtrace("maybe_send_trap -> send trap:"
"~n ~p", [TrapRec2]),
do_handle_send_trap(S, TrapRec2,
- NotifyName, ContextName, Recv, Varbinds);
+ NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID);
{send, Removed, TrapRec2} ->
?vtrace("maybe_send_trap -> send trap:"
"~n ~p", [TrapRec2]),
NFs2 = del_notification_filter(Removed, NFs),
do_handle_send_trap(S#state{nfilters = NFs2}, TrapRec2,
- NotifyName, ContextName, Recv, Varbinds)
+ NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID)
end.
-do_handle_send_trap(S, TrapRec, NotifyName, ContextName, Recv, Varbinds) ->
- V = snmpa_trap:try_initialise_vars(get(mibserver), Varbinds),
+do_handle_send_trap(S, TrapRec, NotifyName, ContextName, Recv, Varbinds,
+ LocalEngineID) ->
+ Vbs = snmpa_trap:try_initialise_vars(get(mibserver), Varbinds),
case S#state.type of
subagent ->
forward_trap(S#state.parent, TrapRec, NotifyName, ContextName,
- Recv, V),
+ Recv, Vbs),
{ok, S};
master_agent when S#state.multi_threaded =:= false ->
?vtrace("do_handle_send_trap -> send trap:"
"~n ~p", [TrapRec]),
snmpa_trap:send_trap(TrapRec, NotifyName, ContextName,
- Recv, V, get(net_if)),
+ Recv, Vbs, LocalEngineID, get(net_if)),
{ok, S};
master_agent when S#state.worker_state =:= busy ->
%% Main worker busy => create new worker
?vtrace("do_handle_send_trap -> main worker busy: "
"spawn a trap sender", []),
- spawn_trap_thread(TrapRec, NotifyName, ContextName, Recv, V),
+ spawn_trap_thread(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID),
{ok, S};
master_agent ->
%% Send to main worker
?vtrace("do_handle_send_trap -> send to main worker",[]),
- S#state.worker ! {TrapRec, NotifyName, ContextName, Recv, V},
+ S#state.worker ! {send_trap,
+ TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID},
{ok, S#state{worker_state = busy}}
end.
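
Note: throughout the snmpa_agent hunks above the pattern is the same: the old 6-argument requests fall back to ?DEFAULT_LOCAL_ENGINE_ID while the new 7-argument requests carry the engine id explicitly, and the message handed to the main worker is now tagged with send_trap. A stand-alone, hypothetical sketch of that dispatch idea (not agent code):

-module(trap_dispatch_sketch).
-export([dispatch/1]).

%% Stand-in for the ?DEFAULT_LOCAL_ENGINE_ID macro defined in
%% snmpa_internal.hrl further down in this diff.
default_engine_id() ->
    snmp_framework_mib:get_engine_id().

%% Old-style request: no engine id supplied, use the agent's own.
dispatch({send_trap, Trap, Notify, Ctx, Recv, Vbs}) ->
    do_send(Trap, Notify, Ctx, Recv, Vbs, default_engine_id());
%% New-style request: the caller decides which engine id to stamp.
dispatch({send_trap, Trap, Notify, Ctx, Recv, Vbs, LocalEngineID}) ->
    do_send(Trap, Notify, Ctx, Recv, Vbs, LocalEngineID).

do_send(Trap, _Notify, _Ctx, _Recv, _Vbs, LocalEngineID) ->
    {would_send, Trap, LocalEngineID}.
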
diff --git a/lib/snmp/src/agent/snmpa_general_db.erl b/lib/snmp/src/agent/snmpa_general_db.erl
index 795c353a9e..a06604c9cf 100644
--- a/lib/snmp/src/agent/snmpa_general_db.erl
+++ b/lib/snmp/src/agent/snmpa_general_db.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2000-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpa_general_db).
@@ -114,14 +114,7 @@ ets_open(Name, Dir, clear, Type) ->
mnesia_open({table_exist,Name},_Nodes,_RecName,_Attr,_Type,clear) ->
?vtrace("[mnesia] database ~p already exists; clear content",[Name]),
- Pattern = '_',
- F = fun() ->
- Recs = mnesia:match_object(Name,Pattern,read),
- lists:foreach(fun(Rec) ->
- mnesia:delete_object(Name,Rec,write)
- end, Recs),
- Recs
- end,
+ F = fun() -> mnesia:clear_table(Name) end,
case mnesia:transaction(F) of
{aborted,Reason} ->
exit({aborted,Reason});
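
Note: the snmpa_general_db change simply swaps the hand-rolled match_object/delete_object loop for mnesia:clear_table/1. A small, hypothetical fragment showing the call on its own (the table name is illustrative; per the mnesia documentation the function returns {atomic, ok} or {aborted, Reason}):

-module(clear_table_sketch).
-export([clear/1]).

%% Remove every record from Tab in one operation.
clear(Tab) ->
    case mnesia:clear_table(Tab) of
        {atomic, ok}      -> ok;
        {aborted, Reason} -> exit({aborted, Reason})
    end.
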
diff --git a/lib/snmp/src/agent/snmpa_internal.hrl b/lib/snmp/src/agent/snmpa_internal.hrl
index a33a6809dc..9fa874f119 100644
--- a/lib/snmp/src/agent/snmpa_internal.hrl
+++ b/lib/snmp/src/agent/snmpa_internal.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2006-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2006-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -22,6 +22,8 @@
-include_lib("snmp/src/app/snmp_internal.hrl").
+-define(DEFAULT_LOCAL_ENGINE_ID, snmp_framework_mib:get_engine_id()).
+
-define(snmpa_info(F, A), ?snmp_info("agent", F, A)).
-define(snmpa_warning(F, A), ?snmp_warning("agent", F, A)).
-define(snmpa_error(F, A), ?snmp_error("agent", F, A)).
diff --git a/lib/snmp/src/agent/snmpa_mib.erl b/lib/snmp/src/agent/snmpa_mib.erl
index 370989d0be..ce90db18b3 100644
--- a/lib/snmp/src/agent/snmpa_mib.erl
+++ b/lib/snmp/src/agent/snmpa_mib.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpa_mib).
@@ -55,7 +55,7 @@
-define(NO_CACHE, no_mibs_cache).
-define(DEFAULT_CACHE_USAGE, true).
-define(CACHE_GC_TICKTIME, timer:minutes(1)).
--define(DEFAULT_CACHE_AUTOGC, false).
+-define(DEFAULT_CACHE_AUTOGC, true).
-define(DEFAULT_CACHE_GCLIMIT, 100).
-define(DEFAULT_CACHE_AGE, timer:minutes(10)).
-define(CACHE_GC_TRIGGER, cache_gc_trigger).
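
Note: the hunk above flips the default for the MIB-server cache automatic GC from false to true. A deployment that wants the old behaviour can turn it off at runtime; a one-line, hypothetical example to evaluate on a running agent node, using the /1 variant exported from snmpa_agent earlier in this diff:

%% Disable automatic GC of the master agent's MIB-server cache again.
snmpa_agent:disable_mibs_cache_autogc(snmp_master_agent).
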
diff --git a/lib/snmp/src/agent/snmpa_mpd.erl b/lib/snmp/src/agent/snmpa_mpd.erl
index 2e09286b87..fd75b98f84 100644
--- a/lib/snmp/src/agent/snmpa_mpd.erl
+++ b/lib/snmp/src/agent/snmpa_mpd.erl
@@ -1,27 +1,28 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpa_mpd).
-export([init/1, reset/0, inc/1, counters/0,
discarded_pdu/1,
- process_packet/6,
- generate_response_msg/5, generate_msg/5,
+ process_packet/6, process_packet/7,
+ generate_response_msg/5, generate_response_msg/6,
+ generate_msg/5, generate_msg/6,
generate_discovery_msg/4,
process_taddrs/1,
generate_req_id/0]).
@@ -34,6 +35,7 @@
-define(VMODULE,"MPD").
-include("snmp_verbosity.hrl").
+-include("snmpa_internal.hrl").
-define(empty_msg_size, 24).
@@ -120,6 +122,12 @@ reset() ->
%% section 4.2.1 in rfc2272)
%%-----------------------------------------------------------------
process_packet(Packet, TDomain, TAddress, State, NoteStore, Log) ->
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ process_packet(Packet, TDomain, TAddress, LocalEngineID,
+ State, NoteStore, Log).
+
+process_packet(Packet, TDomain, TAddress, LocalEngineID,
+ State, NoteStore, Log) ->
inc(snmpInPkts),
case catch snmp_pdus:dec_message_only(binary_to_list(Packet)) of
@@ -127,15 +135,17 @@ process_packet(Packet, TDomain, TAddress, State, NoteStore, Log) ->
when State#state.v1 =:= true ->
?vlog("v1, community: ~s", [Community]),
HS = ?empty_msg_size + length(Community),
- v1_v2c_proc('version-1', NoteStore, Community, TDomain, TAddress,
- Data, HS, Log, Packet);
+ v1_v2c_proc('version-1', NoteStore, Community,
+ TDomain, TAddress,
+ LocalEngineID, Data, HS, Log, Packet);
#message{version = 'version-2', vsn_hdr = Community, data = Data}
when State#state.v2c =:= true ->
?vlog("v2c, community: ~s", [Community]),
HS = ?empty_msg_size + length(Community),
- v1_v2c_proc('version-2', NoteStore, Community, TDomain, TAddress,
- Data, HS, Log, Packet);
+ v1_v2c_proc('version-2', NoteStore, Community,
+ TDomain, TAddress,
+ LocalEngineID, Data, HS, Log, Packet);
#message{version = 'version-3', vsn_hdr = V3Hdr, data = Data}
when State#state.v3 =:= true ->
@@ -143,7 +153,9 @@ process_packet(Packet, TDomain, TAddress, State, NoteStore, Log) ->
[V3Hdr#v3_hdr.msgID,
V3Hdr#v3_hdr.msgFlags,
V3Hdr#v3_hdr.msgSecurityModel]),
- v3_proc(NoteStore, Packet, TDomain, TAddress, V3Hdr, Data, Log);
+ v3_proc(NoteStore, Packet,
+ TDomain, TAddress,
+ LocalEngineID, V3Hdr, Data, Log);
{'EXIT', {bad_version, Vsn}} ->
?vtrace("exit: bad version: ~p",[Vsn]),
@@ -170,10 +182,11 @@ discarded_pdu(Variable) -> inc(Variable).
%%-----------------------------------------------------------------
%% Handles a Community based message (v1 or v2c).
%%-----------------------------------------------------------------
-v1_v2c_proc(Vsn, NoteStore, Community, snmpUDPDomain, {Ip, Udp},
+v1_v2c_proc(Vsn, NoteStore, Community, snmpUDPDomain,
+ {Ip, Udp}, LocalEngineID,
Data, HS, Log, Packet) ->
TAddress = tuple_to_list(Ip) ++ [Udp div 256, Udp rem 256],
- AgentMS = snmp_framework_mib:get_engine_max_message_size(),
+ AgentMS = get_engine_max_message_size(LocalEngineID),
MgrMS = snmp_community_mib:get_target_addr_ext_mms(?snmpUDPDomain,
TAddress),
PduMS = case MgrMS of
@@ -220,10 +233,10 @@ v1_v2c_proc(Vsn, NoteStore, Community, snmpUDPDomain, {Ip, Udp},
{discarded, trap_pdu}
end;
v1_v2c_proc(_Vsn, _NoteStore, _Community, snmpUDPDomain, TAddress,
- _Data, _HS, _Log, _Packet) ->
+ _LocalEngineID, _Data, _HS, _Log, _Packet) ->
{discarded, {badarg, TAddress}};
v1_v2c_proc(_Vsn, _NoteStore, _Community, TDomain, _TAddress,
- _Data, _HS, _Log, _Packet) ->
+ _LocalEngineID, _Data, _HS, _Log, _Packet) ->
{discarded, {badarg, TDomain}}.
sec_model('version-1') -> ?SEC_V1;
@@ -234,15 +247,19 @@ sec_model('version-2') -> ?SEC_V2C.
%% Handles a SNMPv3 Message, following the procedures in rfc2272,
%% section 4.2 and 7.2
%%-----------------------------------------------------------------
-v3_proc(NoteStore, Packet, _TDomain, _TAddress, V3Hdr, Data, Log) ->
- case (catch v3_proc(NoteStore, Packet, V3Hdr, Data, Log)) of
+v3_proc(NoteStore, Packet, _TDomain, _TAddress, LocalEngineID,
+ V3Hdr, Data, Log) ->
+ case (catch v3_proc(NoteStore, Packet, LocalEngineID, V3Hdr, Data, Log)) of
{'EXIT', Reason} ->
exit(Reason);
Result ->
Result
end.
-v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
+v3_proc(NoteStore, Packet, LocalEngineID, V3Hdr, Data, Log) ->
+ ?vtrace("v3_proc -> entry with"
+ "~n LocalEngineID: ~p",
+ [LocalEngineID]),
%% 7.2.3
#v3_hdr{msgID = MsgID,
msgMaxSize = MMS,
@@ -250,7 +267,7 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
msgSecurityModel = MsgSecurityModel,
msgSecurityParameters = SecParams,
hdr_size = HdrSize} = V3Hdr,
- ?vdebug("v3_proc -> version 3 message header:"
+ ?vdebug("v3_proc -> version 3 message header [7.2.3]:"
"~n msgID = ~p"
"~n msgMaxSize = ~p"
"~n msgFlags = ~p"
@@ -263,17 +280,19 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
SecLevel = check_sec_level(MsgFlags),
IsReportable = snmp_misc:is_reportable(MsgFlags),
%% 7.2.6
- ?vtrace("v3_proc -> "
+ ?vtrace("v3_proc -> [7.2.6]"
"~n SecModule = ~p"
"~n SecLevel = ~p"
"~n IsReportable = ~p",
- [SecModule,SecLevel,IsReportable]),
+ [SecModule, SecLevel, IsReportable]),
SecRes = (catch SecModule:process_incoming_msg(Packet, Data,
- SecParams, SecLevel)),
+ SecParams, SecLevel,
+ LocalEngineID)),
?vtrace("v3_proc -> message processing result: "
"~n SecRes: ~p", [SecRes]),
{SecEngineID, SecName, ScopedPDUBytes, SecData, DiscoOrPlain} =
- check_sec_module_result(SecRes, V3Hdr, Data, IsReportable, Log),
+ check_sec_module_result(SecRes, V3Hdr, Data,
+ LocalEngineID, IsReportable, Log),
?vtrace("v3_proc -> "
"~n DiscoOrPlain: ~w"
"~n SecEngineID: ~w"
@@ -311,7 +330,7 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
Log(PDU#pdu.type, Packet)
end,
%% Make sure a get_bulk doesn't get too big.
- AgentMS = snmp_framework_mib:get_engine_max_message_size(),
+ AgentMS = get_engine_max_message_size(LocalEngineID),
%% PduMMS is supposed to be the maximum total length of the response
%% PDU we can send. From the MMS, we need to subtract everything before
%% the PDU, i.e. Message and ScopedPDU.
@@ -415,8 +434,8 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
throw({discarded, received_v2_trap});
Type ->
%% 7.2.13
- SnmpEngineID = snmp_framework_mib:get_engine_id(),
- ?vtrace("v3_proc -> SnmpEngineID = ~w", [SnmpEngineID]),
+ SnmpEngineID = LocalEngineID,
+ ?vtrace("v3_proc -> 7.2.13", []),
case SecEngineID of
SnmpEngineID when (DiscoOrPlain =:= discovery) ->
%% This is a discovery step 2 message!
@@ -429,6 +448,7 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
ContextName,
SecData,
PDU,
+ LocalEngineID,
Log);
SnmpEngineID when (DiscoOrPlain =:= plain) ->
@@ -444,17 +464,18 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
%% 4.2.2.1.2
NIsReportable = snmp_misc:is_reportable_pdu(Type),
Val = inc(snmpUnknownPDUHandlers),
- ErrorInfo = {#varbind{oid = ?snmpUnknownPDUHandlers,
- variabletype = 'Counter32',
- value = Val},
- SecName,
- [{securityLevel, SecLevel},
- {contextEngineID, ContextEngineID},
- {contextName, ContextName}]},
+ ErrorInfo =
+ {#varbind{oid = ?snmpUnknownPDUHandlers,
+ variabletype = 'Counter32',
+ value = Val},
+ SecName,
+ [{securityLevel, SecLevel},
+ {contextEngineID, ContextEngineID},
+ {contextName, ContextName}]},
case generate_v3_report_msg(MsgID,
MsgSecurityModel,
- Data, ErrorInfo,
- Log) of
+ Data, LocalEngineID,
+ ErrorInfo, Log) of
{ok, Report} when NIsReportable =:= true ->
{discarded, snmpUnknownPDUHandlers, Report};
_ ->
@@ -473,6 +494,7 @@ v3_proc(NoteStore, Packet, V3Hdr, Data, Log) ->
ContextName,
SecData,
PDU,
+ LocalEngineID,
Log);
_ ->
@@ -501,7 +523,7 @@ check_sec_level(Unknown) ->
inc(snmpInvalidMsgs),
throw({discarded, snmpInvalidMsgs}).
-check_sec_module_result(Res, V3Hdr, Data, IsReportable, Log) ->
+check_sec_module_result(Res, V3Hdr, Data, LocalEngineID, IsReportable, Log) ->
case Res of
{ok, X} ->
X;
@@ -516,7 +538,7 @@ check_sec_module_result(Res, V3Hdr, Data, IsReportable, Log) ->
#v3_hdr{msgID = MsgID, msgSecurityModel = MsgSecModel} = V3Hdr,
Pdu = get_scoped_pdu(Data),
case generate_v3_report_msg(MsgID, MsgSecModel, Pdu,
- ErrorInfo, Log) of
+ LocalEngineID, ErrorInfo, Log) of
{ok, Report} ->
throw({discarded, {securityError, Reason}, Report});
{discarded, _SomeOtherReason} ->
@@ -545,8 +567,15 @@ get_scoped_pdu(D) ->
generate_response_msg(Vsn, RePdu, Type, ACMData, Log) ->
generate_response_msg(Vsn, RePdu, Type, ACMData, Log, 1).
+generate_response_msg(Vsn, RePdu, Type, ACMData, Log, N) when is_integer(N) ->
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ generate_response_msg(Vsn, RePdu, Type, ACMData, LocalEngineID, Log, N);
+generate_response_msg(Vsn, RePdu, Type, ACMData, LocalEngineID, Log) ->
+ generate_response_msg(Vsn, RePdu, Type, ACMData, LocalEngineID, Log, 1).
+
generate_response_msg(Vsn, RePdu, Type,
{community, _SecModel, Community, _IpUdp},
+ LocalEngineID,
Log, _) ->
case catch snmp_pdus:enc_pdu(RePdu) of
{'EXIT', Reason} ->
@@ -555,8 +584,9 @@ generate_response_msg(Vsn, RePdu, Type,
[RePdu, Community, Reason]),
{discarded, Reason};
PduBytes ->
- Message = #message{version = Vsn, vsn_hdr = Community,
- data = PduBytes},
+ Message = #message{version = Vsn,
+ vsn_hdr = Community,
+ data = PduBytes},
case catch list_to_binary(
snmp_pdus:enc_message_only(Message)) of
{'EXIT', Reason} ->
@@ -565,7 +595,7 @@ generate_response_msg(Vsn, RePdu, Type,
[RePdu, Community, Reason]),
{discarded, Reason};
Packet ->
- MMS = snmp_framework_mib:get_engine_max_message_size(),
+ MMS = get_engine_max_message_size(LocalEngineID),
case size(Packet) of
Len when Len =< MMS ->
Log(Type, Packet),
@@ -584,6 +614,7 @@ generate_response_msg(Vsn, RePdu, Type,
generate_response_msg(Vsn, RePdu, Type,
{v3, MsgID, MsgSecurityModel, SecName, SecLevel,
ContextEngineID, ContextName, SecData},
+ LocalEngineID,
Log, N) ->
%% rfc2272: 7.1 steps 6-8
ScopedPDU = #scopedPdu{contextEngineID = ContextEngineID,
@@ -596,7 +627,7 @@ generate_response_msg(Vsn, RePdu, Type,
[RePdu, ContextName, Reason]),
{discarded, Reason};
ScopedPDUBytes ->
- AgentMS = snmp_framework_mib:get_engine_max_message_size(),
+ AgentMS = get_engine_max_message_size(LocalEngineID),
V3Hdr = #v3_hdr{msgID = MsgID,
msgMaxSize = AgentMS,
msgFlags = snmp_misc:mk_msg_flags(Type, SecLevel),
@@ -611,13 +642,14 @@ generate_response_msg(Vsn, RePdu, Type,
?SEC_USM ->
snmpa_usm
end,
- SecEngineID = snmp_framework_mib:get_engine_id(),
+ SecEngineID = LocalEngineID,
?vtrace("generate_response_msg -> SecEngineID: ~w", [SecEngineID]),
case (catch SecModule:generate_outgoing_msg(Message,
SecEngineID,
SecName,
SecData,
- SecLevel)) of
+ SecLevel,
+ LocalEngineID)) of
{'EXIT', Reason} ->
config_err("~p (message: ~p)", [Reason, Message]),
{discarded, Reason};
@@ -668,12 +700,14 @@ generate_response_msg(Vsn, RePdu, Type,
SecName, SecLevel,
ContextEngineID,
ContextName,
- SecData}, Log, N+1)
+ SecData},
+ LocalEngineID, Log, N+1)
end
end
end.
-generate_v3_report_msg(MsgID, MsgSecurityModel, Data, ErrorInfo, Log) ->
+generate_v3_report_msg(MsgID, MsgSecurityModel, Data, LocalEngineID,
+ ErrorInfo, Log) ->
{Varbind, SecName, Opts} = ErrorInfo,
ReqId =
if
@@ -689,7 +723,7 @@ generate_v3_report_msg(MsgID, MsgSecurityModel, Data, ErrorInfo, Log) ->
error_index = 0,
varbinds = [Varbind]},
SecLevel = snmp_misc:get_option(securityLevel, Opts, 0),
- SnmpEngineID = snmp_framework_mib:get_engine_id(),
+ SnmpEngineID = LocalEngineID,
ContextEngineID =
snmp_misc:get_option(contextEngineID, Opts, SnmpEngineID),
ContextName = snmp_misc:get_option(contextName, Opts, ""),
@@ -697,7 +731,8 @@ generate_v3_report_msg(MsgID, MsgSecurityModel, Data, ErrorInfo, Log) ->
generate_response_msg('version-3', Pdu, report,
{v3, MsgID, MsgSecurityModel, SecName, SecLevel,
- ContextEngineID, ContextName, SecData}, Log).
+ ContextEngineID, ContextName, SecData},
+ LocalEngineID, Log).
%% req_id(#scopedPdu{data = #pdu{request_id = ReqId}}) ->
%% ?vtrace("Report ReqId: ~p",[ReqId]),
@@ -719,7 +754,8 @@ generate_discovery1_report_msg(MsgID, MsgSecurityModel,
SecName, SecLevel,
ContextEngineID, ContextName,
{SecData, Oid, Value},
- #pdu{request_id = ReqId}, Log) ->
+ #pdu{request_id = ReqId},
+ LocalEngineID, Log) ->
?vtrace("generate_discovery1_report_msg -> entry with"
"~n ReqId: ~p"
"~n Value: ~p", [ReqId, Value]),
@@ -734,7 +770,8 @@ generate_discovery1_report_msg(MsgID, MsgSecurityModel,
varbinds = [Varbind]},
case generate_response_msg('version-3', PduOut, report,
{v3, MsgID, MsgSecurityModel, SecName, SecLevel,
- ContextEngineID, ContextName, SecData}, Log) of
+ ContextEngineID, ContextName, SecData},
+ LocalEngineID, Log) of
{ok, Packet} ->
{discovery, Packet};
Error ->
@@ -745,7 +782,8 @@ generate_discovery1_report_msg(MsgID, MsgSecurityModel,
generate_discovery2_report_msg(MsgID, MsgSecurityModel,
SecName, SecLevel,
ContextEngineID, ContextName,
- SecData, #pdu{request_id = ReqId}, Log) ->
+ SecData, #pdu{request_id = ReqId},
+ LocalEngineID, Log) ->
?vtrace("generate_discovery2_report_msg -> entry with"
"~n ReqId: ~p", [ReqId]),
SecModule = get_security_module(MsgSecurityModel),
@@ -757,7 +795,8 @@ generate_discovery2_report_msg(MsgID, MsgSecurityModel,
varbinds = [Vb]},
case generate_response_msg('version-3', PduOut, report,
{v3, MsgID, MsgSecurityModel, SecName, SecLevel,
- ContextEngineID, ContextName, SecData}, Log) of
+ ContextEngineID, ContextName, SecData},
+ LocalEngineID, Log) of
{ok, Packet} ->
{discovery, Packet};
Error ->
@@ -816,7 +855,11 @@ set_vb_null([]) ->
%% Executed when a message that isn't a response is generated, i.e.
%% a trap or an inform.
%%-----------------------------------------------------------------
-generate_msg(Vsn, _NoteStore, Pdu, {community, Community}, To) ->
+generate_msg(Vsn, NoteStore, Pdu, ACMData, To) ->
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ generate_msg(Vsn, NoteStore, Pdu, ACMData, LocalEngineID, To).
+
+generate_msg(Vsn, _NoteStore, Pdu, {community, Community}, LocalEngineID, To) ->
Message = #message{version = Vsn, vsn_hdr = Community, data = Pdu},
case catch list_to_binary(snmp_pdus:enc_message(Message)) of
{'EXIT', Reason} ->
@@ -825,7 +868,7 @@ generate_msg(Vsn, _NoteStore, Pdu, {community, Community}, To) ->
[Pdu, Community, Reason]),
{discarded, Reason};
Packet ->
- AgentMax = snmp_framework_mib:get_engine_max_message_size(),
+ AgentMax = get_engine_max_message_size(LocalEngineID),
case size(Packet) of
Len when Len =< AgentMax ->
{ok, mk_v1_v2_packet_list(To, Packet, Len, Pdu)};
@@ -838,9 +881,9 @@ generate_msg(Vsn, _NoteStore, Pdu, {community, Community}, To) ->
end
end;
generate_msg('version-3', NoteStore, Pdu,
- {v3, ContextEngineID, ContextName}, To) ->
- %% rfc2272: 7.1.6
- ScopedPDU = #scopedPdu{contextEngineID = ContextEngineID,
+ {v3, ContextEngineID, ContextName}, LocalEngineID, To) ->
+ %% rfc2272: 7.1 step 6
+ ScopedPDU = #scopedPdu{contextEngineID = LocalEngineID,
contextName = ContextName,
data = Pdu},
case (catch snmp_pdus:enc_scoped_pdu(ScopedPDU)) of
@@ -851,7 +894,8 @@ generate_msg('version-3', NoteStore, Pdu,
{discarded, Reason};
ScopedPDUBytes ->
{ok, mk_v3_packet_list(NoteStore, To, ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName)}
+ ContextEngineID, ContextName,
+ LocalEngineID)}
end.
@@ -1094,17 +1138,21 @@ mk_msg_flags(PduType, SecLevel) ->
mk_v3_packet_entry(NoteStore, Domain, Addr,
{SecModel, SecName, SecLevel, TargetAddrName},
- ScopedPDUBytes, Pdu, ContextEngineID, ContextName) ->
- %% 7.1.7
- ?vtrace("mk_v3_packet_entry -> entry - 7.1.7", []),
- MsgID = generate_msg_id(),
- PduType = Pdu#pdu.type,
- MsgFlags = mk_msg_flags(PduType, SecLevel),
+ ScopedPDUBytes, Pdu, _ContextEngineID, ContextName,
+ LocalEngineID) ->
+ %% rfc2272 7.1 step 7
+ ?vtrace("mk_v3_packet_entry -> entry - RFC2272-7.1:7", []),
+ MsgVersion = 'version-3', % 7.1:7a
+ MsgID = generate_msg_id(), % 7.1:7b
+ MaxMsgSz = get_max_message_size(), % 7.1:7c
+ PduType = Pdu#pdu.type,
+ MsgFlags = mk_msg_flags(PduType, SecLevel), % 7.1:7d
+ MsgSecModel = SecModel, % 7.1:7e
V3Hdr = #v3_hdr{msgID = MsgID,
- msgMaxSize = get_max_message_size(),
+ msgMaxSize = MaxMsgSz,
msgFlags = MsgFlags,
- msgSecurityModel = SecModel},
- Message = #message{version = 'version-3',
+ msgSecurityModel = MsgSecModel},
+ Message = #message{version = MsgVersion,
vsn_hdr = V3Hdr,
data = ScopedPDUBytes},
SecModule =
@@ -1113,12 +1161,21 @@ mk_v3_packet_entry(NoteStore, Domain, Addr,
snmpa_usm
end,
+ %%
+ %% 7.1:8 - If the PDU is from the Response Class or the Internal Class
+ %% securityEngineID = snmpEngineID (local/source)
+ %% 7.1:9 - If the PDU is from the Unconfirmed Class
+ %% securityEngineID = snmpEngineID (local/source)
+ %% else
+ %% securityEngineID = targetEngineID (remote/destination)
+ %%
+
%% 7.1.9a
?vtrace("mk_v3_packet_entry -> sec engine id - 7.1.9a", []),
SecEngineID =
case PduType of
'snmpv2-trap' ->
- snmp_framework_mib:get_engine_id();
+ LocalEngineID;
_ ->
%% This is the implementation dependent target engine id
%% procedure.
@@ -1141,8 +1198,9 @@ mk_v3_packet_entry(NoteStore, Domain, Addr,
?vdebug("mk_v3_packet_entry -> secEngineID: ~p", [SecEngineID]),
%% 7.1.9b
- case catch SecModule:generate_outgoing_msg(Message, SecEngineID,
- SecName, [], SecLevel) of
+ case (catch SecModule:generate_outgoing_msg(Message, SecEngineID,
+ SecName, [], SecLevel,
+ LocalEngineID)) of
{'EXIT', Reason} ->
config_err("~p (message: ~p)", [Reason, Message]),
skip;
@@ -1169,7 +1227,7 @@ mk_v3_packet_entry(NoteStore, Domain, Addr,
sec_model = SecModel,
sec_name = SecName,
sec_level = SecLevel,
- ctx_engine_id = ContextEngineID,
+ ctx_engine_id = LocalEngineID,
ctx_name = ContextName,
disco = false,
req_id = Pdu#pdu.request_id},
@@ -1180,15 +1238,16 @@ mk_v3_packet_entry(NoteStore, Domain, Addr,
mk_v3_packet_list(NoteStore, To,
- ScopedPDUBytes, Pdu, ContextEngineID, ContextName) ->
+ ScopedPDUBytes, Pdu, ContextEngineID, ContextName,
+ LocalEngineID) ->
mk_v3_packet_list(NoteStore, To,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName, []).
+ ContextEngineID, ContextName, LocalEngineID, []).
mk_v3_packet_list(_, [],
_ScopedPDUBytes, _Pdu,
_ContextEngineID, _ContextName,
- Acc) ->
+ _LocalEngineID, Acc) ->
lists:reverse(Acc);
%% This clause is for backward compatibility reasons
@@ -1196,20 +1255,21 @@ mk_v3_packet_list(_, [],
mk_v3_packet_list(NoteStore,
[{{?snmpUDPDomain, [A,B,C,D,U1,U2]}, SecData} | T],
ScopedPDUBytes, Pdu, ContextEngineID, ContextName,
- Acc) ->
+ LocalEngineID, Acc) ->
case mk_v3_packet_entry(NoteStore,
snmpUDPDomain, {{A,B,C,D}, U1 bsl 8 + U2}, SecData,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName) of
+ ContextEngineID, ContextName, LocalEngineID) of
skip ->
mk_v3_packet_list(NoteStore, T,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName,
+ ContextEngineID, ContextName, LocalEngineID,
Acc);
{ok, Entry} ->
mk_v3_packet_list(NoteStore, T,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName, [Entry | Acc])
+ ContextEngineID, ContextName, LocalEngineID,
+ [Entry | Acc])
end;
%% This is the new clause
@@ -1218,11 +1278,11 @@ mk_v3_packet_list(NoteStore,
mk_v3_packet_list(NoteStore,
[{{Domain, Addr}, SecData} | T],
ScopedPDUBytes, Pdu, ContextEngineID, ContextName,
- Acc) ->
+ LocalEngineID, Acc) ->
case mk_v3_packet_entry(NoteStore,
Domain, Addr, SecData,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName) of
+ ContextEngineID, ContextName, LocalEngineID) of
skip ->
mk_v3_packet_list(NoteStore, T,
ScopedPDUBytes, Pdu,
@@ -1230,7 +1290,8 @@ mk_v3_packet_list(NoteStore,
{ok, Entry} ->
mk_v3_packet_list(NoteStore, T,
ScopedPDUBytes, Pdu,
- ContextEngineID, ContextName, [Entry | Acc])
+ ContextEngineID, ContextName,
+ LocalEngineID, [Entry | Acc])
end.
@@ -1253,6 +1314,9 @@ gen(Id) ->
get_target_engine_id(TargetAddrName) ->
snmp_target_mib:get_target_engine_id(TargetAddrName).
+get_engine_max_message_size(_LocalEngineID) ->
+ snmp_framework_mib:get_engine_max_message_size().
+
sec_module(?SEC_USM) ->
snmpa_usm.
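
Note: all of the new arities in snmpa_mpd follow the same convention: the old arity remains as a wrapper that injects ?DEFAULT_LOCAL_ENGINE_ID and delegates, so callers compiled against the old signatures keep working. A simplified, hypothetical illustration of that wrapper pattern (the names are invented, not the real snmpa_mpd signatures):

-module(mpd_wrapper_sketch).
-export([process/3, process/4]).

%% Backwards-compatible arity: fall back to the agent's own engine id.
process(Packet, Domain, Address) ->
    process(Packet, Domain, Address, snmp_framework_mib:get_engine_id()).

%% New arity: the caller supplies the local engine id explicitly.
process(Packet, Domain, Address, LocalEngineID) ->
    {would_process, Packet, Domain, Address, LocalEngineID}.
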
diff --git a/lib/snmp/src/agent/snmpa_net_if.erl b/lib/snmp/src/agent/snmpa_net_if.erl
index 3a91cf4033..97a7a63dee 100644
--- a/lib/snmp/src/agent/snmpa_net_if.erl
+++ b/lib/snmp/src/agent/snmpa_net_if.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpa_net_if).
@@ -478,12 +478,13 @@ update_req_counter_outgoing(#state{limit = Limit, rcnt = RCnt} = S,
S#state{rcnt = NewRCnt}.
-maybe_handle_recv(#state{filter = FilterMod} = S,
+maybe_handle_recv(#state{usock = Sock, filter = FilterMod} = S,
Ip, Port, Packet) ->
case (catch FilterMod:accept_recv(Ip, Port)) of
false ->
%% Drop the received packet
inc(netIfMsgInDrops),
+ active_once(Sock),
S;
_ ->
handle_recv(S, Ip, Port, Packet)
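
Note: the snmpa_net_if fix is behavioural: when the configured network-interface filter rejects an incoming packet, the socket is now re-armed with active_once before the packet is dropped, so one rejected packet no longer stops the agent from receiving further traffic. A hypothetical fragment of such a filter's accept_recv/2 callback, the one consulted in the hunk above (a real filter module implements the remaining filter callbacks as well):

-module(deny_one_peer_filter).
-export([accept_recv/2]).

%% Drop everything from one (made-up) peer address, accept the rest.
accept_recv({192,168,1,66}, _Port) -> false;
accept_recv(_Ip, _Port)            -> true.
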
diff --git a/lib/snmp/src/agent/snmpa_trap.erl b/lib/snmp/src/agent/snmpa_trap.erl
index b1096b1135..450cb2e9f4 100644
--- a/lib/snmp/src/agent/snmpa_trap.erl
+++ b/lib/snmp/src/agent/snmpa_trap.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpa_trap).
@@ -23,14 +23,18 @@
%%%-----------------------------------------------------------------
%% External exports
-export([construct_trap/2,
- try_initialise_vars/2, send_trap/6]).
+ try_initialise_vars/2,
+ send_trap/6, send_trap/7]).
-export([send_discovery/5]).
%% Internal exports
--export([init_v2_inform/9, init_v3_inform/9, send_inform/6]).
+-export([init_v2_inform/9,
+ init_v3_inform/9, init_v3_inform/10,
+ send_inform/6]).
-export([init_discovery_inform/12, send_discovery_inform/5]).
-include("snmp_types.hrl").
+-include("snmpa_internal.hrl").
-include("SNMPv2-MIB.hrl").
-include("SNMPv2-TM.hrl").
-include("SNMPv2-TC.hrl").
@@ -331,13 +335,20 @@ make_varbind_list(Varbinds) ->
%% SnmpTargetAddrTable (using the Tag).
%%-----------------------------------------------------------------
send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs, NetIf) ->
- (catch do_send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs, NetIf)).
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID, NetIf).
+
+send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs, LocalEngineID, NetIf) ->
+ (catch do_send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID, NetIf)).
-do_send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs, NetIf) ->
+do_send_trap(TrapRec, NotifyName, ContextName, Recv, Vbs,
+ LocalEngineID, NetIf) ->
VarbindList = make_varbind_list(Vbs),
Dests = find_dests(NotifyName),
send_trap_pdus(Dests, ContextName, {TrapRec, VarbindList}, [], [], [],
- Recv, NetIf).
+ Recv, LocalEngineID, NetIf).
send_discovery(TargetName, Record, ContextName, Vbs, NetIf) ->
case find_dest(TargetName) of
@@ -619,7 +630,9 @@ send_discovery_inform(Parent, Timeout, Retry, Msg, NetIf) ->
%%-----------------------------------------------------------------
send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
Type} | T],
- ContextName,{TrapRec, Vbs}, V1Res, V2Res, V3Res, Recv, NetIf) ->
+ ContextName,
+ {TrapRec, Vbs}, V1Res, V2Res, V3Res, Recv,
+ LocalEngineID, NetIf) ->
?vdebug("send trap pdus: "
"~n Destination address: ~p"
"~n Target name: ~p"
@@ -634,7 +647,7 @@ send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
case check_all_varbinds(TrapRec, Vbs, MibView) of
true when MpModel =:= ?MP_V1 ->
?vtrace("send_trap_pdus -> v1 mp model",[]),
- ContextEngineId = snmp_framework_mib:get_engine_id(),
+ ContextEngineId = LocalEngineID,
case snmp_community_mib:vacm2community({SecName,
ContextEngineId,
ContextName},
@@ -644,16 +657,18 @@ send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
[element(2, DestAddr)]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
[{DestAddr, Community} | V1Res],
- V2Res, V3Res, Recv, NetIf);
+ V2Res, V3Res, Recv,
+ LocalEngineID, NetIf);
undefined ->
?vdebug("No community found for v1 dest: ~p",
[element(2, DestAddr)]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
- V1Res, V2Res, V3Res, Recv, NetIf)
+ V1Res, V2Res, V3Res, Recv,
+ LocalEngineID, NetIf)
end;
true when MpModel =:= ?MP_V2C ->
?vtrace("send_trap_pdus -> v2c mp model",[]),
- ContextEngineId = snmp_framework_mib:get_engine_id(),
+ ContextEngineId = LocalEngineID,
case snmp_community_mib:vacm2community({SecName,
ContextEngineId,
ContextName},
@@ -664,12 +679,13 @@ send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
V1Res,
[{DestAddr, Community, Type}|V2Res],
- V3Res, Recv, NetIf);
+ V3Res, Recv, LocalEngineID, NetIf);
undefined ->
?vdebug("No community found for v2c dest: ~p",
[element(2, DestAddr)]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
- V1Res, V2Res, V3Res, Recv, NetIf)
+ V1Res, V2Res, V3Res, Recv,
+ LocalEngineID, NetIf)
end;
true when MpModel =:= ?MP_V3 ->
?vtrace("send_trap_pdus -> v3 mp model",[]),
@@ -678,18 +694,20 @@ send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
V1Res, V2Res,
[{DestAddr, MsgData, Type} | V3Res],
- Recv, NetIf);
+ Recv, LocalEngineID, NetIf);
true ->
?vlog("bad MpModel ~p for dest ~p",
[MpModel, element(2, DestAddr)]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
- V1Res, V2Res, V3Res, Recv, NetIf);
+ V1Res, V2Res, V3Res, Recv,
+ LocalEngineID, NetIf);
_ ->
?vlog("no access for dest: "
"~n ~p in target ~p",
[element(2, DestAddr), TargetName]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
- V1Res, V2Res, V3Res, Recv, NetIf)
+ V1Res, V2Res, V3Res, Recv,
+ LocalEngineID, NetIf)
end;
{discarded, Reason} ->
?vlog("mib view error ~p for"
@@ -697,10 +715,10 @@ send_trap_pdus([{DestAddr, TargetName, {MpModel, SecModel, SecName, SecLevel},
"~n SecName: ~w",
[Reason, element(2, DestAddr), SecName]),
send_trap_pdus(T, ContextName, {TrapRec, Vbs},
- V1Res, V2Res, V3Res, Recv, NetIf)
+ V1Res, V2Res, V3Res, Recv, LocalEngineID, NetIf)
end;
send_trap_pdus([], ContextName, {TrapRec, Vbs}, V1Res, V2Res, V3Res,
- Recv, NetIf) ->
+ Recv, LocalEngineID, NetIf) ->
SysUpTime = snmp_standard_mib:sys_up_time(),
?vdebug("send trap pdus with sysUpTime ~p", [SysUpTime]),
InformRecvs = get_inform_recvs(V2Res ++ V3Res),
@@ -708,7 +726,8 @@ send_trap_pdus([], ContextName, {TrapRec, Vbs}, V1Res, V2Res, V3Res,
deliver_recv(Recv, snmp_targets, InformTargets),
send_v1_trap(TrapRec, V1Res, Vbs, NetIf, SysUpTime),
send_v2_trap(TrapRec, V2Res, Vbs, Recv, NetIf, SysUpTime),
- send_v3_trap(TrapRec, V3Res, Vbs, Recv, NetIf, SysUpTime, ContextName).
+ send_v3_trap(TrapRec, V3Res, Vbs, Recv, LocalEngineID, NetIf,
+ SysUpTime, ContextName).
send_v1_trap(_TrapRec, [], _Vbs, _NetIf, _SysUpTime) ->
ok;
@@ -762,21 +781,25 @@ send_v2_trap(TrapRec, V2Res, Vbs, Recv, NetIf, SysUpTime) ->
do_send_v2_trap(TrapRecvs, IVbs, NetIf),
do_send_v2_inform(InformRecvs, IVbs, Recv, NetIf).
-send_v3_trap(_TrapRec, [], _Vbs, _Recv, _NetIf, _SysUpTime, _ContextName) ->
+send_v3_trap(_TrapRec, [], _Vbs, _Recv, _LocalEngineID,
+ _NetIf, _SysUpTime, _ContextName) ->
ok;
-send_v3_trap(TrapRec, V3Res, Vbs, Recv, NetIf, SysUpTime, ContextName) ->
+send_v3_trap(TrapRec, V3Res, Vbs, Recv, LocalEngineID,
+ NetIf, SysUpTime, ContextName) ->
?vdebug("prepare to send v3 trap",[]),
{_Oid, IVbs} = mk_v2_trap(TrapRec, Vbs, SysUpTime), % v2 refers to SMIv2;
- TrapRecvs = get_trap_recvs(V3Res), % same SMI for v3
+ TrapRecvs = get_trap_recvs(V3Res), % same SMI for v3
InformRecvs = get_inform_recvs(V3Res),
do_send_v3_trap(TrapRecvs, ContextName, IVbs, NetIf),
- do_send_v3_inform(InformRecvs, ContextName, IVbs, Recv, NetIf).
+ do_send_v3_inform(InformRecvs, ContextName, IVbs, Recv,
+ LocalEngineID, NetIf).
mk_v2_trap(#notification{oid = Oid}, Vbs, SysUpTime) ->
?vtrace("make v2 notification '~p'",[Oid]),
mk_v2_notif(Oid, Vbs, SysUpTime);
-mk_v2_trap(#trap{enterpriseoid = Enter, specificcode = Spec}, Vbs, SysUpTime) ->
+mk_v2_trap(#trap{enterpriseoid = Enter, specificcode = Spec},
+ Vbs, SysUpTime) ->
%% Use alg. in rfc1908 to map a v1 trap to a v2 trap
?vtrace("make v2 trap for '~p' with ~p",[Enter,Spec]),
{Oid,Enterp} =
@@ -845,16 +868,16 @@ do_send_v3_trap(Recvs, ContextName, Vbs, NetIf) ->
end, Recvs),
ok.
-do_send_v3_inform([], _ContextName, _Vbs, _Recv, _NetIf) ->
+do_send_v3_inform([], _ContextName, _Vbs, _Recv, _LocalEngineID, _NetIf) ->
ok;
-do_send_v3_inform(Recvs, ContextName, Vbs, Recv, NetIf) ->
+do_send_v3_inform(Recvs, ContextName, Vbs, Recv, LocalEngineID, NetIf) ->
lists:foreach(
fun({Addr, MsgData, Timeout, Retry}) ->
?vtrace("~n start inform sender to send v3 inform to ~p",
[Addr]),
proc_lib:spawn_link(?MODULE, init_v3_inform,
[{Addr, MsgData}, Timeout, Retry, Vbs,
- Recv, NetIf, ContextName,
+ Recv, LocalEngineID, NetIf, ContextName,
get(verbosity), get(sname)])
end,
Recvs).
@@ -874,7 +897,13 @@ init_v2_inform(Addr, Timeout, Retry, Vbs, Recv, NetIf, Community,V,S) ->
%% New process
-init_v3_inform(Addr, Timeout, Retry, Vbs, Recv, NetIf, ContextName,V,S) ->
+init_v3_inform(Addr, Timeout, Retry, Vbs, Recv, NetIf, ContextName, V, S) ->
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ init_v3_inform(Addr, Timeout, Retry, Vbs, Recv, LocalEngineID,
+ NetIf, ContextName, V, S).
+
+init_v3_inform(Addr, Timeout, Retry, Vbs, Recv, LocalEngineID,
+ NetIf, ContextName, V, S) ->
%% Make a new Inform for each recipient; they need unique
%% request-ids!
put(verbosity,V),
@@ -882,7 +911,7 @@ init_v3_inform(Addr, Timeout, Retry, Vbs, Recv, NetIf, ContextName,V,S) ->
?vdebug("~n starting with timeout = ~p and retry = ~p",
[Timeout,Retry]),
InformPdu = make_v2_notif_pdu(Vbs, 'inform-request'), % Yes, v2
- ContextEngineId = snmp_framework_mib:get_engine_id(),
+ ContextEngineId = LocalEngineID,
Msg = {send_pdu_req, 'version-3', InformPdu,
{v3, ContextEngineId, ContextName}, [Addr], self()},
?MODULE:send_inform(Addr, Timeout*10, Retry, Msg, Recv, NetIf).
diff --git a/lib/snmp/src/agent/snmpa_usm.erl b/lib/snmp/src/agent/snmpa_usm.erl
index 12a6b996ff..ae584bb3c1 100644
--- a/lib/snmp/src/agent/snmpa_usm.erl
+++ b/lib/snmp/src/agent/snmpa_usm.erl
@@ -19,8 +19,8 @@
-module(snmpa_usm).
-export([
- process_incoming_msg/4,
- generate_outgoing_msg/5,
+ process_incoming_msg/4, process_incoming_msg/5,
+ generate_outgoing_msg/5, generate_outgoing_msg/6,
generate_discovery_msg/4, generate_discovery_msg/5,
current_statsNotInTimeWindows_vb/0
]).
@@ -33,6 +33,7 @@
-define(VMODULE,"A-USM").
-include("snmp_verbosity.hrl").
+-include("snmpa_internal.hrl").
%%-----------------------------------------------------------------
@@ -58,7 +59,11 @@
%%-----------------------------------------------------------------
process_incoming_msg(Packet, Data, SecParams, SecLevel) ->
- TermDiscoEnabled = is_terminating_discovery_enabled(),
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ process_incoming_msg(Packet, Data, SecParams, SecLevel, LocalEngineID).
+
+process_incoming_msg(Packet, Data, SecParams, SecLevel, LocalEngineID) ->
+ TermDiscoEnabled = is_terminating_discovery_enabled(),
TermTriggerUsername = terminating_trigger_username(),
%% 3.2.1
?vtrace("process_incoming_msg -> check security parms: 3.2.1",[]),
@@ -124,7 +129,7 @@ process_incoming_msg(Packet, Data, SecParams, SecLevel) ->
"~n ~p",[UsmUser]),
DiscoOrPlain = authenticate_incoming(Packet,
UsmSecParams, UsmUser,
- SecLevel),
+ SecLevel, LocalEngineID),
%% 3.2.8
?vtrace("process_incoming_msg -> "
"decrypt scoped data: 3.2.8",[]),
@@ -166,7 +171,8 @@ process_discovery_msg(MsgAuthEngineID, Data, SecLevel) ->
end.
-authenticate_incoming(Packet, UsmSecParams, UsmUser, SecLevel) ->
+authenticate_incoming(Packet, UsmSecParams, UsmUser, SecLevel,
+ LocalEngineID) ->
%% 3.2.6
?vtrace("authenticate_incoming -> 3.2.6", []),
AuthProtocol = element(?usmUserAuthProtocol, UsmUser),
@@ -190,7 +196,8 @@ authenticate_incoming(Packet, UsmSecParams, UsmUser, SecLevel) ->
SecName,
MsgAuthEngineID,
MsgAuthEngineBoots,
- MsgAuthEngineTime) of
+ MsgAuthEngineTime,
+ LocalEngineID) of
discovery ->
discovery;
true ->
@@ -205,15 +212,15 @@ authenticate_incoming(Packet, UsmSecParams, UsmUser, SecLevel) ->
plain
end.
-authoritative(SecName, MsgAuthEngineBoots, MsgAuthEngineTime) ->
+authoritative(SecName, MsgAuthEngineBoots, MsgAuthEngineTime, LocalEngineID) ->
?vtrace("authoritative -> entry with"
"~n SecName: ~p"
"~n MsgAuthEngineBoots: ~p"
"~n MsgAuthEngineTime: ~p",
[SecName, MsgAuthEngineBoots, MsgAuthEngineTime]),
- SnmpEngineBoots = snmp_framework_mib:get_engine_boots(),
+ SnmpEngineBoots = get_local_engine_boots(LocalEngineID),
?vtrace("authoritative -> SnmpEngineBoots: ~p", [SnmpEngineBoots]),
- SnmpEngineTime = snmp_framework_mib:get_engine_time(),
+ SnmpEngineTime = get_local_engine_time(LocalEngineID),
?vtrace("authoritative -> SnmpEngineTime: ~p", [SnmpEngineTime]),
InTimeWindow =
if
@@ -320,11 +327,12 @@ non_authoritative(SecName,
end.
-is_auth(?usmNoAuthProtocol, _, _, _, SecName, _, _, _) -> % 3.2.5
+is_auth(?usmNoAuthProtocol, _, _, _, SecName, _, _, _, _) -> % 3.2.5
error(usmStatsUnsupportedSecLevels,
?usmStatsUnsupportedSecLevels_instance, SecName); % OTP-5464
is_auth(AuthProtocol, AuthKey, AuthParams, Packet, SecName,
- MsgAuthEngineID, MsgAuthEngineBoots, MsgAuthEngineTime) ->
+ MsgAuthEngineID, MsgAuthEngineBoots, MsgAuthEngineTime,
+ LocalEngineID) ->
TermDiscoEnabled = is_terminating_discovery_enabled(),
TermDiscoStage2 = terminating_discovery_stage2(),
IsAuth = auth_in(AuthProtocol, AuthKey, AuthParams, Packet),
@@ -334,7 +342,7 @@ is_auth(AuthProtocol, AuthKey, AuthParams, Packet, SecName,
%% 3.2.7
?vtrace("is_auth -> "
"retrieve EngineBoots and EngineTime: 3.2.7",[]),
- SnmpEngineID = snmp_framework_mib:get_engine_id(),
+ SnmpEngineID = LocalEngineID,
?vtrace("is_auth -> SnmpEngineID: ~p", [SnmpEngineID]),
case MsgAuthEngineID of
SnmpEngineID when ((MsgAuthEngineBoots =:= 0) andalso
@@ -351,12 +359,14 @@ is_auth(AuthProtocol, AuthKey, AuthParams, Packet, SecName,
%% This will *always* result in the manager *not*
%% being in timewindow
authoritative(SecName,
- MsgAuthEngineBoots, MsgAuthEngineTime);
+ MsgAuthEngineBoots, MsgAuthEngineTime,
+ LocalEngineID);
SnmpEngineID -> %% 3.2.7a
?vtrace("is_auth -> we are authoritative: 3.2.7a", []),
authoritative(SecName,
- MsgAuthEngineBoots, MsgAuthEngineTime);
+ MsgAuthEngineBoots, MsgAuthEngineTime,
+ LocalEngineID);
_ -> %% 3.2.7b - we're non-authoritative
?vtrace("is_auth -> we are non-authoritative: 3.2.7b",[]),
@@ -418,12 +428,19 @@ try_decrypt(?usmAesCfb128Protocol,
generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel) ->
+ LocalEngineID = ?DEFAULT_LOCAL_ENGINE_ID,
+ generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel,
+ LocalEngineID).
+
+generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel,
+ LocalEngineID) ->
%% 3.1.1
?vtrace("generate_outgoing_msg -> [3.1.1] entry with"
- "~n SecEngineID: ~p"
- "~n SecName: ~p"
- "~n SecLevel: ~w",
- [SecEngineID, SecName, SecLevel]),
+ "~n SecEngineID: ~p"
+ "~n SecName: ~p"
+ "~n SecLevel: ~w"
+ "~n LocalEngineID: ~p",
+ [SecEngineID, SecName, SecLevel, LocalEngineID]),
{UserName, AuthProtocol, PrivProtocol, AuthKey, PrivKey} =
case SecData of
[] -> % 3.1.1b
@@ -439,7 +456,7 @@ generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel) ->
element(?usmUserPrivKey, User)};
{_, Name,_,_,_,_,_,_,_,_,_,_,_, RowStatus,_,_} ->
?vdebug("generate_outgoing_msg -> "
- "found user ~p with wrong row status: ~p",
+ "found not active user ~p: ~p",
[Name, RowStatus]),
error(unknownSecurityName);
_ ->
@@ -460,7 +477,7 @@ generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel) ->
ScopedPduBytes = Message#message.data,
{ScopedPduData, MsgPrivParams} =
encrypt(ScopedPduBytes, PrivProtocol, PrivKey, SecLevel),
- SnmpEngineID = snmp_framework_mib:get_engine_id(),
+ SnmpEngineID = LocalEngineID,
?vtrace("generate_outgoing_msg -> SnmpEngineID: ~p [3.1.6]",
[SnmpEngineID]),
%% 3.1.6
@@ -474,8 +491,8 @@ generate_outgoing_msg(Message, SecEngineID, SecName, SecData, SecLevel) ->
{get_engine_boots(SecEngineID),
get_engine_time(SecEngineID)};
_ ->
- {snmp_framework_mib:get_engine_boots(),
- snmp_framework_mib:get_engine_time()}
+ {get_local_engine_boots(SnmpEngineID),
+ get_local_engine_time(SnmpEngineID)}
end,
%% 3.1.5 - 3.1.7
?vtrace("generate_outgoing_msg -> [3.1.5 - 3.1.7]",[]),
@@ -560,11 +577,15 @@ encrypt(Data, PrivProtocol, PrivKey, SecLevel) ->
?vtrace("encrypt -> 3.1.4a",[]),
case (catch try_encrypt(PrivProtocol, PrivKey, Data)) of
{ok, ScopedPduData, MsgPrivParams} ->
- ?vtrace("encrypt -> encode tag",[]),
+ ?vtrace("encrypt -> encrypted - now encode tag",[]),
{snmp_pdus:enc_oct_str_tag(ScopedPduData), MsgPrivParams};
{error, Reason} ->
+ ?vtrace("encrypt -> error: "
+ "~n Reason: ~p", [Reason]),
error(Reason);
- _Error ->
+ Error ->
+ ?vtrace("encrypt -> other: "
+ "~n Error: ~p", [Error]),
error(encryptionError)
end
end.
@@ -677,6 +698,19 @@ current_statsNotInTimeWindows_vb() ->
value = get_counter(usmStatsNotInTimeWindows)}.
+
+%%-----------------------------------------------------------------
+%% Future proofing...
+%%-----------------------------------------------------------------
+
+get_local_engine_boots(_LocalEngineID) ->
+ snmp_framework_mib:get_engine_boots().
+
+get_local_engine_time(_LocalEngineID) ->
+ snmp_framework_mib:get_engine_time().
+
+
+
%%-----------------------------------------------------------------
%% We cache the local values of all non-auth engines we know.
%% Keep the values in the snmp_agent_table.
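
Note: the two accessors added at the end of snmpa_usm are placeholders: for now they ignore the engine id and read the framework MIB, but they give the USM code a single point to change if several local engine ids are ever supported. A speculative sketch of what such a keyed lookup might look like; the ets table name is invented and assumed to be owned and populated by the agent:

-module(usm_engine_sketch).
-export([local_engine_boots/1]).

%% Look the boots counter up per engine id, falling back to the single
%% global value kept in the framework MIB (the current behaviour).
local_engine_boots(LocalEngineID) ->
    case ets:lookup(local_engine_tab, {boots, LocalEngineID}) of
        [{_Key, Boots}] -> Boots;
        []              -> snmp_framework_mib:get_engine_boots()
    end.
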
diff --git a/lib/snmp/src/app/snmp.appup.src b/lib/snmp/src/app/snmp.appup.src
index 56b8c593a8..2bd26e11db 100644
--- a/lib/snmp/src/app/snmp.appup.src
+++ b/lib/snmp/src/app/snmp.appup.src
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -22,33 +22,103 @@
%% ----- U p g r a d e -------------------------------------------------------
[
+ {"4.17",
+ [
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16.2",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16.1",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_net_if, soft, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"4.15",
[
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_config, soft_purge, soft_purge, []},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, upgrade_from_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
- {update, snmpa_net_if, {advanced, upgrade_from_pre_4_16},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmp_log, snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_net_if, {advanced, upgrade_from_pre_4_16},
soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
- {update, snmpm_config, soft, soft_purge, soft_purge, []}
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_net_if, {advanced, upgrade_from_pre_4_16},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
+ {update, snmpm_config, soft, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
]
},
{"4.14",
[
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_config, soft_purge, soft_purge, []},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, upgrade_from_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmp_log, snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
{update, snmpa_net_if, {advanced, upgrade_from_pre_4_16},
- soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ soft_purge, soft_purge, [snmp_log, snmpa_agent]},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
{load_module, snmpm_user, soft_purge, soft_purge, []},
{load_module, snmpm_user_default, soft_purge, soft_purge, [snmpm_user]},
+ {update, snmpm_net_if, {advanced, upgrade_from_pre_4_16},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
{update, snmpm_config, soft, soft_purge, soft_purge, []},
{update, snmpm_server, soft, soft_purge, soft_purge,
[snmpm_user_default]}
@@ -56,18 +126,26 @@
},
{"4.13.5",
[
- {load_module, snmpa_mib_data, soft_purge, soft_purge, []},
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_config, soft_purge, soft_purge, []},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, upgrade_from_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmp_log, snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mib_data, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
{update, snmpa_net_if, {advanced, upgrade_from_pre_4_16},
soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, [snmpa_mib_data]},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
{load_module, snmpm_user, soft_purge, soft_purge, []},
{load_module, snmpm_user_default, soft_purge, soft_purge, [snmpm_user]},
+ {update, snmpm_net_if, {advanced, upgrade_from_pre_4_14},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
{update, snmpm_config, soft, soft_purge, soft_purge, []},
{update, snmpm_server, soft, soft_purge, soft_purge, [snmpm_user_default]},
{add_module, snmpm_net_if_filter},
@@ -79,33 +157,104 @@
%% ------D o w n g r a d e ---------------------------------------------------
[
+ {"4.17",
+ [
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16.2",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16.1",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
+ ]
+ },
+ {"4.16",
+ [
+ {load_module, snmp_log, soft_purge, soft_purge, []},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpa_net_if, soft_purge, soft_purge, []},
+
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_net_if, soft, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
+ ]
+ },
{"4.15",
[
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_config, soft_purge, soft_purge, []},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, downgrade_to_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
- {update, snmpa_net_if, {advanced, downgrade_to_pre_4_16},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent, snmp_log]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_net_if, {advanced, downgrade_to_pre_4_16},
soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
- {update, snmpm_config, soft, soft_purge, soft_purge, []}
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
+ {update, snmpm_net_if, {advanced, downgrade_to_pre_4_16},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
+ {update, snmpm_config, soft, soft_purge, soft_purge, []},
+ {update, snmpm_server, soft, soft_purge, soft_purge, []}
]
},
{"4.14",
[
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_config, soft_purge, soft_purge, []},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, downgrade_to_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
- {update, snmpa_net_if, {advanced, downgrade_to_pre_4_16},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent, snmp_log]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
+ {update, snmpa_net_if, {advanced, downgrade_to_pre_4_16},
soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, []},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
{load_module, snmpm_user, soft_purge, soft_purge, []},
{load_module, snmpm_user_default, soft_purge, soft_purge, [snmpm_user]},
+ {update, snmpm_net_if, {advanced, downgrade_to_pre_4_16},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
{update, snmpm_config, soft, soft_purge, soft_purge, []},
{update, snmpm_server, soft, soft_purge, soft_purge,
[snmpm_user_default]}
@@ -113,20 +262,29 @@
},
{"4.13.5",
[
- {load_module, snmpa_mib_data, soft_purge, soft_purge, []},
{load_module, snmp_config, soft_purge, soft_purge, []},
- {load_module, snmpa, soft_purge, soft_purge, [snmp_log]},
{load_module, snmp_log, soft_purge, soft_purge, []},
- {update, snmpm_net_if, {advanced, downgrade_to_pre_4_16},
- soft_purge, soft_purge, [snmpm_config, snmp_log]},
+ {load_module, snmp_pdus, soft_purge, soft_purge, []},
+ {load_module, snmp_usm, soft_purge, soft_purge, []},
+
+ {load_module, snmpa, soft_purge, soft_purge, [snmp_log, snmpa_agent]},
+ {load_module, snmpa_general_db, soft_purge, soft_purge, []},
+ {load_module, snmpa_mib_data, soft_purge, soft_purge, []},
+ {load_module, snmpa_mpd, soft_purge, soft_purge, [snmpa_usm]},
+ {load_module, snmpa_usm, soft_purge, soft_purge, [snmp_usm]},
{update, snmpa_net_if, {advanced, downgrade_to_pre_4_16},
soft_purge, soft_purge, [snmpa_agent, snmp_log]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, []},
+ {update, snmpa_mib, soft, soft_purge, soft_purge, [snmpa_mib_data]},
+ {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mib]},
+ {load_module, snmpm_mpd, soft_purge, soft_purge, []},
{load_module, snmpm_user, soft_purge, soft_purge, []},
{load_module, snmpm_user_default, soft_purge, soft_purge, [snmpm_user]},
+ {update, snmpm_net_if, {advanced, downgrade_to_pre_4_14},
+ soft_purge, soft_purge, [snmpm_config, snmp_log]},
{update, snmpm_config, soft, soft_purge, soft_purge, []},
{update, snmpm_server, soft, soft_purge, soft_purge, [snmpm_user_default]},
+
{remove, {snmpm_net_if_filter, soft_purge, brutal_purge}},
{remove, {snmpm_network_interface_filter, soft_purge, brutal_purge}}
]
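For orientation, each version string in the lists above maps to a sequence of release_handler instructions. Below is a reduced, purely illustrative appup term (made-up version numbers and module names) showing the two tuple forms used throughout this file, {load_module, Mod, PrePurge, PostPurge, DepMods} and {update, Mod, Change, PrePurge, PostPurge, DepMods}; DepMods lists modules that must be loaded first, and a Change of {advanced, Extra} hands Extra on to the module's code_change/3.

%% Illustrative only; versions and modules are made up.
{"4.18",
 %% Upgrade from ...
 [{"4.17",
   [{load_module, some_mod, soft_purge, soft_purge, []},
    {update, some_srv, {advanced, upgrade_from_4_17},
     soft_purge, soft_purge, [some_mod]}]}],
 %% Downgrade to ...
 [{"4.17",
   [{load_module, some_mod, soft_purge, soft_purge, []},
    {update, some_srv, {advanced, downgrade_to_4_17},
     soft_purge, soft_purge, [some_mod]}]}]
}.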
diff --git a/lib/snmp/src/compile/snmpc.erl b/lib/snmp/src/compile/snmpc.erl
index 8a1f15d4a4..a7f2cdc2bc 100644
--- a/lib/snmp/src/compile/snmpc.erl
+++ b/lib/snmp/src/compile/snmpc.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(snmpc).
@@ -34,6 +34,7 @@
-include("snmpc.hrl").
-include("snmpc_lib.hrl").
+-record(dldata, {deprecated, relaxed_row_name_assign_check}).
look_at(Mib) ->
io:format("~p ~n", [snmpc_lib:look_at(Mib)]).
@@ -114,6 +115,7 @@ compile(FileName) ->
%% module_identity
%% {module, string()}
%% no_defs
+%% relaxed_row_name_assign_check
%% (hidden) {verbosity, trace|debug|log|info|silence} silence
%% (hidden) version
%% (hidden) options
@@ -201,6 +203,8 @@ get_options([imports|Opts], Formats, Args) ->
get_options(Opts, ["~n imports"|Formats], Args);
get_options([module_identity|Opts], Formats, Args) ->
get_options(Opts, ["~n module_identity"|Formats], Args);
+get_options([relaxed_row_name_assign_check|Opts], Formats, Args) ->
+ get_options(Opts, ["~n relaxed_row_name_assign_check"|Formats], Args);
get_options([_|Opts], Formats, Args) ->
get_options(Opts, Formats, Args).
@@ -284,6 +288,8 @@ check_options([imports| T]) ->
check_options(T);
check_options([module_identity| T]) ->
check_options(T);
+check_options([relaxed_row_name_assign_check| T]) ->
+ check_options(T);
check_options([{module, M} | T]) when is_atom(M) ->
check_options(T);
check_options([no_defs| T]) ->
@@ -309,6 +315,9 @@ get_description(Options) ->
get_reference(Options) ->
get_bool_option(reference, Options).
+get_relaxed_row_name_assign_check(Options) ->
+ lists:member(relaxed_row_name_assign_check, Options).
+
get_bool_option(Option, Options) ->
case lists:member(Option, Options) of
false ->
@@ -406,8 +415,12 @@ compile_parsed_data(#pdata{mib_name = MibName,
defs = Definitions}) ->
snmpc_lib:import(Imports),
update_imports(Imports),
- Deprecated = get_deprecated(get(options)),
- definitions_loop(Definitions, Deprecated),
+ Opts = get(options),
+ Deprecated = get_deprecated(Opts),
+ RelChk = get_relaxed_row_name_assign_check(Opts),
+ Data = #dldata{deprecated = Deprecated,
+ relaxed_row_name_assign_check = RelChk},
+ definitions_loop(Definitions, Data),
MibName.
update_imports(Imports) ->
@@ -436,21 +449,21 @@ update_status(Name, Status) ->
%% A deprecated object
definitions_loop([{#mc_object_type{name = ObjName, status = deprecated},
Line}|T],
- false) ->
+ #dldata{deprecated = false} = Data) ->
%% May be implemented but the compiler chooses not to.
?vinfo2("object_type ~w is deprecated => ignored", [ObjName], Line),
update_status(ObjName, deprecated),
- definitions_loop(T, false);
+ definitions_loop(T, Data);
%% A obsolete object
definitions_loop([{#mc_object_type{name = ObjName, status = obsolete},
Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("object_type ~w is obsolete => ignored", [ObjName], Line),
%% No need to implement a obsolete object
update_status(ObjName, obsolete),
ensure_macro_imported('OBJECT-TYPE', Line),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
%% Defining a table
definitions_loop([{#mc_object_type{name = NameOfTable,
@@ -475,7 +488,7 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
{#mc_sequence{name = SeqName,
fields = FieldList},
Sline}|ColsEtc],
- Deprecated) ->
+ Data) ->
?vlog("defloop -> "
"[object_type(sequence_of),object_type(type,[1]),sequence]:"
"~n NameOfTable: ~p"
@@ -529,7 +542,89 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
TableME#me{assocList=[{table_info,
TableInfo} | make_reference(Ref)]} |
ColMEs]),
- definitions_loop(RestObjs, Deprecated);
+ definitions_loop(RestObjs, Data);
+
+definitions_loop([{#mc_object_type{name = NameOfTable,
+ syntax = {{sequence_of, SeqName}, _},
+ max_access = Taccess,
+ kind = Kind,
+ status = Tstatus,
+ description = Desc1,
+ units = Tunits,
+ reference = Ref,
+ name_assign = Tindex},
+ Tline},
+ {#mc_object_type{name = NameOfEntry,
+ syntax = {{type, SeqName}, TEline},
+ max_access = 'not-accessible',
+ kind = {table_entry, IndexingInfo},
+ status = Estatus,
+ description = Desc2,
+ units = Eunits,
+ name_assign = {NameOfTable,[Idx]} = BadOID},
+ Eline},
+ {#mc_sequence{name = SeqName,
+ fields = FieldList},
+ Sline}|ColsEtc],
+ #dldata{relaxed_row_name_assign_check = true} = Data)
+ when is_integer(Idx) andalso (Idx > 1) ->
+ ?vlog("defloop -> "
+ "[object_type(sequence_of),object_type(type,[~w]),sequence]:"
+ "~n NameOfTable: ~p"
+ "~n SeqName: ~p"
+ "~n Taccess: ~p"
+ "~n Kind: ~p"
+ "~n Tstatus: ~p"
+ "~n Tindex: ~p"
+ "~n Tunits: ~p"
+ "~n Tline: ~p"
+ "~n NameOfEntry: ~p"
+ "~n TEline: ~p"
+ "~n IndexingInfo: ~p"
+ "~n Estatus: ~p"
+ "~n Eunits: ~p"
+ "~n Ref: ~p"
+ "~n Eline: ~p"
+ "~n FieldList: ~p"
+ "~n Sline: ~p",
+ [Idx,
+ NameOfTable,SeqName,Taccess,Kind,Tstatus,
+ Tindex,Tunits,Tline,
+ NameOfEntry,TEline,IndexingInfo,Estatus,Eunits,Ref,Eline,
+ FieldList,Sline]),
+ update_status(NameOfTable, Tstatus),
+ update_status(NameOfEntry, Estatus),
+ update_status(SeqName, undefined),
+ ensure_macro_imported('OBJECT-TYPE', Tline),
+ ?vwarning2("Bad TableEntry OID definition (~w)",
+ [BadOID], Eline),
+ test_table(NameOfTable,Taccess,Kind,Tindex,Tline),
+ {Tfather,Tsubindex} = Tindex,
+ snmpc_lib:register_oid(Tline,NameOfTable,Tfather,Tsubindex),
+ Description1 = make_description(Desc1),
+ TableME = #me{aliasname = NameOfTable,
+ entrytype = table,
+ access = 'not-accessible',
+ description = Description1,
+ units = Tunits},
+ snmpc_lib:register_oid(TEline,NameOfEntry,NameOfTable,[Idx]),
+ Description2 = make_description(Desc2),
+ TableEntryME = #me{aliasname = NameOfEntry,
+ entrytype = table_entry,
+ assocList = [{table_entry_with_sequence, SeqName}],
+ access = 'not-accessible',
+ description = Description2,
+ units = Eunits},
+ {ColMEs, RestObjs} =
+ define_cols(ColsEtc, 1, FieldList, NameOfEntry, NameOfTable, []),
+ TableInfo = snmpc_lib:make_table_info(Eline, NameOfTable,
+ IndexingInfo, ColMEs),
+ snmpc_lib:add_cdata(#cdata.mes,
+ [TableEntryME,
+ TableME#me{assocList=[{table_info,
+ TableInfo} | make_reference(Ref)]} |
+ ColMEs]),
+ definitions_loop(RestObjs, Data);
definitions_loop([{#mc_object_type{name = NameOfTable,
syntax = {{sequence_of, SeqName},_},
@@ -550,7 +645,7 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
name_assign = BadOID}, Eline},
{#mc_sequence{name = SeqName,
fields = FieldList}, Sline}|ColsEtc],
- Deprecated) ->
+ Data) ->
?vlog("defloop -> "
"[object_type(sequence_of),object_type(type),sequence(fieldList)]:"
"~n NameOfTable: ~p"
@@ -605,13 +700,13 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
TableME#me{assocList=[{table_info,
TableInfo} | make_reference(Ref)]} |
ColMEs]),
- definitions_loop(RestObjs, Deprecated);
+ definitions_loop(RestObjs, Data);
definitions_loop([{#mc_new_type{name = NewTypeName,
macro = Macro,
syntax = OldType,
display_hint = DisplayHint},Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> new_type:"
"~n Macro: ~p"
"~n NewTypeName: ~p"
@@ -632,7 +727,7 @@ definitions_loop([{#mc_new_type{name = NewTypeName,
imported = false,
display_hint = DisplayHint}])
end,
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
%% Plain variable
definitions_loop([{#mc_object_type{name = NewVarName,
@@ -643,7 +738,7 @@ definitions_loop([{#mc_object_type{name = NewVarName,
description = Desc1,
units = Units,
name_assign = {Parent,SubIndex}},Line} |T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> object_type (variable):"
"~n NewVarName: ~p"
"~n Type: ~p"
@@ -672,7 +767,7 @@ definitions_loop([{#mc_object_type{name = NewVarName,
VI = snmpc_lib:make_variable_info(NewME2),
snmpc_lib:add_cdata(#cdata.mes,
[NewME2#me{assocList = [{variable_info, VI}]}]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_module_identity{name = NewVarName,
last_updated = LU,
@@ -682,7 +777,7 @@ definitions_loop([{#mc_module_identity{name = NewVarName,
revisions = Revs0,
name_assign = {Parent, SubIndex}},
Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> module-identity: "
"~n NewVarName: ~p"
"~n LU: ~p"
@@ -706,13 +801,13 @@ definitions_loop([{#mc_module_identity{name = NewVarName,
snmpc_lib:add_cdata(
#cdata.mes,
[snmpc_lib:makeInternalNode2(false, NewVarName)]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_internal{name = NewVarName,
macro = Macro,
parent = Parent,
sub_index = SubIndex},Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> internal:"
"~n NewVarName: ~p"
"~n Macro: ~p"
@@ -724,7 +819,7 @@ definitions_loop([{#mc_internal{name = NewVarName,
snmpc_lib:add_cdata(
#cdata.mes,
[snmpc_lib:makeInternalNode2(false, NewVarName)]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
%% A trap message
definitions_loop([{#mc_trap{name = TrapName,
@@ -732,7 +827,7 @@ definitions_loop([{#mc_trap{name = TrapName,
vars = Variables,
description = Desc1,
num = SpecificCode}, Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> trap:"
"~n TrapName: ~p"
"~n EnterPrise: ~p"
@@ -755,7 +850,7 @@ definitions_loop([{#mc_trap{name = TrapName,
lists:foreach(fun(Trap2) -> snmpc_lib:check_trap(Trap2, Trap, Line) end,
CDATA#cdata.traps),
snmpc_lib:add_cdata(#cdata.traps, [Trap]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_object_type{name = NameOfEntry,
syntax = Type,
@@ -763,7 +858,7 @@ definitions_loop([{#mc_object_type{name = NameOfEntry,
kind = {table_entry, Index},
status = Estatus,
name_assign = SubIndex},Eline}|T],
- Deprecated) ->
+ Data) ->
?vlog("defloop -> object_type (table_entry):"
"~n NameOfEntry: ~p"
"~n Type: ~p"
@@ -777,7 +872,7 @@ definitions_loop([{#mc_object_type{name = NameOfEntry,
update_status(NameOfEntry, Estatus),
snmpc_lib:print_error("Misplaced TableEntry definition (~w)",
[NameOfEntry], Eline),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_notification{name = TrapName,
status = deprecated}, Line}|T],
@@ -790,19 +885,19 @@ definitions_loop([{#mc_notification{name = TrapName,
definitions_loop([{#mc_notification{name = TrapName,
status = obsolete}, Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> notification ~w is obsolete => ignored",
[TrapName], Line),
update_status(TrapName, obsolete),
ensure_macro_imported('NOTIFICATION-TYPE', Line),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_notification{name = TrapName,
vars = Variables,
status = Status,
description = Desc,
name_assign = {Parent, SubIndex}},Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> notification:"
"~n TrapName: ~p"
"~n Variables: ~p"
@@ -824,13 +919,13 @@ definitions_loop([{#mc_notification{name = TrapName,
oidobjects = Variables},
snmpc_lib:check_notification(Notif, Line, CDATA#cdata.traps),
snmpc_lib:add_cdata(#cdata.traps, [Notif]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
-definitions_loop([{#mc_module_compliance{name = Name},Line}|T], Deprecated) ->
+definitions_loop([{#mc_module_compliance{name = Name},Line}|T], Data) ->
?vlog2("defloop -> module_compliance:"
"~n Name: ~p", [Name], Line),
ensure_macro_imported('MODULE-COMPLIANCE', Line),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_object_group{name = Name,
objects = GroupObjects,
@@ -838,7 +933,7 @@ definitions_loop([{#mc_object_group{name = Name,
description = Desc,
reference = Ref,
name_assign = {Parent,SubIndex}}, Line}|T],
- Deprecated) ->
+ Data) ->
?vlog2("defloop -> object_group ~p:"
"~n GroupObjects: ~p"
"~n Status: ~p"
@@ -873,7 +968,7 @@ definitions_loop([{#mc_object_group{name = Name,
{objects, GroupObjects}]},
snmpc_lib:add_cdata(#cdata.mes, [NewME]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_notification_group{name = Name,
objects = GroupObjects,
@@ -882,7 +977,7 @@ definitions_loop([{#mc_notification_group{name = Name,
reference = Ref,
name_assign = {Parent,SubIndex}},
Line}
- |T], Deprecated) ->
+ |T], Data) ->
?vlog2("defloop -> notification_group ~p:"
"~n GroupObjects: ~p"
"~n Status: ~p"
@@ -918,13 +1013,13 @@ definitions_loop([{#mc_notification_group{name = Name,
{objects, GroupObjects}]},
snmpc_lib:add_cdata(#cdata.mes, [NewME]),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_object_type{name = NameOfTable,
syntax = {{sequence_of, SeqName},_},
status = Tstatus},Tline},
Entry, Seq|T],
- Deprecated) ->
+ Data) ->
?vlog("defloop -> object_type (sequence_of): "
"~n NameOfTable: ~p"
"~n SeqName: ~p"
@@ -956,12 +1051,12 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
"Invalid TableEntry '~p' (check STATUS, Sequence name, Oid)",
[safe_elem(1,safe_elem(2,Entry))],Tline)
end,
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_object_type{name = NameOfTable,
syntax = {{sequence_of, SeqName},_},
status = Tstatus},Tline}|T],
- Deprecated) ->
+ Data) ->
?vlog("defloop -> object_type (sequence_of):"
"~n object_type: ~p"
"~n sequence_of: ~p"
@@ -969,24 +1064,24 @@ definitions_loop([{#mc_object_type{name = NameOfTable,
update_status(NameOfTable, Tstatus),
snmpc_lib:print_error("Invalid statements following table ~p.",
[NameOfTable],Tline),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
definitions_loop([{#mc_sequence{name = SeqName,
fields = _FieldList},Line}|T],
- Deprecated) ->
+ Data) ->
?vwarning2("Unexpected SEQUENCE ~w => ignoring", [SeqName], Line),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
-definitions_loop([{Obj,Line}|T], Deprecated) ->
+definitions_loop([{Obj,Line}|T], Data) ->
?vinfo2("defloop -> unknown error"
"~n Obj: ~p", [Obj], Line),
snmpc_lib:print_error("Unknown Error in MIB. "
"Can't describe the error better than this: ~999p ignored."
" Please send a trouble report to [email protected].",
[Obj], Line),
- definitions_loop(T, Deprecated);
+ definitions_loop(T, Data);
-definitions_loop([], _Deprecated) ->
+definitions_loop([], _Data) ->
?vlog("defloop -> done", []),
ok.
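The new relaxed_row_name_assign_check option threads through get_options/3, check_options/1 and the #dldata{} record so that the extra definitions_loop/2 clause above can accept a table entry registered as {Table, [Idx]} with Idx > 1, downgrading what used to be a hard error to a warning. A usage sketch (file and directory names are made up; compare the otp_8574 test case further down):

%% Illustrative only.
compile_relaxed() ->
    snmpc:compile("SOME-BROKEN-MIB.mib",
                  [{outdir, "./mibs"},
                   {group_check, false},
                   relaxed_row_name_assign_check]).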
diff --git a/lib/snmp/src/compile/snmpc_lib.erl b/lib/snmp/src/compile/snmpc_lib.erl
index b7e84e7d6b..4e5bc69f81 100644
--- a/lib/snmp/src/compile/snmpc_lib.erl
+++ b/lib/snmp/src/compile/snmpc_lib.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -125,7 +125,8 @@ test_kibbles(Kibbles,Line) ->
test_kibbles2([],_,_) ->
ok;
-test_kibbles2([{_KibbleName,BitNo}|Ks],BitNo,Line) ->
+test_kibbles2([{_KibbleName,BitNo}|Ks],ExpectBitNo,Line)
+ when BitNo >= ExpectBitNo ->
test_kibbles2(Ks,BitNo+1,Line);
test_kibbles2([{_KibbleName,BitNo}|_Ks],ExpectBitNo,Line) ->
print_error("Expected kibble no ~p but got ~p.",[ExpectBitNo,BitNo],Line).
diff --git a/lib/snmp/src/manager/snmpm_mpd.erl b/lib/snmp/src/manager/snmpm_mpd.erl
index d76ad20051..7712370d28 100644
--- a/lib/snmp/src/manager/snmpm_mpd.erl
+++ b/lib/snmp/src/manager/snmpm_mpd.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -257,11 +257,11 @@ process_v3_msg(NoteStore, Msg, Hdr, Data, Addr, Port, Log) ->
end,
?vlog("7.2.7"
- "~n ContextEngineID: \"~s\" "
+ "~n ContextEngineID: ~p "
"~n context: \"~s\" ",
[CtxEngineID, CtxName]),
if
- SecLevel == 3 -> % encrypted message - log decrypted pdu
+ SecLevel =:= 3 -> % encrypted message - log decrypted pdu
Log({Hdr, ScopedPDUBytes});
true -> % otherwise, log binary
Log(Msg)
@@ -338,7 +338,8 @@ process_v3_msg(NoteStore, Msg, Hdr, Data, Addr, Port, Log) ->
SnmpEngineID = get_engine_id(),
case SecEngineID of
SnmpEngineID -> % 7.2.13.b
- ?vtrace("valid securityEngineID: ~p", [SecEngineID]),
+ ?vtrace("7.2.13d - valid securityEngineID: ~p",
+ [SecEngineID]),
%% 4.2.2.1.1 - we don't handle proxys yet => we only
%% handle CtxEngineID to ourselves
%% Check that we actually know of an agent with this
@@ -353,7 +354,9 @@ process_v3_msg(NoteStore, Msg, Hdr, Data, Addr, Port, Log) ->
{MsgID, MsgSecModel, SecName, SecLevel,
CtxEngineID, CtxName, SecData},
{ok, 'version-3', PDU, PduMMS, ACMData};
- _ ->
+ UnknownEngineID ->
+ ?vtrace("4.2.2.1.2 - UnknownEngineId: ~p",
+ [UnknownEngineID]),
%% 4.2.2.1.2
NIsReportable = snmp_misc:is_reportable_pdu(Type),
Val = inc(snmpUnknownPDUHandlers),
@@ -377,7 +380,8 @@ process_v3_msg(NoteStore, Msg, Hdr, Data, Addr, Port, Log) ->
end
end;
_ -> % 7.2.13.a
- ?vinfo("invalid securityEngineID: ~p",[SecEngineID]),
+ ?vinfo("7.2.13a - invalid securityEngineID: ~p",
+ [SecEngineID]),
discard({badSecurityEngineID, SecEngineID})
end;
diff --git a/lib/snmp/src/manager/snmpm_net_if.erl b/lib/snmp/src/manager/snmpm_net_if.erl
index ad39157721..07156dacd9 100644
--- a/lib/snmp/src/manager/snmpm_net_if.erl
+++ b/lib/snmp/src/manager/snmpm_net_if.erl
@@ -441,7 +441,7 @@ handle_info(Info, State) ->
%% Returns: any (ignored by gen_server)
%%--------------------------------------------------------------------
terminate(Reason, #state{log = Log, irgc = IrGcRef}) ->
- ?vdebug("terminate: ~p",[Reason]),
+ ?vdebug("terminate: ~p", [Reason]),
irgc_stop(IrGcRef),
%% Close logs
do_close_log(Log),
@@ -462,22 +462,59 @@ do_close_log(_) ->
%% Returns: {ok, NewState}
%%----------------------------------------------------------------------
+code_change({down, _Vsn}, OldState, downgrade_to_pre_4_14) ->
+ ?d("code_change(down, downgrade_to_pre_4_14) -> entry with"
+ "~n OldState: ~p", [OldState]),
+ #state{server = Server,
+ note_store = NoteStore,
+ sock = Sock,
+ mpd_state = MpdState,
+ log = {OldLog, Type},
+ irb = IRB,
+ irgc = IRGC} = OldState,
+ NewLog = snmp_log:downgrade(OldLog),
+ State =
+ {state, Server, NoteStore, Sock, MpdState, {NewLog, Type}, IRB, IRGC},
+ {ok, State};
+
code_change({down, _Vsn}, OldState, downgrade_to_pre_4_16) ->
- ?d("code_change(down) -> entry", []),
+ ?d("code_change(down, downgrade_to_pre_4_16) -> entry with"
+ "~n OldState: ~p", [OldState]),
{OldLog, Type} = OldState#state.log,
NewLog = snmp_log:downgrade(OldLog),
State = OldState#state{log = {NewLog, Type}},
{ok, State};
% upgrade
+code_change(_Vsn, OldState, upgrade_from_pre_4_14) ->
+ ?d("code_change(up, upgrade_from_pre_4_14) -> entry with"
+ "~n OldState: ~p", [OldState]),
+ {state, Server, NoteStore, Sock, MpdState, {OldLog, Type}, IRB, IRGC} =
+ OldState,
+ NewLog = snmp_log:upgrade(OldLog),
+ State = #state{server = Server,
+ note_store = NoteStore,
+ sock = Sock,
+ mpd_state = MpdState,
+ log = {NewLog, Type},
+ irb = IRB,
+ irgc = IRGC,
+ filter = ?DEFAULT_FILTER_MODULE},
+ {ok, State};
+
code_change(_Vsn, OldState, upgrade_from_pre_4_16) ->
- ?d("code_change(up) -> entry", []),
+ ?d("code_change(up, upgrade_from_pre_4_16) -> entry with"
+ "~n OldState: ~p", [OldState]),
{OldLog, Type} = OldState#state.log,
NewLog = snmp_log:upgrade(OldLog),
State = OldState#state{log = {NewLog, Type}},
{ok, State};
code_change(_Vsn, State, _Extra) ->
+ ?d("code_change -> entry with"
+ "~n Vsn: ~p"
+ "~n State: ~p"
+ "~n Extra: ~p", [_Vsn, State, _Extra]),
{ok, State}.
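The Extra terms named in the appup ({advanced, upgrade_from_pre_4_14} and friends) arrive here as the third argument of code_change/3, which is how the clauses above are selected. A hedged sketch of the mechanism follows; in a real release upgrade this is driven by release_handler rather than called by hand, and the process reference is assumed.

%% Illustrative only: suspend, swap code, let the appup Extra pick the clause.
manual_upgrade(Pid, OldVsn) ->
    ok = sys:suspend(Pid),
    ok = sys:change_code(Pid, snmpm_net_if, OldVsn, upgrade_from_pre_4_14),
    ok = sys:resume(Pid).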
diff --git a/lib/snmp/src/manager/snmpm_server.erl b/lib/snmp/src/manager/snmpm_server.erl
index 30aacc0ec3..d64b5b1d53 100644
--- a/lib/snmp/src/manager/snmpm_server.erl
+++ b/lib/snmp/src/manager/snmpm_server.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -2804,16 +2804,16 @@ agent_data(TargetName, CtxName) ->
agent_data(TargetName, CtxName, Config) ->
case snmpm_config:agent_info(TargetName, all) of
{ok, Info} ->
- {value, {_, Version}} = lists:keysearch(version, 1, Info),
+ Version = agent_data_item(version, Info),
MsgData =
case Version of
v3 ->
DefSecModel = agent_data_item(sec_model, Info),
DefSecName = agent_data_item(sec_name, Info),
DefSecLevel = agent_data_item(sec_level, Info),
-
+
EngineId = agent_data_item(engine_id, Info),
-
+
SecModel = agent_data_item(sec_model,
Config,
DefSecModel),
@@ -2829,7 +2829,7 @@ agent_data(TargetName, CtxName, Config) ->
_ ->
DefComm = agent_data_item(community, Info),
DefSecModel = agent_data_item(sec_model, Info),
-
+
Comm = agent_data_item(community,
Config,
DefComm),
@@ -2848,8 +2848,12 @@ agent_data(TargetName, CtxName, Config) ->
end.
agent_data_item(Item, Info) ->
- {value, {_, Val}} = lists:keysearch(Item, 1, Info),
- Val.
+ case lists:keysearch(Item, 1, Info) of
+ {value, {_, Val}} ->
+ Val;
+ false ->
+ throw({error, {not_found, Item, Info}})
+ end.
agent_data_item(Item, Info, Default) ->
case lists:keysearch(Item, 1, Info) of
diff --git a/lib/snmp/src/misc/snmp_pdus.erl b/lib/snmp/src/misc/snmp_pdus.erl
index 6c80fc3876..dc8900c8cd 100644
--- a/lib/snmp/src/misc/snmp_pdus.erl
+++ b/lib/snmp/src/misc/snmp_pdus.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -38,7 +38,10 @@
dec_usm_security_parameters/1,
strip_encrypted_scoped_pdu_data/1,
octet_str_to_bits/1, bits_to_str/1,
- get_encoded_length/1]).
+ get_encoded_length/1,
+ enc_value/2, dec_value/1]).
+
+%% -compile(export_all).
%% Returns the number of octets required to encode Length.
get_encoded_length(Length) ->
@@ -290,12 +293,18 @@ dec_value([68 | Bytes]) ->
{Value, Rest} = dec_oct_str_notag(Bytes),
{{'Opaque', Value}, Rest};
dec_value([70 | Bytes]) ->
+	    %% Counter64 is an unsigned 64-bit integer, but it is actually
+	    %% encoded as a signed 64-bit integer.
{Value, Rest} = dec_integer_notag(Bytes),
- if Value >= 0, Value =< 18446744073709551615 ->
- {{'Counter64', Value}, Rest};
- true ->
- exit({error, {bad_counter64, Value}})
- end;
+ Value2 =
+ if
+ (Value >= 0) andalso (Value < 16#8000000000000000) ->
+ Value;
+ (Value < 0) ->
+ 18446744073709551615 + Value + 1;
+ true ->
+ exit({error, {bad_counter64, Value}}) end,
+ {{'Counter64', Value2}, Rest};
dec_value([128,0|T]) ->
{{'NULL', noSuchObject}, T};
dec_value([129,0|T]) ->
@@ -633,6 +642,21 @@ enc_value(_Type, endOfMibView) ->
[130,0];
enc_value('NULL', _Val) ->
[5,0];
+enc_value('Counter64', Val) ->
+ Val2 =
+ if
+ Val > 16#ffffffffffffffff ->
+ exit({error, {bad_counter64, Val}});
+ Val >= 16#8000000000000000 ->
+ (Val band 16#7fffffffffffffff) - 16#8000000000000000;
+ Val >= 0 ->
+ Val;
+ true ->
+ exit({error, {bad_counter64, Val}})
+ end,
+ Bytes2 = enc_integer_notag(Val2),
+ Len2 = elength(length(Bytes2)),
+ lists:append([70 | Len2],Bytes2);
enc_value(Type, Val) ->
Bytes2 = enc_integer_notag(Val),
Len2 = elength(length(Bytes2)),
@@ -643,10 +667,7 @@ enc_val_tag('Counter32',Val) when (Val >= 0) andalso (Val =< 4294967295) ->
enc_val_tag('Unsigned32', Val) when (Val >= 0) andalso (Val =< 4294967295) ->
66;
enc_val_tag('TimeTicks', Val) when (Val >= 0) andalso (Val =< 4294967295) ->
- 67;
-enc_val_tag('Counter64', Val) when ((Val >= 0) andalso
- (Val =< 18446744073709551615)) ->
- 70.
+ 67.
%%----------------------------------------------------------------------
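The Counter64 change (OTP-8563) boils down to a mapping between the unsigned range [0, 2^64) and the signed 64-bit range that the plain INTEGER encoder and decoder work in. A standalone sketch of just that mapping, extracted from the clauses above:

%% Values in [0, 2^63) encode as themselves; values in [2^63, 2^64)
%% become the corresponding negative signed 64-bit integer. Decoding
%% reverses this by adding 2^64.
to_signed64(V) when V >= 0, V < 16#8000000000000000 ->
    V;
to_signed64(V) when V =< 16#ffffffffffffffff ->
    (V band 16#7fffffffffffffff) - 16#8000000000000000.

from_signed64(V) when V >= 0 ->
    V;
from_signed64(V) ->
    16#ffffffffffffffff + V + 1.

For example, from_signed64(to_signed64(16#ffffffffffffffff)) gives back 16#ffffffffffffffff, which is what the otp8563 test case near the end of this patch exercises end to end.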
diff --git a/lib/snmp/src/misc/snmp_usm.erl b/lib/snmp/src/misc/snmp_usm.erl
index 19be564a8e..3508f9e1c2 100644
--- a/lib/snmp/src/misc/snmp_usm.erl
+++ b/lib/snmp/src/misc/snmp_usm.erl
@@ -198,7 +198,7 @@ des_encrypt(PrivKey, Data, SaltFun) ->
[A,B,C,D,E,F,G,H | PreIV] = PrivKey,
DesKey = [A,B,C,D,E,F,G,H],
Salt = SaltFun(),
- IV = snmp_misc:str_xor(PreIV, Salt),
+ IV = list_to_binary(snmp_misc:str_xor(PreIV, Salt)),
TailLen = (8 - (length(Data) rem 8)) rem 8,
Tail = mk_tail(TailLen),
EncData = crypto:des_cbc_encrypt(DesKey, IV, [Data,Tail]),
@@ -213,13 +213,13 @@ des_decrypt(PrivKey, MsgPrivParams, EncData)
[A,B,C,D,E,F,G,H | PreIV] = PrivKey,
DesKey = [A,B,C,D,E,F,G,H],
Salt = MsgPrivParams,
- IV = snmp_misc:str_xor(PreIV, Salt),
+ IV = list_to_binary(snmp_misc:str_xor(PreIV, Salt)),
%% What about errors here??? E.g. not a multiple of 8!
Data = binary_to_list(crypto:des_cbc_decrypt(DesKey, IV, EncData)),
Data2 = snmp_pdus:strip_encrypted_scoped_pdu_data(Data),
{ok, Data2};
des_decrypt(PrivKey, BadMsgPrivParams, EncData) ->
- ?vtrace("des_decrypt -> entry with when bad MsgPrivParams"
+ ?vtrace("des_decrypt -> entry when bad MsgPrivParams"
"~n PrivKey: ~p"
"~n BadMsgPrivParams: ~p"
"~n EncData: ~p",
@@ -232,7 +232,7 @@ aes_encrypt(PrivKey, Data, SaltFun) ->
Salt = SaltFun(),
EngineBoots = snmp_framework_mib:get_engine_boots(),
EngineTime = snmp_framework_mib:get_engine_time(),
- IV = [?i32(EngineBoots), ?i32(EngineTime) | Salt],
+ IV = list_to_binary([?i32(EngineBoots), ?i32(EngineTime) | Salt]),
EncData = crypto:aes_cfb_128_encrypt(AesKey, IV, Data),
{ok, binary_to_list(EncData), Salt}.
@@ -240,7 +240,7 @@ aes_decrypt(PrivKey, MsgPrivParams, EncData, EngineBoots, EngineTime)
when length(MsgPrivParams) =:= 8 ->
AesKey = PrivKey,
Salt = MsgPrivParams,
- IV = [?i32(EngineBoots), ?i32(EngineTime) | Salt],
+ IV = list_to_binary([?i32(EngineBoots), ?i32(EngineTime) | Salt]),
%% What about errors here??? E.g. not a multiple of 8!
Data = binary_to_list(crypto:aes_cfb_128_decrypt(AesKey, IV, EncData)),
Data2 = snmp_pdus:strip_encrypted_scoped_pdu_data(Data),
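The only functional change in these snmp_usm hunks is wrapping the computed IV in list_to_binary/1, presumably because the NIF-based crypto in R14 is stricter about the IVec argument being a binary. A tiny self-contained illustration with dummy key material:

%% Dummy values; shows only the list-to-binary conversion of the IV.
demo_iv() ->
    PreIV = [1,2,3,4,5,6,7,8],
    Salt  = [8,7,6,5,4,3,2,1],
    IV    = list_to_binary(snmp_misc:str_xor(PreIV, Salt)),
    true  = is_binary(IV),
    IV.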
diff --git a/lib/snmp/test/modules.mk b/lib/snmp/test/modules.mk
index ff848cad1b..6a0c3e9481 100644
--- a/lib/snmp/test/modules.mk
+++ b/lib/snmp/test/modules.mk
@@ -1,20 +1,20 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2004-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2004-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
SUITE_MODULES = \
@@ -57,6 +57,10 @@ MODULES = \
HRL_FILES = snmp_test_lib.hrl
+# These are MIBs that are used by the compiler test-suite.
+COMPILER_MIB_FILES = \
+ OTP8574-MIB
+
MIB_FILES = \
OLD-SNMPEA-MIB.mib \
OLD-SNMPEA-MIB-v2.mib \
diff --git a/lib/snmp/test/snmp_agent_test.erl b/lib/snmp/test/snmp_agent_test.erl
index af0581150a..9d2e9969c4 100644
--- a/lib/snmp/test/snmp_agent_test.erl
+++ b/lib/snmp/test/snmp_agent_test.erl
@@ -1046,7 +1046,7 @@ v1_cases() ->
sparse_table,
cnt_64,
opaque,
-
+
change_target_addr_config
].
@@ -1977,7 +1977,8 @@ inform_i(Config) ->
?P1("unload TestTrap & TestTrapv2..."),
?line unload_master("TestTrap"),
- ?line unload_master("TestTrapv2").
+ ?line unload_master("TestTrapv2"),
+ ok.
v3_inform_i(X) ->
%% <CONDITIONAL-SKIP>
@@ -3446,7 +3447,7 @@ do_mul_set_err() ->
?line ?v1_2(expect(2, noSuchName, 1, any),
expect(2, [{[friendsEntry, [2,3]], noSuchInstance}])),
g([NewKeyc4]),
- ?line ?v1_2(expect(3, noSuchName, 1, any),
+ ?line ?v1_2(expect(3, noSuchName, 1, any),
expect(3, [{NewKeyc4, noSuchInstance}])).
%% Req. SA-MIB
@@ -3457,10 +3458,10 @@ sa_mib() ->
?line expect(2, [{[sa, [1,0]], "sa_test"}]).
ma_trap1(MA) ->
- snmpa:send_trap(MA, testTrap2, "standard trap"),
+ ok = snmpa:send_trap(MA, testTrap2, "standard trap"),
?line expect(1, trap, [system], 6, 1, [{[system, [4,0]],
"{mbj,eklas}@erlang.ericsson.se"}]),
- snmpa:send_trap(MA, testTrap1, "standard trap"),
+ ok = snmpa:send_trap(MA, testTrap1, "standard trap"),
?line expect(2, trap, [1,2,3] , 1, 0, [{[system, [4,0]],
"{mbj,eklas}@erlang.ericsson.se"}]).
@@ -3509,7 +3510,8 @@ ma_v2_trap1(MA) ->
?DBG("ma_v2_traps -> send standard trap: testTrapv21",[]),
snmpa:send_trap(MA, testTrapv21, "standard trap"),
?line expect(2, v2trap, [{[sysUpTime, 0], any},
- {[snmpTrapOID, 0], ?snmp ++ [1]}]).
+ {[snmpTrapOID, 0], ?snmp ++ [1]}]),
+ ok.
ma_v2_trap2(MA) ->
snmpa:send_trap(MA,testTrapv22,"standard trap",[{sysContact,"pelle"}]),
@@ -3517,7 +3519,7 @@ ma_v2_trap2(MA) ->
{[snmpTrapOID, 0], ?system ++ [0,1]},
{[system, [4,0]], "pelle"}]).
-%% Note: This test case takes a while... actually a couple of minutes.
+%% Note: This test case takes a while... actually a couple of minutes.
ma_v2_inform1(MA) ->
?DBG("ma_v2_inform1 -> entry with"
"~n MA = ~p => "
@@ -5258,7 +5260,35 @@ otp_1131_2(X) -> ?P(otp_1131_2), otp_1131(X).
otp_1131_3(X) ->
%% <CONDITIONAL-SKIP>
- Skippable = [{unix, [darwin]}],
+ %% This is intended to catch Montavista Linux 4.0/ppc (2.6.5)
+ %% Montavista Linux looks like a Debian distro (/etc/issue)
+ LinuxVersionVerify =
+ fun() ->
+ case os:cmd("uname -m") of
+ "ppc" ++ _ ->
+ case file:read_file_info("/etc/issue") of
+ {ok, _} ->
+ case os:cmd("grep -i montavista /etc/issue") of
+ Info when (is_list(Info) andalso
+ (length(Info) > 0)) ->
+ case os:version() of
+ {2, 6, 10} ->
+ true;
+ _ ->
+ false
+ end;
+ _ -> % Maybe plain Debian or Ubuntu
+ false
+ end;
+ _ ->
+ %% Not a Debian based distro
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ Skippable = [{unix, [darwin, {linux, LinuxVersionVerify}]}],
Condition = fun() -> ?OS_BASED_SKIP(Skippable) end,
?NON_PC_TC_MAYBE_SKIP(X, Condition),
%% </CONDITIONAL-SKIP>
@@ -6219,12 +6249,15 @@ verify_old_info([Key|Keys], Info) ->
is(S) -> [length(S) | S].
try_test(Func) ->
+ ?P2("try test ~w...", [Func]),
snmp_agent_test_lib:try_test(?MODULE, Func).
try_test(Func, A) ->
+ ?P2("try test ~w...", [Func]),
snmp_agent_test_lib:try_test(?MODULE, Func, A).
try_test(Func, A, Opts) ->
+ ?P2("try test ~w...", [Func]),
snmp_agent_test_lib:try_test(?MODULE, Func, A, Opts).
diff --git a/lib/snmp/test/snmp_agent_test_lib.erl b/lib/snmp/test/snmp_agent_test_lib.erl
index 31b375efa9..9e89aa889c 100644
--- a/lib/snmp/test/snmp_agent_test_lib.erl
+++ b/lib/snmp/test/snmp_agent_test_lib.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -421,7 +421,7 @@ start_agent(Config, Vsns, Opts) ->
?LOG("start_agent -> entry (~p) with"
"~n Config: ~p"
"~n Vsns: ~p"
- "~n Opts: ~p",[node(), Config, Vsns, Opts]),
+ "~n Opts: ~p", [node(), Config, Vsns, Opts]),
?line AgentDir = ?config(agent_dir, Config),
?line SaNode = ?config(snmp_sa, Config),
diff --git a/lib/snmp/test/snmp_compiler_test.erl b/lib/snmp/test/snmp_compiler_test.erl
index 9a9127a130..ad77b01362 100644
--- a/lib/snmp/test/snmp_compiler_test.erl
+++ b/lib/snmp/test/snmp_compiler_test.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2003-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2003-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -46,7 +46,9 @@
module_identity/1,
tickets/1,
- otp_6150/1
+ otp_6150/1,
+ otp_8574/1,
+ otp_8595/1
]).
@@ -56,6 +58,7 @@
-export([
]).
+
%%----------------------------------------------------------------------
%% Macros
%%----------------------------------------------------------------------
@@ -98,7 +101,9 @@ all(suite) ->
tickets(suite) ->
[
- otp_6150
+ otp_6150,
+ otp_8574,
+ otp_8595
].
@@ -178,6 +183,54 @@ otp_6150(Config) when is_list(Config) ->
ok.
+otp_8574(suite) ->
+ [];
+otp_8574(Config) when is_list(Config) ->
+ put(tname,otp_8574),
+ p("starting with Config: ~p~n", [Config]),
+
+ Dir = ?config(comp_dir, Config),
+ MibDir = ?config(mib_dir, Config),
+ MibFile = join(MibDir, "OTP8574-MIB.mib"),
+
+ p("ensure compile fail without relaxed assign check"),
+ case snmpc:compile(MibFile, [{group_check, false}, {outdir, Dir}]) of
+ {error, compilation_failed} ->
+ p("with relaxed assign check MIB compiles with warning"),
+ case snmpc:compile(MibFile, [{group_check, false},
+ {outdir, Dir},
+ relaxed_row_name_assign_check]) of
+ {ok, _Mib} ->
+ ok;
+ {error, Reason} ->
+ p("unexpected compile failure: "
+ "~n Reason: ~p", [Reason]),
+ exit({unexpected_compile_failure, Reason})
+ end;
+
+ {ok, _} ->
+ p("unexpected compile success"),
+ exit(unexpected_compile_success)
+ end.
+
+
+otp_8595(suite) ->
+ [];
+otp_8595(Config) when is_list(Config) ->
+ put(tname,otp_8595),
+ p("starting with Config: ~p~n", [Config]),
+
+ Dir = ?config(comp_dir, Config),
+ MibDir = ?config(mib_dir, Config),
+ MibFile = join(MibDir, "OTP8595-MIB.mib"),
+ ?line {ok, Mib} =
+ snmpc:compile(MibFile, [{outdir, Dir},
+ {verbosity, trace},
+ {group_check, false}]),
+ io:format("otp_8595 -> Mib: ~n~p~n", [Mib]),
+ ok.
+
+
%%======================================================================
%% Internal functions
%%======================================================================
@@ -373,6 +426,9 @@ join(A,B) ->
%% p(F) ->
%% p(F, []).
+p(F) ->
+ p(F, []).
+
p(F, A) ->
p(get(tname), F, A).
diff --git a/lib/snmp/test/snmp_manager_config_test.erl b/lib/snmp/test/snmp_manager_config_test.erl
index fcb3d7e30c..d5dc1387f7 100644
--- a/lib/snmp/test/snmp_manager_config_test.erl
+++ b/lib/snmp/test/snmp_manager_config_test.erl
@@ -1444,10 +1444,9 @@ start_with_invalid_usm_conf_file1(Conf) when is_list(Conf) ->
p("[test 54] write usm config file with invalid auth-key (4)"),
Usm54 = setelement(4, Usm51, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,kalle]"),
write_usm_conf(ConfDir, [Usm54]),
-    %% ?line ok = crypto:start(), %% Why is it already running?
- ?line crypto:start(), %% Make sure it's started...
+ ?line maybe_start_crypto(), %% Make sure it's started...
?line {error, Reason54} = config_start(Opts),
- ?line ok = crypto:stop(),
+ ?line ok = maybe_stop_crypto(),
p("start failed (as expected): ~p", [Reason54]),
?line {failed_check, _, _, _, {invalid_auth_key, _}} = Reason54,
await_config_not_running(),
@@ -1492,21 +1491,35 @@ start_with_invalid_usm_conf_file1(Conf) when is_list(Conf) ->
p("[test 59] write usm config file with invalid auth-key (9)"),
Usm59 = setelement(4, Usm57, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,ka]"),
write_usm_conf(ConfDir, [Usm59]),
- ?line ok = crypto:start(),
+ ?line ok = maybe_start_crypto(),
?line {error, Reason59} = config_start(Opts),
- ?line ok = crypto:stop(),
+ ?line ok = maybe_stop_crypto(),
p("start failed (as expected): ~p", [Reason59]),
?line {failed_check, _, _, _, {invalid_auth_key, _}} = Reason59,
await_config_not_running(),
%% --
- p("[test 5A] write usm config file with valid auth-key when crypto not started (10)"),
- Usm5A = setelement(4, Usm57, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]"),
- write_usm_conf(ConfDir, [Usm5A]),
- ?line {error, Reason5A} = config_start(Opts),
- p("start failed (as expected): ~p", [Reason5A]),
- ?line {failed_check, _, _, _, {unsupported_crypto, _}} = Reason5A,
- await_config_not_running(),
+ %% <CRYPTO-MODIFICATIONS>
+    %% The crypto application no longer needs to be started
+    %% explicitly (as of R14 all of it is implemented with NIFs).
+ case (catch crypto:version()) of
+ {'EXIT', {undef, _}} ->
+ p("[test 5A] write usm config file with valid auth-key "
+ "when crypto not started (10)"),
+ Usm5A = setelement(4,
+ Usm57,
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]"),
+ write_usm_conf(ConfDir, [Usm5A]),
+ ?line {error, Reason5A} = config_start(Opts),
+ p("start failed (as expected): ~p", [Reason5A]),
+ ?line {failed_check, _, _, _, {unsupported_crypto, _}} = Reason5A,
+ await_config_not_running();
+ _ ->
+ %% This function is only present in version 2.0 or greater.
+ %% The crypto app no longer needs to be explicitly started
+ ok
+ end,
+ %% </CRYPTO-MODIFICATIONS>
%% --
p("[test 61] write usm config file with invalid priv-protocol (1)"),
@@ -1566,9 +1579,9 @@ start_with_invalid_usm_conf_file1(Conf) when is_list(Conf) ->
p("[test 74] write usm config file with invalid priv-key (4)"),
Usm74 = setelement(6, Usm71, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,kalle]"),
write_usm_conf(ConfDir, [Usm74]),
- ?line ok = crypto:start(),
+ ?line ok = maybe_start_crypto(),
?line {error, Reason74} = config_start(Opts),
- ?line ok = crypto:stop(),
+ ?line ok = maybe_stop_crypto(),
p("start failed (as expected): ~p", [Reason74]),
?line {failed_check, _, _, _, {invalid_priv_key, _}} = Reason74,
await_config_not_running(),
@@ -1592,15 +1605,27 @@ start_with_invalid_usm_conf_file1(Conf) when is_list(Conf) ->
await_config_not_running(),
%% --
- p("[test 77] write usm config file with valid priv-key when crypto not started (7)"),
- Usm77 = setelement(6, Usm71, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6]"),
- write_usm_conf(ConfDir, [Usm77]),
- ?line {error, Reason77} = config_start(Opts),
- p("start failed (as expected): ~p", [Reason77]),
- ?line {failed_check, _, _, _, {unsupported_crypto, _}} = Reason77,
- await_config_not_running(),
+ %% <CRYPTO-MODIFICATIONS>
+    %% The crypto application no longer needs to be started
+    %% explicitly (as of R14 all of it is implemented with NIFs).
+ case (catch crypto:version()) of
+ {'EXIT', {undef, _}} ->
+ p("[test 77] write usm config file with valid priv-key "
+ "when crypto not started (7)"),
+ Usm77 = setelement(6, Usm71, "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6]"),
+ write_usm_conf(ConfDir, [Usm77]),
+ ?line {error, Reason77} = config_start(Opts),
+ p("start failed (as expected): ~p", [Reason77]),
+ ?line {failed_check, _, _, _, {unsupported_crypto, _}} = Reason77,
+ await_config_not_running();
+ _ ->
+ %% This function is only present in version 2.0 or greater.
+ %% The crypto app no longer needs to be explicitly started
+ ok
+ end,
+ %% </CRYPTO-MODIFICATIONS>
- %% --
+ %% --
p("[test 78] write usm config file with invalid usm (1)"),
write_usm_conf2(ConfDir, "{\"bmkEngine\", \"swiusmcf\"}."),
?line {error, Reason81} = config_start(Opts),
@@ -2676,6 +2701,27 @@ write_conf_file(Dir, File, Str) ->
file:close(Fd).
+maybe_start_crypto() ->
+ case (catch crypto:version()) of
+ {'EXIT', {undef, _}} ->
+ %% This is the version of crypto before the NIFs...
+ ?CRYPTO_START();
+ _ ->
+ %% No need to start this version of crypto..
+ ok
+ end.
+
+maybe_stop_crypto() ->
+ case (catch crypto:version()) of
+ {'EXIT', {undef, _}} ->
+ %% This is the version of crypto before the NIFs...
+ crypto:stop();
+ _ ->
+ %% There is nothing to stop in this version of crypto..
+ ok
+ end.
+
+
%% ------
str(X) ->
diff --git a/lib/snmp/test/snmp_manager_test.erl b/lib/snmp/test/snmp_manager_test.erl
index 518b8b34de..cef96417dc 100644
--- a/lib/snmp/test/snmp_manager_test.erl
+++ b/lib/snmp/test/snmp_manager_test.erl
@@ -795,6 +795,35 @@ notify_started02(suite) -> [];
notify_started02(Config) when is_list(Config) ->
process_flag(trap_exit, true),
put(tname,ns02),
+
+ %% <CONDITIONAL-SKIP>
+ %% The point of this is to catch machines running
+ %% SLES9 (2.6.5)
+ LinuxVersionVerify =
+ fun() ->
+ case os:cmd("uname -m") of
+ "i686" ++ _ ->
+%% io:format("found an i686 machine, "
+%% "now check version~n", []),
+ case os:version() of
+ {2, 6, Rev} when Rev >= 16 ->
+ true;
+ {2, Min, _} when Min > 6 ->
+ true;
+ {Maj, _, _} when Maj > 2 ->
+ true;
+ _ ->
+ false
+ end;
+ _ ->
+ true
+ end
+ end,
+ Skippable = [{unix, [{linux, LinuxVersionVerify}]}],
+ Condition = fun() -> ?OS_BASED_SKIP(Skippable) end,
+ ?NON_PC_TC_MAYBE_SKIP(Config, Condition),
+ %% </CONDITIONAL-SKIP>
+
p("starting with Config: ~n~p", [Config]),
ConfDir = ?config(manager_conf_dir, Config),
diff --git a/lib/snmp/test/snmp_manager_user_test.erl b/lib/snmp/test/snmp_manager_user_test.erl
index 24ed3b0b73..0f47d70873 100644
--- a/lib/snmp/test/snmp_manager_user_test.erl
+++ b/lib/snmp/test/snmp_manager_user_test.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -822,10 +822,39 @@ register_monitor_and_crash3(doc) ->
"Start a single user process, "
"register-monitor one user and register one user, "
"crash the single user process.";
-register_monitor_and_crash3(Conf) when is_list(Conf) ->
+register_monitor_and_crash3(Conf) when is_list(Conf) ->
+ process_flag(trap_exit, true),
put(tname,rlac3),
+
+ %% <CONDITIONAL-SKIP>
+ %% The point of this is to catch machines running
+ %% SLES9 (2.6.5)
+ LinuxVersionVerify =
+ fun() ->
+ case os:cmd("uname -m") of
+ "i686" ++ _ ->
+%% io:format("found an i686 machine, "
+%% "now check version~n", []),
+ case os:version() of
+ {2, 6, Rev} when Rev >= 16 ->
+ true;
+ {2, Min, _} when Min > 6 ->
+ true;
+ {Maj, _, _} when Maj > 2 ->
+ true;
+ _ ->
+ false
+ end;
+ _ ->
+ true
+ end
+ end,
+ Skippable = [{unix, [{linux, LinuxVersionVerify}]}],
+ Condition = fun() -> ?OS_BASED_SKIP(Skippable) end,
+ ?NON_PC_TC_MAYBE_SKIP(Conf, Condition),
+ %% </CONDITIONAL-SKIP>
+
p("start"),
- process_flag(trap_exit, true),
ConfDir = ?config(manager_conf_dir, Conf),
DbDir = ?config(manager_db_dir, Conf),
diff --git a/lib/snmp/test/snmp_pdus_test.erl b/lib/snmp/test/snmp_pdus_test.erl
index d5add50f52..6dc5b779aa 100644
--- a/lib/snmp/test/snmp_pdus_test.erl
+++ b/lib/snmp/test/snmp_pdus_test.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2003-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2003-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -37,6 +37,7 @@
all/1,
tickets/1,
otp7575/1,
+ otp8563/1,
init_per_testcase/2, fin_per_testcase/2
]).
@@ -66,6 +67,7 @@ init_per_testcase(_Case, Config) when is_list(Config) ->
fin_per_testcase(_Case, Config) when is_list(Config) ->
Config.
+
%%======================================================================
%% Test case definitions
%%======================================================================
@@ -76,7 +78,8 @@ all(suite) ->
tickets(suite) ->
[
- otp7575
+ otp7575,
+ otp8563
].
@@ -118,6 +121,55 @@ otp7575(Config) when is_list(Config) ->
ok.
+otp8563(suite) -> [];
+otp8563(doc) -> ["OTP-8563"];
+otp8563(Config) when is_list(Config) ->
+ Val1 = 16#7fffffffffffffff,
+ io:format("try encode and decode ~w~n", [Val1]),
+ Enc1 = snmp_pdus:enc_value('Counter64', Val1),
+ {{'Counter64', Val1}, []} = snmp_pdus:dec_value(Enc1),
+
+ Val2 = Val1 + 1,
+ io:format("try encode and decode ~w~n", [Val2]),
+ Enc2 = snmp_pdus:enc_value('Counter64', Val2),
+ {{'Counter64', Val2}, []} = snmp_pdus:dec_value(Enc2),
+
+ Val3 = Val2 + 1,
+ io:format("try encode and decode ~w~n", [Val3]),
+ Enc3 = snmp_pdus:enc_value('Counter64', Val3),
+ {{'Counter64', Val3}, []} = snmp_pdus:dec_value(Enc3),
+
+ Val4 = 16#fffffffffffffffe,
+ io:format("try encode and decode ~w~n", [Val4]),
+ Enc4 = snmp_pdus:enc_value('Counter64', Val4),
+ {{'Counter64', Val4}, []} = snmp_pdus:dec_value(Enc4),
+
+ Val5 = Val4 + 1,
+ io:format("try encode and decode ~w~n", [Val5]),
+ Enc5 = snmp_pdus:enc_value('Counter64', Val5),
+ {{'Counter64', Val5}, []} = snmp_pdus:dec_value(Enc5),
+
+ Val6 = 16#ffffffffffffffff + 1,
+ io:format("try and fail to encode ~w~n", [Val6]),
+ case (catch snmp_pdus:enc_value('Counter64', Val6)) of
+ {'EXIT', {error, {bad_counter64, Val6}}} ->
+ ok;
+ Unexpected6 ->
+ exit({unexpected_encode_result, Unexpected6, Val6})
+ end,
+
+ Val7 = -1,
+ io:format("try and fail to encode ~w~n", [Val7]),
+ case (catch snmp_pdus:enc_value('Counter64', Val7)) of
+ {'EXIT', {error, {bad_counter64, Val7}}} ->
+ ok;
+ Unexpected7 ->
+ exit({unexpected_encode_result, Unexpected7, Val7})
+ end,
+
+ ok.
+
+
%%======================================================================
%% Internal functions
%%======================================================================
diff --git a/lib/snmp/test/snmp_test_data/OLD-SNMPEA-MIB.mib b/lib/snmp/test/snmp_test_data/OLD-SNMPEA-MIB.mib
index dd90d0ab50..2ba1a6fd67 100644
--- a/lib/snmp/test/snmp_test_data/OLD-SNMPEA-MIB.mib
+++ b/lib/snmp/test/snmp_test_data/OLD-SNMPEA-MIB.mib
@@ -12,18 +12,12 @@ OLD-SNMPEA-MIB DEFINITIONS ::= BEGIN
;
-- MODULE-IDENTITY
--- LAST-UPDATED "9709220900Z"
--- ORGANIZATION "ETX/DN/S"
--- CONTACT-INFO
--- " Martin Bj�rklund
---
--- Postal: ERICSSON SOFTWARE TECHNOLOGY AB
--- ERLANG SYSTEMS
--- Box 1214
--- S-164 28 KISTA, SWEDEN
---
--- Tel: +46 8 719 20 89
--- E-mail: [email protected]"
+-- LAST-UPDATED "1004200000Z"
+-- ORGANIZATION "Erlang/OTP"
+-- CONTACT-INFO ""
+-- DESCRIPTION
+-- "Header cleanup."
+-- REVISION "1004200000Z"
-- DESCRIPTION
-- "This MIB module defines MIB objects for the SNMPEA
-- component in OTP."
diff --git a/lib/snmp/test/snmp_test_data/OTP8574-MIB.mib b/lib/snmp/test/snmp_test_data/OTP8574-MIB.mib
new file mode 100644
index 0000000000..b5e5ed1848
--- /dev/null
+++ b/lib/snmp/test/snmp_test_data/OTP8574-MIB.mib
@@ -0,0 +1,77 @@
+OTP8574-MIB DEFINITIONS ::= BEGIN
+
+IMPORTS
+ MODULE-IDENTITY, OBJECT-TYPE, enterprises, IpAddress FROM SNMPv2-SMI
+ RowStatus FROM SNMPv2-TC
+ ;
+
+otp8574MIB MODULE-IDENTITY
+ LAST-UPDATED "1004200000Z"
+ ORGANIZATION "Erlang/OTP"
+ CONTACT-INFO "www.erlang.org"
+ DESCRIPTION "The MIB module is used for testing a compiler feature"
+ ::= { otpSnmp 1 }
+
+ericsson OBJECT IDENTIFIER ::= { enterprises 193 }
+otp OBJECT IDENTIFIER ::= { ericsson 19 }
+otpApplications OBJECT IDENTIFIER ::= { otp 3 }
+otpSnmp OBJECT IDENTIFIER ::= { otpApplications 3 }
+
+testMIBObjects OBJECT IDENTIFIER ::= { otp8574MIB 1 }
+
+testMIBObjectGroup OBJECT IDENTIFIER ::= { testMIBObjects 1 }
+
+example-Table OBJECT-TYPE
+ SYNTAX SEQUENCE OF ExampleEntry
+ MAX-ACCESS not-accessible
+ STATUS current
+ DESCRIPTION "An example table"
+ ::= { testMIBObjectGroup 1 }
+
+example-Entry OBJECT-TYPE
+ SYNTAX ExampleEntry
+ MAX-ACCESS not-accessible
+ STATUS current
+ DESCRIPTION "Example table entry"
+ INDEX { exampleIndex }
+ ::= { example-Table 5 }
+
+ExampleEntry ::= SEQUENCE {
+ exampleIndex INTEGER,
+ exampleColumn OCTET STRING,
+ exampleNotAccessible OCTET STRING,
+ exampleRowStatus RowStatus
+}
+
+exampleIndex OBJECT-TYPE
+ SYNTAX INTEGER (1..100)
+ MAX-ACCESS read-write
+ STATUS current
+ DESCRIPTION "The index for this entry."
+ ::= { example-Entry 1 }
+
+exampleColumn OBJECT-TYPE
+ SYNTAX OCTET STRING
+ MAX-ACCESS read-write
+ STATUS current
+ DESCRIPTION
+ "Example table column"
+ ::= { example-Entry 2 }
+
+exampleNotAccessible OBJECT-TYPE
+ SYNTAX OCTET STRING
+ MAX-ACCESS not-accessible
+ STATUS current
+ DESCRIPTION
+ "Example table column"
+ ::= { example-Entry 3 }
+
+exampleRowStatus OBJECT-TYPE
+ SYNTAX RowStatus
+ MAX-ACCESS read-write
+ STATUS current
+ DESCRIPTION
+ "Example table RowStatus"
+ ::= { example-Entry 4 }
+
+END
diff --git a/lib/snmp/test/snmp_test_data/OTP8595-MIB.mib b/lib/snmp/test/snmp_test_data/OTP8595-MIB.mib
new file mode 100644
index 0000000000..23245bce37
--- /dev/null
+++ b/lib/snmp/test/snmp_test_data/OTP8595-MIB.mib
@@ -0,0 +1,45 @@
+OTP8595-MIB DEFINITIONS ::= BEGIN
+
+IMPORTS
+ MODULE-IDENTITY, OBJECT-TYPE, snmpModules, mib-2
+ FROM SNMPv2-SMI
+ DisplayString, TestAndIncr, TimeStamp, RowStatus, TruthValue,
+ TEXTUAL-CONVENTION
+ FROM SNMPv2-TC
+ MODULE-COMPLIANCE, OBJECT-GROUP, NOTIFICATION-GROUP
+ FROM SNMPv2-CONF
+ sysLocation, sysContact
+ FROM SNMPv2-MIB
+ ;
+
+otp8595MIB MODULE-IDENTITY
+ LAST-UPDATED "1004210000Z"
+ ORGANIZATION ""
+ CONTACT-INFO
+ ""
+ DESCRIPTION
+ "Test mib for OTP-8595"
+ ::= { snmpModules 1 }
+
+
+test OBJECT IDENTIFIER ::= { mib-2 15 }
+
+bits1 OBJECT-TYPE
+ SYNTAX BITS {
+ b0(0),
+ b1(1),
+ b2(2),
+ -- The following are extensions to the original set of
+ -- labels. The extensions start at an octet boundary.
+ -- So for bits 3 - 7, one MUST set them to zero on send
+ -- and one MUST ignore them on receipt.
+ b8(8),
+ b9(9)
+ }
+ MAX-ACCESS read-write
+ STATUS current
+ DESCRIPTION
+ ""
+ ::= { test 1 }
+
+END
diff --git a/lib/snmp/test/snmp_test_lib.erl b/lib/snmp/test/snmp_test_lib.erl
index 2586b66a13..54839d989b 100644
--- a/lib/snmp/test/snmp_test_lib.erl
+++ b/lib/snmp/test/snmp_test_lib.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2002-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2002-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -172,7 +172,17 @@ os_based_skip(Skippable) when is_list(Skippable) ->
{value, {OsFam, OsName}} ->
true;
{value, {OsFam, OsNames}} when is_list(OsNames) ->
- lists:member(OsName, OsNames);
+ case lists:member(OsName, OsNames) of
+ true ->
+ true;
+ false ->
+ case lists:keysearch(OsName, 1, OsNames) of
+ {value, {OsName, Check}} when is_function(Check) ->
+ Check();
+ _ ->
+ false
+ end
+ end;
_ ->
false
end
diff --git a/lib/snmp/test/snmp_test_mgr_misc.erl b/lib/snmp/test/snmp_test_mgr_misc.erl
index e6220f9241..ef1ba0b948 100644
--- a/lib/snmp/test/snmp_test_mgr_misc.erl
+++ b/lib/snmp/test/snmp_test_mgr_misc.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -101,8 +101,8 @@ init_packet(Parent, SnmpMgr,
init_debug(Dbg) when is_atom(Dbg) ->
put(debug,Dbg),
- put(verbosity,silence);
- %% put(verbosity,trace);
+ %% put(verbosity, silence);
+ put(verbosity, trace);
init_debug(DbgOptions) when is_list(DbgOptions) ->
case lists:keysearch(debug, 1, DbgOptions) of
{value, {_, Dbg}} when is_atom(Dbg) ->
diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk
index c4550d9b44..b0deb20a9d 100644
--- a/lib/snmp/vsn.mk
+++ b/lib/snmp/vsn.mk
@@ -1,174 +1,59 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
# %CopyrightBegin%
-#
+#
# Copyright Ericsson AB 1997-2010. All Rights Reserved.
-#
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
-SNMP_VSN = 4.16
+SNMP_VSN = 4.17.1
PRE_VSN =
APP_VSN = "snmp-$(SNMP_VSN)$(PRE_VSN)"
-TICKETS = \
- OTP-8395 \
- OTP-8433 \
- OTP-8442
-
-TICKETS_4_15 = OTP-8229 OTP-8249
-
-TICKETS_4_14 = OTP-8223 OTP-8228 OTP-8237
-
-TICKETS_4_13_5 = OTP-8116 OTP-8120 OTP-8181 OTP-8182
-
-TICKETS_4_13_4 = OTP-8044 OTP-8062 OTP-8098
-
-TICKETS_4_13_3 = OTP-8015 OTP-8020
-
-TICKETS_4_13_2 = OTP-7961 OTP-7977 OTP-7983 OTP-7989
-
-TICKETS_4_13_1 = OTP-7902
-
-TICKETS_4_13 = OTP-7571 OTP-7735 OTP-7836 OTP-7851
-
-TICKETS_4_12_2 = OTP-7868
-
-TICKETS_4_12_1 = OTP-7695 OTP-7698
-
-TICKETS_4_12 = OTP-7346 OTP-7525
-
-TICKETS_4_11_2 = OTP-7570 OTP-7575
-
-TICKETS_4_11_1 = OTP-7390 OTP-7412 OTP-7426 OTP-7432
-
-TICKETS_4_11 = OTP-7201 OTP-7287 OTP-7319 OTP-7369 OTP-7371 OTP-7377 OTP-7381
-
-TICKETS_4_10_3 = OTP-7219
-
-TICKETS_4_10_2 = OTP-7152 OTP-7153 OTP-7157 OTP-7158 OTP-7159 OTP-7160
-
-TICKETS_4_10_1 = OTP-7083 OTP-7109 OTP-7110 OTP-7119 OTP-7121 OTP-7123
-
-TICKETS_4_10 = OTP-6649 OTP-6841 OTP-6898 OTP-6945
+TICKETS = OTP-8761
-TICKETS_4_9_6 = OTP-6840 OTP-6843
+TICKETS_4_17 = OTP-8478
-TICKETS_4_9_5 = OTP-6805 OTP-6815
+TICKETS_4_16_2 = \
+ OTP-8563 \
+ OTP-8574 \
+ OTP-8594 \
+ OTP-8595 \
+ OTP-8646 \
+ OTP-8648
-TICKETS_4_9_4 = OTP-6784 OTP-6771
+TICKETS_4_16_1 = \
+ OTP-8480 \
+ OTP-8481
-TICKETS_4_9_3 = OTP-6605 OTP-6712 OTP-6713
-
-TICKETS_4_9_2 = OTP-6571
-
-TICKETS_4_9_1 = OTP-6566 OTP-6569
-
-TICKETS_4_9 = \
- OTP-6317 \
- OTP-6318 \
- OTP-6383 \
- OTP-6487 \
- OTP-6515 \
- OTP-6518 \
- OTP-6529 \
- OTP-6532 \
- OTP-6533 \
- OTP-6540
-
-TICKETS_4_8_4 = OTP-6408
-
-TICKETS_4_8_3 = OTP-6337 OTP-6340
-
-TICKETS_4_8_2 = OTP-6214 OTP-6247 OTP-6293
-
-TICKETS_4_8_1 = OTP-6176 OTP-6177
-
-TICKETS_4_8 = OTP-6137 OTP-6149 OTP-6150 OTP-6164
-
-TICKETS_4_7_4 = \
- OTP-6042 \
- OTP-6044 \
- OTP-6049 \
- OTP-6062 \
- OTP-6068 \
- OTP-6074 \
- OTP-6077 \
- OTP-6081
-
-TICKETS_4_7_3 = \
- OTP-6031 \
- OTP-6032
-
-TICKETS_4_7_2 = \
- OTP-5992 \
- OTP-6024
-
-TICKETS_4_7_1 = \
- OTP-5963 \
- OTP-5968 \
- OTP-5969
-
-TICKETS_4_7 = \
- OTP-5870 \
- OTP-5934 \
- OTP-5935 \
- OTP-5937
-
-TICKETS_4_6_1 = \
- OTP-5834 \
- OTP-5838
-
-TICKETS_4_6 = \
- OTP-5763 \
- OTP-5771 \
- OTP-5787 \
- OTP-5797 \
- OTP-5829
-
-TICKETS_4_5 = \
- OTP-5581 \
- OTP-5726 \
- OTP-5727 \
- OTP-5732 \
- OTP-5733 \
- OTP-5740 \
- OTP-5742
-
-TICKETS_4_4_1 = \
- OTP-5719 \
- OTP-5720
+TICKETS_4_16 = \
+ OTP-8395 \
+ OTP-8433 \
+ OTP-8442
-TICKETS_4_4 = \
- OTP-5666 \
- OTP-5668 \
- OTP-5669 \
- OTP-5675 \
- OTP-5676 \
- OTP-5678 \
- OTP-5703
+TICKETS_4_15 = \
+ OTP-8229 \
+ OTP-8249
-TICKETS_4_3 = \
- OTP-5636 \
- OTP-5637 \
- OTP-5490
+TICKETS_4_14 = \
+ OTP-8223 \
+ OTP-8228 \
+ OTP-8237
-TICKETS_4_2 = \
- OTP-5574 \
- OTP-5578 \
- OTP-5579 \
- OTP-5580 \
- OTP-5590 \
- OTP-5591 \
- OTP-5592
+TICKETS_4_13_5 = \
+ OTP-8116 \
+ OTP-8120 \
+ OTP-8181 \
+ OTP-8182
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index 9597137cdf..ce18cabfb5 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>SSH Release Notes</title>
@@ -29,6 +29,85 @@
<file>notes.xml</file>
</header>
+ <section><title>Ssh 1.1.11</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ SSH in some cases generated a crash report when a channel
+ was closed in a normal way.</p>
+ <p>
+ Own Id: OTP-8735 Aux Id: seq11615</p>
+ </item>
+ </list>
+ </section>
+
+ </section>
+
+ <section><title>Ssh 1.1.10</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ SSH in some cases terminated channels with reason
+ normal when it should have been shutdown.</p>
+ <p>
+ Own Id: OTP-8714 Aux Id:</p>
+ </item>
+ </list>
+ </section>
+
+ </section>
+
+ <section><title>Ssh 1.1.9</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>The function ssh:connect/4 was not exported.</p>
+ <p>Own Id: OTP-8550 Aux Id:</p>
+ </item>
+ <item>
+ <p>Aligned error message with used version (SSH_FX_FAILURE vs
+ SSH_FX_NOT_A_DIRECTORY, the latter introduced in version 6).</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>Own Id: OTP-8644 Aux Id: seq11574</p>
+ </item>
+ <item>
+ <p>Resolved race condition when another connection is started
+ before a channel is opened in the first connection.</p>
+ <p>Own Id: OTP-8645 Aux Id: seq11577</p>
+ </item>
+ </list>
+ </section>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>The configuration parameter ip_v6_disabled is now available,
+ which makes it possible for the user to alter the IP version
+ SSH shall use.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>Own Id: OTP-8535 Aux Id:</p>
+ </item>
+ <item>
+ <p>The ssh_connection:send operation now accepts infinity as timeout.</p>
+ <p>Own Id: OTP-8534 Aux Id:</p>
+ </item>
+ <item>
+ <p>The connection handler now includes stack traces when a channel
+ message is not handled correctly.</p>
+ <p>Own Id: OTP-8524 Aux Id:</p>
+ </item>
+ </list>
+ </section>
+
+ </section>
+
<section><title>Ssh 1.1.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index aca5c9cdc4..71e6b2cd3d 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2010</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -13,12 +13,12 @@
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at http://www.erlang.org/.
-
+
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
-
+
</legalnotice>
<title>ssh</title>
@@ -159,6 +159,9 @@
<item>
<p>Allow an existing file-descriptor to be used
(simply passed on to the transport protocol).</p></item>
+ <tag><c><![CDATA[{ip_v6_disabled, boolean()}]]></c></tag>
+ <item>
+ <p>Determines if SSH shall use IPv6 or not.</p></item>
</taglist>
</desc>
</func>
@@ -198,8 +201,8 @@
<item>
Provides specifications for handling of subsystems. The
"sftp" subsystem-spec can be retrieved by calling
- ssh_sftd:subsystem_spec/1. If the subsystems option in not present
- the value of <c>[ssh_sftd:subsystem_spec([])]</c> will be used.
+ ssh_sftpd:subsystem_spec/1. If the subsystems option is not present
+ the value of <c>[ssh_sftpd:subsystem_spec([])]</c> will be used.
It is of course possible to set the option to the empty list
if you do not want the daemon to run any subsystems at all.
</item>
@@ -252,6 +255,10 @@
<item>
<p>Allow an existing file-descriptor to be used
(simply passed on to the transport protocol).</p></item>
+ <tag><c><![CDATA[{ip_v6_disabled, boolean()}]]></c></tag>
+ <item>
+ <p>Determines if SSH shall use IPv6 or not (only used when
+ HostAddress is set to any).</p></item>
</taglist>
</desc>
</func>
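The two hunks above document the new ip_v6_disabled option for both ssh:connect and ssh:daemon. Below is a minimal usage sketch (not part of the patch; the port, host and system_dir values are placeholders). Per the inetopt/1 change in ssh.erl further down, true selects inet (IPv4 only) and false selects inet6.

%% Hedged sketch, not part of the patch: placeholder port/host/system_dir.
ipv4_only_daemon() ->
    crypto:start(),
    ssh:start(),
    ssh:daemon(any, 2222, [{system_dir, "/etc/ssh"},
                           {ip_v6_disabled, true}]).

ipv4_only_connect(Host) ->
    crypto:start(),
    ssh:start(),
    %% ssh:connect/4 is exported by this patch set (OTP-8550).
    ssh:connect(Host, 2222, [{ip_v6_disabled, true}], 5000).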
diff --git a/lib/ssh/examples/ssh_sample_cli.erl b/lib/ssh/examples/ssh_sample_cli.erl
index 2505c1b759..6f3092e567 100644
--- a/lib/ssh/examples/ssh_sample_cli.erl
+++ b/lib/ssh/examples/ssh_sample_cli.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -30,19 +30,15 @@
%% our command functions
-export([cli_prime/1, cli_primes/1, cli_gcd/2, cli_lcm/2,
cli_factors/1, cli_exit/0, cli_rho/1, cli_help/0,
- cli_crash/0, cli_users/0, cli_self/0,
- cli_user/0, cli_host/0]).
-
-%% imports
--import(lists, [reverse/1, reverse/2, seq/2, prefix/2]).
--import(math, [sqrt/1]).
-
+ cli_crash/0, cli_self/0, cli_user/0, cli_host/0]).
listen(Port) ->
listen(Port, []).
listen(Port, Options) ->
- ssh_cli:listen(fun(U, H) -> start_our_shell(U, H) end, Port, Options).
+ crypto:start(),
+ ssh:start(),
+ ssh:daemon(any, Port, [{shell, fun(U, H) -> start_our_shell(U, H) end} | Options]).
%% our_routines
our_routines() ->
@@ -56,7 +52,6 @@ our_routines() ->
{"prime", cli_prime, "<int> check for primality"},
{"primes", cli_primes, "<int> print all primes up to <int>"},
{"rho", cli_rho, "<int> prime factors using rho's alg."},
- {"who", cli_users, " lists users"},
{"user", cli_user, " print name of user"},
{"host", cli_host, " print host addr"},
{"self", cli_self, " print my pid"}
@@ -85,11 +80,11 @@ our_routines() ->
common_prefix([C | R1], [C | R2], Acc) ->
common_prefix(R1, R2, [C | Acc]);
common_prefix(_, _, Acc) ->
- reverse(Acc).
+ lists:reverse(Acc).
%% longest prefix in a list, given a prefix
longest_prefix(List, Prefix) ->
- case [A || {A, _, _} <- List, prefix(Prefix, A)] of
+ case [A || {A, _, _} <- List, lists:prefix(Prefix, A)] of
[] ->
{none, List};
[S | Rest] ->
@@ -112,7 +107,7 @@ longest_prefix(List, Prefix) ->
expand([$ | _]) ->
{no, "", []};
expand(RevBefore) ->
- Before = reverse(RevBefore),
+ Before = lists:reverse(RevBefore),
case longest_prefix(our_routines(), Before) of
{prefix, P, [_]} ->
{yes, P ++ " ", []};
@@ -139,22 +134,27 @@ start_our_shell(User, Peer) ->
%%% an ordinary Read-Eval-Print-loop
our_shell_loop() ->
% Read
- Line = io:get_line({format, "CLI> ", []}),
+ Line = io:get_line("CLI> "),
% Eval
Result = eval_cli(Line),
% Print
io:format("---> ~p\n", [Result]),
case Result of
- done -> exit(normal);
- crash -> 1 / 0;
- _ -> our_shell_loop()
+ done ->
+ exit(normal);
+ crash ->
+ 1 / 0;
+ _ ->
+ our_shell_loop()
end.
%%% translate a command to a function
command_to_function(Command) ->
case lists:keysearch(Command, 1, our_routines()) of
- {value, {_, Proc, _}} -> Proc;
- false -> unknown_cli
+ {value, {_, Proc, _}} ->
+ Proc;
+ false ->
+ unknown_cli
end.
%%% evaluate a command line
@@ -209,14 +209,6 @@ cli_user() ->
cli_host() ->
get(peer_name).
-cli_users() ->
- case ssh_userauth:get_auth_users() of
- {ok, UsersPids} ->
- UsersPids; % [U || {U, _} <- UsersPids];
- E ->
- E
- end.
-
cli_self() ->
self().
@@ -243,7 +235,7 @@ cli_help() ->
%% a quite simple Sieve of Erastothenes (not tail-recursive, though)
primes(Size) ->
- era(sqrt(Size), seq(2,Size)).
+ era(math:sqrt(Size), lists:seq(2,Size)).
era(Max, [H|T]) when H =< Max ->
[H | era(Max, sieve([H|T], H))];
@@ -267,7 +259,7 @@ next_prime(Primes, P) ->
next_prime1(Primes, P) ->
P1 = P + 2,
- case divides(Primes, trunc(sqrt(P1)), P1) of
+ case divides(Primes, trunc(math:sqrt(P1)), P1) of
false -> P1;
true -> next_prime1(Primes, P1)
end.
@@ -282,7 +274,7 @@ divides([_ | R], Nsqrt, N) ->
divides(R, Nsqrt, N).
is_prime(P) ->
- lists:all(fun(A) -> P rem A =/= 0 end, primes(trunc(sqrt(P)))).
+ lists:all(fun(A) -> P rem A =/= 0 end, primes(trunc(math:sqrt(P)))).
%% Normal gcd, Euclid
gcd(R, Q) when abs(Q) < abs(R) -> gcd1(Q,R);
@@ -300,17 +292,17 @@ lcm(R, Q) ->
%%% Prime factors of a number (naïve implementation)
factors(N) ->
- Nsqrt = trunc(sqrt(N)),
+ Nsqrt = trunc(math:sqrt(N)),
factors([], N, 2, Nsqrt, []).
factors(_Primes, N, Prime, Nsqrt, Factors) when Prime > Nsqrt ->
- reverse(Factors, [N]);
+ lists:reverse(Factors, [N]);
factors(Primes, N, Prime, Nsqrt, Factors) ->
case N rem Prime of
0 ->
%%io:format("factor ------- ~p\n", [Prime]),
N1 = N div Prime,
- factors(Primes, N1, Prime, trunc(sqrt(N1)), [Prime|Factors]);
+ factors(Primes, N1, Prime, trunc(math:sqrt(N1)), [Prime|Factors]);
_ ->
Primes1 = Primes ++ [Prime],
Prime1 = next_prime(Primes1, Prime),
diff --git a/lib/ssh/src/ssh.appup.src b/lib/ssh/src/ssh.appup.src
index 5329373862..82114c9afd 100644
--- a/lib/ssh/src/ssh.appup.src
+++ b/lib/ssh/src/ssh.appup.src
@@ -1,25 +1,29 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
{"%VSN%",
[
- {"1.1.7", [{load_module, ssh_connection_handler, soft_purge, soft_purge, []}]},
+ {"1.1.10", [{load_module, ssh_connection_manager, soft_purge, soft_purge, []}]},
+ {"1.1.9", [{load_module, ssh_channel, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_manager, soft_purge, soft_purge, []}]},
+ {"1.1.8", [{restart_application, ssh}]},
+ {"1.1.7", [{restart_application, ssh}]},
{"1.1.6", [{restart_application, ssh}]},
{"1.1.5", [{restart_application, ssh}]},
{"1.1.4", [{restart_application, ssh}]},
@@ -27,7 +31,11 @@
{"1.1.2", [{restart_application, ssh}]}
],
[
- {"1.1.7", [{load_module, ssh_connection_handler, soft_purge, soft_purge, []}]},
+ {"1.1.10", [{load_module, ssh_connection_manager, soft_purge, soft_purge, []}]},
+ {"1.1.9", [{load_module, ssh_channel, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_manager, soft_purge, soft_purge, []}]},
+ {"1.1.8", [{restart_application, ssh}]},
+ {"1.1.7", [{restart_application, ssh}]},
{"1.1.6", [{restart_application, ssh}]},
{"1.1.5", [{restart_application, ssh}]},
{"1.1.4", [{restart_application, ssh}]},
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index f9a986a8b6..994c77436a 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -24,7 +24,7 @@
-include("ssh.hrl").
-include("ssh_connect.hrl").
--export([start/0, start/1, stop/0, connect/3, close/1, connection_info/2,
+-export([start/0, start/1, stop/0, connect/3, connect/4, close/1, connection_info/2,
channel_info/3,
daemon/1, daemon/2, daemon/3,
stop_listener/1, stop_listener/2, stop_daemon/1, stop_daemon/2,
@@ -330,10 +330,10 @@ handle_options([{nodelay, _} = Opt | Rest], SockOpts, Opts) ->
handle_options([Opt | Rest], SockOpts, Opts) ->
handle_options(Rest, SockOpts, [Opt | Opts]).
+%% Has IPv6 been disabled?
inetopt(true) ->
- inet6;
-
+ inet;
inetopt(false) ->
- inet.
+ inet6.
diff --git a/lib/ssh/src/ssh_acceptor.erl b/lib/ssh/src/ssh_acceptor.erl
index d19fee14e1..9060626ab3 100644
--- a/lib/ssh/src/ssh_acceptor.erl
+++ b/lib/ssh/src/ssh_acceptor.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -79,11 +79,11 @@ acceptor_loop(Callback, Port, Address, Opts, ListenSocket, AcceptTimeout) ->
handle_connection(Callback, Address, Port, Options, Socket) ->
SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- ssh_system_sup:start_subsystem(SystemSup, Options),
+ {ok, SubSysSup} = ssh_system_sup:start_subsystem(SystemSup, Options),
ConnectionSup = ssh_system_sup:connection_supervisor(SystemSup),
{ok, Pid} =
ssh_connection_controler:start_manager_child(ConnectionSup,
- [server, Socket, Options]),
+ [server, Socket, Options, SubSysSup]),
Callback:controlling_process(Socket, Pid),
SshOpts = proplists:get_value(ssh_opts, Options),
Pid ! {start_connection, server, [Address, Port, Socket, SshOpts]}.
diff --git a/lib/ssh/src/ssh_channel.erl b/lib/ssh/src/ssh_channel.erl
index 3d67065ee1..dcb2d69290 100644
--- a/lib/ssh/src/ssh_channel.erl
+++ b/lib/ssh/src/ssh_channel.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -238,9 +238,19 @@ handle_info(Msg, #state{cm = ConnectionManager, channel_cb = Module,
{noreply, State#state{channel_state = ChannelState}};
{ok, ChannelState, Timeout} ->
{noreply, State#state{channel_state = ChannelState}, Timeout};
+ {stop, Reason, ChannelState} when is_atom(Reason)->
+ {stop, Reason, State#state{close_sent = true,
+ channel_state = ChannelState}};
{stop, ChannelId, ChannelState} ->
- ssh_connection:close(ConnectionManager, ChannelId),
- {stop, normal, State#state{close_sent = true,
+ Reason =
+ case Msg of
+ {'EXIT', _Pid, shutdown} ->
+ shutdown;
+ _ ->
+ normal
+ end,
+ (catch ssh_connection:close(ConnectionManager, ChannelId)),
+ {stop, Reason, State#state{close_sent = true,
channel_state = ChannelState}}
end.
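For context, a hedged sketch of the callback side of this change: returning {stop, ChannelId, State} from a callback is what the rewritten handle_info/2 above turns into an exit with reason normal, or shutdown when the trigger was an 'EXIT' message with reason shutdown. The module name and the messages handled are illustrative only, not part of the patch.

%% Hedged sketch (not from the patch): a minimal ssh_channel callback module.
-module(my_echo_channel).
-behaviour(ssh_channel).
-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).

init(_Args) ->
    {ok, no_state}.

handle_ssh_msg({ssh_cm, CM, {data, ChannelId, _Type, Data}}, State) ->
    %% Echo the data back on the same channel.
    ssh_connection:send(CM, ChannelId, Data),
    {ok, State};
handle_ssh_msg({ssh_cm, _CM, {eof, ChannelId}}, State) ->
    %% Returning a stop tuple makes ssh_channel close the channel;
    %% the modified handle_info/2 above picks normal or shutdown as exit reason.
    {stop, ChannelId, State};
handle_ssh_msg(_Msg, State) ->
    {ok, State}.

handle_msg({ssh_channel_up, _ChannelId, _CM}, State) ->
    {ok, State}.

terminate(_Reason, _State) ->
    ok.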
diff --git a/lib/ssh/src/ssh_cli.erl b/lib/ssh/src/ssh_cli.erl
index 964f35121a..2764ea2e43 100644
--- a/lib/ssh/src/ssh_cli.erl
+++ b/lib/ssh/src/ssh_cli.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -428,7 +428,7 @@ start_shell(ConnectionManager, State) ->
{ok, User} =
ssh_userreg:lookup_user(ConnectionManager),
{ok, PeerAddr} =
- ssh_cm:get_peer_addr(ConnectionManager),
+ ssh_connection_manager:peer_addr(ConnectionManager),
fun() -> Shell(User, PeerAddr) end;
_ ->
Shell
diff --git a/lib/ssh/src/ssh_connect.hrl b/lib/ssh/src/ssh_connect.hrl
index a5a4e42cd8..34d4ff8fc1 100755
--- a/lib/ssh/src/ssh_connect.hrl
+++ b/lib/ssh/src/ssh_connect.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -260,5 +260,6 @@
address,
port,
options,
- exec
+ exec,
+ sub_system_supervisor
}).
diff --git a/lib/ssh/src/ssh_connection.erl b/lib/ssh/src/ssh_connection.erl
index 0aaf1c18d2..7b9e9185bf 100644
--- a/lib/ssh/src/ssh_connection.erl
+++ b/lib/ssh/src/ssh_connection.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -123,6 +123,8 @@ send(ConnectionManager, ChannelId, Data) ->
send(ConnectionManager, ChannelId, 0, Data, infinity).
send(ConnectionManager, ChannelId, Data, TimeOut) when is_integer(TimeOut) ->
send(ConnectionManager, ChannelId, 0, Data, TimeOut);
+send(ConnectionManager, ChannelId, Data, infinity) ->
+ send(ConnectionManager, ChannelId, 0, Data, infinity);
send(ConnectionManager, ChannelId, Type, Data) ->
send(ConnectionManager, ChannelId, Type, Data, infinity).
send(ConnectionManager, ChannelId, Type, Data, TimeOut) ->
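The added clause lets callers pass infinity as the timeout without giving an explicit message type (OTP-8534). A small hedged sketch follows; CM, ChannelId and Data are placeholders for an already established channel.

%% Hedged sketch, not part of the patch.
send_without_timeout(CM, ChannelId, Data) ->
    %% Before the added clause, a 4-argument call with infinity matched the
    %% Type clause, so Data was misread as the type and infinity as the payload;
    %% it is now routed as Type = 0, Timeout = infinity.
    ssh_connection:send(CM, ChannelId, Data, infinity).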
@@ -944,13 +946,12 @@ encode_ip(Addr) when is_list(Addr) ->
end
end.
-start_channel(Address, Port, Cb, Id, Args) ->
- start_channel(Address, Port, Cb, Id, Args, undefined).
+start_channel(Cb, Id, Args, SubSysSup) ->
+ start_channel(Cb, Id, Args, SubSysSup, undefined).
-start_channel(Address, Port, Cb, Id, Args, Exec) ->
+start_channel(Cb, Id, Args, SubSysSup, Exec) ->
ChildSpec = child_spec(Cb, Id, Args, Exec),
- SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- ChannelSup = ssh_system_sup:channel_supervisor(SystemSup),
+ ChannelSup = ssh_subsystem_sup:channel_supervisor(SubSysSup),
ssh_channel_sup:start_child(ChannelSup, ChildSpec).
%%--------------------------------------------------------------------
@@ -1015,18 +1016,19 @@ start_cli(#connection{address = Address, port = Port, cli_spec = {Fun, [Shell]},
{ok, Pid}
end;
-start_cli(#connection{address = Address, port = Port,
- cli_spec = {CbModule, Args}, exec = Exec}, ChannelId) ->
- start_channel(Address, Port, CbModule, ChannelId, Args, Exec).
+start_cli(#connection{cli_spec = {CbModule, Args}, exec = Exec,
+ sub_system_supervisor = SubSysSup}, ChannelId) ->
+ start_channel(CbModule, ChannelId, Args, SubSysSup, Exec).
start_subsytem(BinName, #connection{address = Address, port = Port,
- options = Options},
+ options = Options,
+ sub_system_supervisor = SubSysSup},
#channel{local_id = ChannelId, remote_id = RemoteChannelId},
ReplyMsg) ->
Name = binary_to_list(BinName),
case check_subsystem(Name, Options) of
{Callback, Opts} when is_atom(Callback), Callback =/= none ->
- start_channel(Address, Port, Callback, ChannelId, Opts);
+ start_channel(Callback, ChannelId, Opts, SubSysSup);
{Other, _} when Other =/= none ->
handle_backwards_compatibility(Other, self(),
ChannelId, RemoteChannelId,
diff --git a/lib/ssh/src/ssh_connection_controler.erl b/lib/ssh/src/ssh_connection_controler.erl
index 7960eb11c6..636ecba532 100644
--- a/lib/ssh/src/ssh_connection_controler.erl
+++ b/lib/ssh/src/ssh_connection_controler.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2009-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%%--------------------------------------------------------------------
@@ -99,8 +99,8 @@ terminate(_Reason, #state{}) ->
handle_call({handler, Pid, [Role, Socket, Opts]}, _From, State) ->
{ok, Handler} = ssh_connection_handler:start_link(Role, Pid, Socket, Opts),
{reply, {ok, Handler}, State#state{handler = Handler}};
-handle_call({manager, [server = Role, Socket, Opts]}, _From, State) ->
- {ok, Manager} = ssh_connection_manager:start_link([Role, Socket, Opts]),
+handle_call({manager, [server = Role, Socket, Opts, SubSysSup]}, _From, State) ->
+ {ok, Manager} = ssh_connection_manager:start_link([Role, Socket, Opts, SubSysSup]),
{reply, {ok, Manager}, State#state{manager = Manager}};
handle_call({manager, [client = Role | Opts]}, _From, State) ->
{ok, Manager} = ssh_connection_manager:start_link([Role, Opts]),
diff --git a/lib/ssh/src/ssh_connection_manager.erl b/lib/ssh/src/ssh_connection_manager.erl
index 3863005e74..9e55312e5f 100644
--- a/lib/ssh/src/ssh_connection_manager.erl
+++ b/lib/ssh/src/ssh_connection_manager.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -146,6 +146,8 @@ adjust_window(ConnectionManager, Channel, Bytes) ->
close(ConnectionManager, ChannelId) ->
try call(ConnectionManager, {close, ChannelId}) of
ok ->
+ ok;
+ {error,normal} ->
ok
catch
exit:{noproc, _} ->
@@ -155,6 +157,8 @@ close(ConnectionManager, ChannelId) ->
stop(ConnectionManager) ->
try call(ConnectionManager, stop) of
ok ->
+ ok;
+ {error,normal} ->
ok
catch
exit:{noproc, _} ->
@@ -178,7 +182,7 @@ send_eof(ConnectionManager, ChannelId) ->
%% {stop, Reason}
%% Description: Initiates the server
%%--------------------------------------------------------------------
-init([server, _Socket, Opts]) ->
+init([server, _Socket, Opts, SubSysSup]) ->
process_flag(trap_exit, true),
ssh_bits:install_messages(ssh_connection:messages()),
Cache = ssh_channel:cache_create(),
@@ -187,7 +191,8 @@ init([server, _Socket, Opts]) ->
channel_id_seed = 0,
port_bindings = [],
requests = [],
- channel_pids = []},
+ channel_pids = [],
+ sub_system_supervisor = SubSysSup},
opts = Opts,
connected = false}};
@@ -272,18 +277,18 @@ handle_call({ssh_msg, Pid, Msg}, From,
{stop, normal, State#state{connection_state = Connection}}
catch
exit:{noproc, Reason} ->
- Report = io_lib:format("Connection probably terminated:~n~p~n~p~n",
- [ConnectionMsg, Reason]),
+ Report = io_lib:format("Connection probably terminated:~n~p~n~p~n~p~n",
+ [ConnectionMsg, Reason, erlang:get_stacktrace()]),
error_logger:info_report(Report),
{noreply, State};
error:Error ->
- Report = io_lib:format("Connection message returned:~n~p~n~p~n",
- [ConnectionMsg, Error]),
+ Report = io_lib:format("Connection message returned:~n~p~n~p~n~p~n",
+ [ConnectionMsg, Error, erlang:get_stacktrace()]),
error_logger:info_report(Report),
{noreply, State};
exit:Exit ->
- Report = io_lib:format("Connection message returned:~n~p~n~p~n",
- [ConnectionMsg, Exit]),
+ Report = io_lib:format("Connection message returned:~n~p~n~p~n~p~n",
+ [ConnectionMsg, Exit, erlang:get_stacktrace()]),
error_logger:info_report(Report),
{noreply, State}
end;
@@ -400,7 +405,7 @@ handle_call({close, ChannelId}, _,
end;
handle_call(stop, _, #state{role = _client,
- client = ChannelPid,
+ client = _ChannelPid,
connection = Pid} = State) ->
DisconnectMsg =
#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
@@ -592,7 +597,9 @@ call(Pid, Msg, Timeout) ->
Result
catch
exit:{timeout, _} ->
- {error, timeout}
+ {error, timeout};
+ exit:{normal, _} ->
+ {error, normal}
end.
cast(Pid, Msg) ->
diff --git a/lib/ssh/src/ssh_sftpd.erl b/lib/ssh/src/ssh_sftpd.erl
index 10b8ede1e4..da91817fd7 100644
--- a/lib/ssh/src/ssh_sftpd.erl
+++ b/lib/ssh/src/ssh_sftpd.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -242,7 +242,8 @@ handle_op(?SSH_FXP_REALPATH, ReqId,
end;
handle_op(?SSH_FXP_OPENDIR, ReqId,
<<?UINT32(RLen), RPath:RLen/binary>>,
- State0 = #state{file_handler = FileMod, file_state = FS0}) ->
+ State0 = #state{xf = #ssh_xfer{vsn = Vsn},
+ file_handler = FileMod, file_state = FS0}) ->
RelPath = binary_to_list(RPath),
AbsPath = relate_file_name(RelPath, State0),
@@ -250,10 +251,14 @@ handle_op(?SSH_FXP_OPENDIR, ReqId,
{IsDir, FS1} = FileMod:is_dir(AbsPath, FS0),
State1 = State0#state{file_state = FS1},
case IsDir of
- false ->
+ false when Vsn > 5 ->
ssh_xfer:xf_send_status(XF, ReqId, ?SSH_FX_NOT_A_DIRECTORY,
"Not a directory"),
State1;
+ false ->
+ ssh_xfer:xf_send_status(XF, ReqId, ?SSH_FX_FAILURE,
+ "Not a directory"),
+ State1;
true ->
add_handle(State1, XF, ReqId, directory, {RelPath,unread})
end;
diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk
index bc7bceeb24..8e31851a8e 100644
--- a/lib/ssh/vsn.mk
+++ b/lib/ssh/vsn.mk
@@ -1,9 +1,20 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
-SSH_VSN = 1.1.8
+SSH_VSN = 1.1.11
APP_VSN = "ssh-$(SSH_VSN)"
-TICKETS = OTP-8356 \
+TICKETS = OTP-8735
+
+TICKETS_1.1.10 = OTP-8714
+
+TICKETS_1.1.9 = OTP-8524 \
+ OTP-8534 \
+ OTP-8535 \
+ OTP-8550 \
+ OTP-8644 \
+ OTP-8645
+
+TICKETS_1.1.8 = OTP-8356 \
OTP-8401
TICKETS_1.1.7 = OTP-8121 \
diff --git a/lib/ssl/doc/src/new_ssl.xml b/lib/ssl/doc/src/new_ssl.xml
index b642280096..08868a1b3c 100644
--- a/lib/ssl/doc/src/new_ssl.xml
+++ b/lib/ssl/doc/src/new_ssl.xml
@@ -84,8 +84,6 @@
<item>New API functions are
ssl:shutdown/2, ssl:cipher_suites/[0,1] and
ssl:versions/0</item>
- <item>Diffie-Hellman keyexchange is
- not supported yet.</item>
<item>CRL and policy certificate
extensions are not supported yet. </item>
<item>Supported SSL/TLS-versions are SSL-3.0 and TLS-1.0 </item>
@@ -118,8 +116,8 @@
{fail_if_no_peer_cert, boolean()}
{depth, integer()} |
{certfile, path()} | {keyfile, path()} | {password, string()} |
- {cacertfile, path()} | {ciphers, ciphers()} | {ssl_imp, ssl_imp()}
- | {reuse_sessions, boolean()} | {reuse_session, fun()}
+ {cacertfile, path()} | {dhfile, path()} | {ciphers, ciphers()} |
+ {ssl_imp, ssl_imp()} | {reuse_sessions, boolean()} | {reuse_session, fun()}
</c></p>
<p><c>transportoption() = {CallbackModule, DataTag, ClosedTag}
@@ -262,6 +260,12 @@ end
CA certificates (trusted certificates used for verifying a peer
certificate). May be omitted if you do not want to verify
the peer.</item>
+
+ <tag>{dhfile, path()}</tag>
+ <item>Path to file containing PEM encoded Diffie Hellman parameters,
+ for the server to use if a cipher suite using Diffie Hellman key exchange
+ is negotiated. If not specified, hardcoded parameters will be used.
+ </item>
<tag>{ciphers, ciphers()}</tag>
<item>The function <c>ciphers_suites/0</c> can
@@ -491,6 +495,19 @@ end
</func>
<func>
+ <name>renegotiate(Socket) -> ok | {error, Reason}</name>
+ <fsummary> Initiates a new handshake.</fsummary>
+ <type>
+ <v>Socket = sslsocket()</v>
+ </type>
+ <desc><p>Initiates a new handshake. A notable return value is
+ <c>{error, renegotiation_rejected}</c> indicating that the peer
+ refused to go through with the renegotiation but the connection
+ is still active using the previously negotiated session.</p>
+ </desc>
+ </func>
+
+ <func>
<name>send(Socket, Data) -> ok | {error, Reason}</name>
<fsummary>Write data to a socket.</fsummary>
<type>
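Both additions documented in this file, the dhfile server option and ssl:renegotiate/1, can be exercised roughly as follows. This is a hedged sketch, not part of the patch; the port and the PEM file names are placeholders, and error handling is omitted.

%% Hedged sketch, not part of the patch.
dh_server() ->
    ssl:start(),
    {ok, Listen} = ssl:listen(9999, [{ssl_imp, new},
                                     {certfile, "server-cert.pem"},
                                     {keyfile,  "server-key.pem"},
                                     {dhfile,   "dh-params.pem"}]),   %% PEM encoded DH parameters
    {ok, Socket} = ssl:transport_accept(Listen),
    ok = ssl:ssl_accept(Socket),
    Socket.

renegotiate_example(Socket) ->
    case ssl:renegotiate(Socket) of
        ok ->
            ok;
        {error, renegotiation_rejected} ->
            %% Peer refused; the previously negotiated session stays active.
            still_connected
    end.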
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 4265445d23..9d13427677 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -30,6 +30,82 @@
</header>
<p>This document describes the changes made to the SSL application.
</p>
+<section><title>SSL 3.11</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixes handling of the option fail_if_no_peer_cert and
+ some undocumented options. Thanks to Rory Byrne.</p>
+ <p>
+ Own Id: OTP-8557</p>
+ </item>
+ </list>
+ </section>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Support for Diffie-Hellman. ssl-3.11 requires
+ public_key-0.6.</p>
+ <p>
+ Own Id: OTP-7046</p>
+ </item>
+ <item>
+ <p>
+ New ssl now properly handles ssl renegotiation, and
+ initiates a renegotiation if the ssl/tls sequence numbers
+ come close to the max value. However, RFC-5746 is not yet
+ supported, but will be in an upcoming release.</p>
+ <p>
+ Own Id: OTP-8517</p>
+ </item>
+ <item>
+ <p>
+ When gen_tcp is configured with the {packet,http} option,
+ it automatically switches to expect HTTP Headers after an
+ HTTP Request/Response line has been received. This update
+ fixes ssl to behave in the same way. Thanks to Rory
+ Byrne.</p>
+ <p>
+ Own Id: OTP-8545</p>
+ </item>
+ <item>
+ <p>
+ Ssl now correctly verifies the extended_key_usage
+ extension and also allows the user to verify application
+ specific extensions by supplying an appropriate fun.</p>
+ <p>
+ Own Id: OTP-8554 Aux Id: OTP-8553 </p>
+ </item>
+ <item>
+ <p>
+ Fixed ssl:transport_accept/2 to return properly when
+ socket is closed. Thanks to Rory Byrne.</p>
+ <p>
+ Own Id: OTP-8560</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SSL 3.10.9</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed a crash in the certificate certification part.</p>
+ <p>
+ Own Id: OTP-8510 Aux Id: seq11525 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
<section><title>SSL 3.10.8</title>
diff --git a/lib/ssl/src/ssl.appup.src b/lib/ssl/src/ssl.appup.src
index 3aea2428c9..e8ae6846aa 100644
--- a/lib/ssl/src/ssl.appup.src
+++ b/lib/ssl/src/ssl.appup.src
@@ -8,7 +8,9 @@
{"3.10.4", [{restart_application, ssl}]},
{"3.10.5", [{restart_application, ssl}]},
{"3.10.6", [{restart_application, ssl}]},
- {"3.10.7", [{restart_application, ssl}]}
+ {"3.10.7", [{restart_application, ssl}]},
+ {"3.10.8", [{restart_application, ssl}]},
+ {"3.10.9", [{restart_application, ssl}]}
],
[
{"3.10", [{restart_application, ssl}]},
@@ -18,6 +20,7 @@
{"3.10.4", [{restart_application, ssl}]},
{"3.10.5", [{restart_application, ssl}]},
{"3.10.6", [{restart_application, ssl}]},
- {"3.10.7", [{restart_application, ssl}]}
+ {"3.10.8", [{restart_application, ssl}]},
+ {"3.10.9", [{restart_application, ssl}]}
]}.
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index de74c91505..3cd4c7fdbd 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -29,13 +29,15 @@
connect/3, connect/2, connect/4, connection_info/1,
controlling_process/2, listen/2, pid/1, peername/1, recv/2, recv/3,
send/2, getopts/2, setopts/2, seed/1, sockname/1, peercert/1,
- peercert/2, version/0, versions/0, session_info/1, format_error/1]).
+ peercert/2, version/0, versions/0, session_info/1, format_error/1,
+ renegotiate/1]).
%% Should be deprecated as soon as old ssl is removed
%%-deprecated({pid, 1, next_major_release}).
-include("ssl_int.hrl").
-include("ssl_internal.hrl").
+-include("ssl_record.hrl").
-record(config, {ssl, %% SSL parameters
inet_user, %% User set inet options
@@ -151,18 +153,23 @@ transport_accept(#sslsocket{pid = {ListenSocket, #config{cb=CbInfo, ssl=SslOpts}
%% and options should be inherited.
EmOptions = emulated_options(),
{ok, InetValues} = inet:getopts(ListenSocket, EmOptions),
- {CbModule,_,_} = CbInfo,
- {ok, Socket} = CbModule:accept(ListenSocket, Timeout),
- inet:setopts(Socket, internal_inet_values()),
- {ok, Port} = inet:port(Socket),
- case ssl_connection_sup:start_child([server, "localhost", Port, Socket,
- {SslOpts, socket_options(InetValues)}, self(),
- CbInfo]) of
- {ok, Pid} ->
- CbModule:controlling_process(Socket, Pid),
- {ok, SslSocket#sslsocket{pid = Pid}};
- {error, Reason} ->
- {error, Reason}
+ ok = inet:setopts(ListenSocket, internal_inet_values()),
+ {CbModule,_,_} = CbInfo,
+ case CbModule:accept(ListenSocket, Timeout) of
+ {ok, Socket} ->
+ ok = inet:setopts(ListenSocket, InetValues),
+ {ok, Port} = inet:port(Socket),
+ ConnArgs = [server, "localhost", Port, Socket,
+ {SslOpts, socket_options(InetValues)}, self(), CbInfo],
+ case ssl_connection_sup:start_child(ConnArgs) of
+ {ok, Pid} ->
+ CbModule:controlling_process(Socket, Pid),
+ {ok, SslSocket#sslsocket{pid = Pid}};
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ {error, Reason} ->
+ {error, Reason}
end;
transport_accept(#sslsocket{} = ListenSocket, Timeout) ->
@@ -436,6 +443,10 @@ versions() ->
AvailableVsns = ?DEFAULT_SUPPORTED_VERSIONS,
[{ssl_app, ?VSN}, {supported, SupportedVsns}, {available, AvailableVsns}].
+
+renegotiate(#sslsocket{pid = Pid, fd = new_ssl}) ->
+ ssl_connection:renegotiation(Pid).
+
%%%--------------------------------------------------------------
%%% Internal functions
%%%--------------------------------------------------------------------
@@ -509,6 +520,9 @@ handle_options(Opts0, Role) ->
end
end,
+ UserFailIfNoPeerCert = validate_option(fail_if_no_peer_cert,
+ proplists:get_value(fail_if_no_peer_cert, Opts, false)),
+
{Verify, FailIfNoPeerCert, CaCertDefault} =
%% Handle 0, 1, 2 for backwards compatibility
case proplists:get_value(verify, Opts, verify_none) of
@@ -521,9 +535,7 @@ handle_options(Opts0, Role) ->
verify_none ->
{verify_none, false, ca_cert_default(verify_none, Role)};
verify_peer ->
- {verify_peer, proplists:get_value(fail_if_no_peer_cert,
- Opts, false),
- ca_cert_default(verify_peer, Role)};
+ {verify_peer, UserFailIfNoPeerCert, ca_cert_default(verify_peer, Role)};
Value ->
throw({error, {eoptions, {verify, Value}}})
end,
@@ -534,28 +546,31 @@ handle_options(Opts0, Role) ->
versions = handle_option(versions, Opts, []),
verify = validate_option(verify, Verify),
verify_fun = handle_option(verify_fun, Opts, VerifyFun),
- fail_if_no_peer_cert = validate_option(fail_if_no_peer_cert,
- FailIfNoPeerCert),
+ fail_if_no_peer_cert = FailIfNoPeerCert,
verify_client_once = handle_option(verify_client_once, Opts, false),
+ validate_extensions_fun = handle_option(validate_extensions_fun, Opts, undefined),
depth = handle_option(depth, Opts, 1),
certfile = CertFile,
keyfile = handle_option(keyfile, Opts, CertFile),
key = handle_option(key, Opts, undefined),
password = handle_option(password, Opts, ""),
cacertfile = handle_option(cacertfile, Opts, CaCertDefault),
+ dhfile = handle_option(dhfile, Opts, undefined),
ciphers = handle_option(ciphers, Opts, []),
%% Server side option
reuse_session = handle_option(reuse_session, Opts, ReuseSessionFun),
reuse_sessions = handle_option(reuse_sessions, Opts, true),
+ renegotiate_at = handle_option(renegotiate_at, Opts, ?DEFAULT_RENEGOTIATE_AT),
debug = handle_option(debug, Opts, [])
},
CbInfo = proplists:get_value(cb_info, Opts, {gen_tcp, tcp, tcp_closed}),
- SslOptions = [versions, verify, verify_fun,
+ SslOptions = [versions, verify, verify_fun, validate_extensions_fun,
+ fail_if_no_peer_cert, verify_client_once,
depth, certfile, keyfile,
- key, password, cacertfile, ciphers,
+ key, password, cacertfile, dhfile, ciphers,
debug, reuse_session, reuse_sessions, ssl_imp,
- cd_info],
+ cb_info, renegotiate_at],
SockOpts = lists:foldl(fun(Key, PropList) ->
proplists:delete(Key, PropList)
@@ -585,6 +600,9 @@ validate_option(fail_if_no_peer_cert, Value)
validate_option(verify_client_once, Value)
when Value == true; Value == false ->
Value;
+
+validate_option(validate_extensions_fun, Value) when Value == undefined; is_function(Value) ->
+ Value;
validate_option(depth, Value) when is_integer(Value),
Value >= 0, Value =< 255->
Value;
@@ -605,11 +623,17 @@ validate_option(cacertfile, undefined) ->
"";
validate_option(cacertfile, Value) when is_list(Value), Value =/= "" ->
Value;
+validate_option(dhfile, undefined = Value) ->
+ Value;
+validate_option(dhfile, Value) when is_list(Value), Value =/= "" ->
+ Value;
validate_option(ciphers, Value) when is_list(Value) ->
Version = ssl_record:highest_protocol_version([]),
try cipher_suites(Version, Value)
catch
exit:_ ->
+ throw({error, {eoptions, {ciphers, Value}}});
+ error:_->
throw({error, {eoptions, {ciphers, Value}}})
end;
validate_option(reuse_session, Value) when is_function(Value) ->
@@ -617,6 +641,9 @@ validate_option(reuse_session, Value) when is_function(Value) ->
validate_option(reuse_sessions, Value) when Value == true;
Value == false ->
Value;
+validate_option(renegotiate_at, Value) when is_integer(Value) ->
+ min(Value, ?DEFAULT_RENEGOTIATE_AT);
+
validate_option(debug, Value) when is_list(Value); Value == true ->
Value;
validate_option(Opt, Value) ->
@@ -628,7 +655,7 @@ validate_versions([Version | Rest], Versions) when Version == 'tlsv1.1';
Version == tlsv1;
Version == sslv3 ->
validate_versions(Rest, Versions);
-validate_versions(Ver, Versions) ->
+validate_versions([Ver| _], Versions) ->
throw({error, {eoptions, {Ver, {versions, Versions}}}}).
validate_inet_option(mode, Value)
@@ -832,6 +859,11 @@ version() ->
Vsns
end,
{ok, {SSLVsn, CompVsn, LibVsn}}.
+
+min(N,M) when N < M ->
+ N;
+min(_, M) ->
+ M.
%% Only used to remove exit messages from old ssl
%% First is a nonsense clause to provide some
diff --git a/lib/ssl/src/ssl_certificate.erl b/lib/ssl/src/ssl_certificate.erl
index d97b61a5ce..686e90a70c 100644
--- a/lib/ssl/src/ssl_certificate.erl
+++ b/lib/ssl/src/ssl_certificate.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -29,10 +29,12 @@
-include("ssl_alert.hrl").
-include("ssl_internal.hrl").
-include("ssl_debug.hrl").
+-include_lib("public_key/include/public_key.hrl").
-export([trusted_cert_and_path/3,
certificate_chain/2,
- file_to_certificats/1]).
+ file_to_certificats/1,
+ validate_extensions/6]).
%%====================================================================
%% Internal application API
@@ -87,6 +89,30 @@ file_to_certificats(File) ->
{ok, List} = ssl_manager:cache_pem_file(File),
[Bin || {cert, Bin, not_encrypted} <- List].
+
+%% Validates ssl/tls specific extensions
+validate_extensions([], ValidationState, UnknownExtensions, _, AccErr, _) ->
+ {UnknownExtensions, ValidationState, AccErr};
+
+validate_extensions([#'Extension'{extnID = ?'id-ce-extKeyUsage',
+ extnValue = KeyUse,
+ critical = true} | Rest],
+ ValidationState, UnknownExtensions, Verify, AccErr0, Role) ->
+ case is_valid_extkey_usage(KeyUse, Role) of
+ true ->
+ validate_extensions(Rest, ValidationState, UnknownExtensions,
+ Verify, AccErr0, Role);
+ false ->
+ AccErr =
+ not_valid_extension({bad_cert, invalid_ext_key_usage}, Verify, AccErr0),
+ validate_extensions(Rest, ValidationState, UnknownExtensions, Verify, AccErr, Role)
+ end;
+
+validate_extensions([Extension | Rest], ValidationState, UnknownExtensions,
+ Verify, AccErr, Role) ->
+ validate_extensions(Rest, ValidationState, [Extension | UnknownExtensions],
+ Verify, AccErr, Role).
+
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@@ -154,3 +180,18 @@ not_valid(Alert, true, _) ->
throw(Alert);
not_valid(_, false, {ErlCert, Path}) ->
{ErlCert, Path, [{bad_cert, unknown_ca}]}.
+
+is_valid_extkey_usage(KeyUse, client) ->
+ %% Client wants to verify server
+ is_valid_key_usage(KeyUse,?'id-kp-serverAuth');
+is_valid_extkey_usage(KeyUse, server) ->
+ %% Server wants to verify client
+ is_valid_key_usage(KeyUse, ?'id-kp-clientAuth').
+
+is_valid_key_usage(KeyUse, Use) ->
+ lists:member(Use, KeyUse).
+
+not_valid_extension(Error, true, _) ->
+ throw(Error);
+not_valid_extension(Error, false, AccErrors) ->
+ [Error | AccErrors].
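
The validate_extensions_fun option accepted in ssl.erl above is passed on to ssl_handshake:certify/7 (further down in this patch), which first runs validate_extensions/6 and then hands the leftover extensions to the user fun. A minimal sketch of such a callback, with the argument and return shape assumed from that wrapping; it accepts whatever ssl did not recognise without recording an error:

accept_all_extensions_fun() ->
    fun(UnknownExtensions, ValidationState, _Verify, AccErrors) ->
            %% Keep the extensions as-is and add no new errors.
            {UnknownExtensions, ValidationState, AccErrors}
    end.
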
diff --git a/lib/ssl/src/ssl_certificate_db.erl b/lib/ssl/src/ssl_certificate_db.erl
index adae92530a..b8c3c6f6b7 100644
--- a/lib/ssl/src/ssl_certificate_db.erl
+++ b/lib/ssl/src/ssl_certificate_db.erl
@@ -159,6 +159,8 @@ issuer_candidate(no_candidate) ->
case ets:first(Db) of
'$end_of_table' ->
no_more_candidates;
+ {file, _} = Key ->
+ issuer_candidate(Key);
Key ->
[Cert] = lookup(Key, Db),
{Key, Cert}
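
This clause matters because the certificate db table also holds {file, _} bookkeeping entries, so an issuer scan that lands on one must step past it instead of treating it as a certificate key. A generic sketch of that skip-while-iterating pattern over an ets table (key shapes assumed for illustration):

next_cert_key(_Db, '$end_of_table') ->
    no_more_candidates;
next_cert_key(Db, {file, _} = Key) ->
    %% Bookkeeping entry, not a certificate: move on to the next key.
    next_cert_key(Db, ets:next(Db, Key));
next_cert_key(_Db, Key) ->
    Key.
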
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 4847fd278d..8ff001b172 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -41,15 +41,14 @@
%% Internal application API
-export([send/2, send/3, recv/3, connect/7, accept/6, close/1, shutdown/2,
new_user/2, get_opts/2, set_opts/2, info/1, session_info/1,
- peer_certificate/1,
- sockname/1, peername/1]).
+ peer_certificate/1, sockname/1, peername/1, renegotiation/1]).
%% Called by ssl_connection_sup
-export([start_link/7]).
%% gen_fsm callbacks
--export([init/1, hello/2, certify/2, cipher/2, connection/2, connection/3, abbreviated/2,
- handle_event/3,
+-export([init/1, hello/2, certify/2, cipher/2, connection/2,
+ abbreviated/2, handle_event/3,
handle_sync_event/4, handle_info/3, terminate/3, code_change/4]).
-record(state, {
@@ -78,17 +77,25 @@
client_certificate_requested = false,
key_algorithm, % atom as defined by cipher_suite
public_key_info, % PKIX: {Algorithm, PublicKey, PublicKeyParams}
- private_key, % PKIX: 'RSAPrivateKey'
- diffie_hellman_params, %
+ private_key, % PKIX: #'RSAPrivateKey'{}
+ diffie_hellman_params, % PKIX: #'DHParameter'{} relevant for server side
+ diffie_hellman_keys, % {PublicKey, PrivateKey}
premaster_secret, %
cert_db_ref, % ets_table()
from, % term(), where to reply
bytes_to_read, % integer(), # bytes to read in passive mode
user_data_buffer, % binary()
%% tls_buffer, % Keeps a lookahead one packet if available
- log_alert % boolan()
+ log_alert, % boolean()
+ renegotiation, % {boolean(), From | internal | peer}
+	 recv_during_renegotiation, % boolean()
+ send_queue % queue()
}).
+-define(DEFAULT_DIFFIE_HELLMAN_PARAMS,
+ #'DHParameter'{prime = ?DEFAULT_DIFFIE_HELLMAN_PRIME,
+ base = ?DEFAULT_DIFFIE_HELLMAN_GENERATOR}).
+
%%====================================================================
%% Internal application API
%%====================================================================
@@ -99,15 +106,15 @@
%% Description:
%%--------------------------------------------------------------------
send(Pid, Data) ->
- sync_send_event(Pid, {application_data, erlang:iolist_to_binary(Data)}, infinity).
+ sync_send_all_state_event(Pid, {application_data, erlang:iolist_to_binary(Data)}, infinity).
send(Pid, Data, Timeout) ->
- sync_send_event(Pid, {application_data, erlang:iolist_to_binary(Data)}, Timeout).
+ sync_send_all_state_event(Pid, {application_data, erlang:iolist_to_binary(Data)}, Timeout).
%%--------------------------------------------------------------------
%% Function:
%%
%% Description:
%%--------------------------------------------------------------------
-recv(Pid, Length, Timeout) -> % TODO: Prio with renegotiate?
+recv(Pid, Length, Timeout) ->
sync_send_all_state_event(Pid, {recv, Length}, Timeout).
%%--------------------------------------------------------------------
%% Function:
@@ -209,6 +216,14 @@ session_info(ConnectionPid) ->
peer_certificate(ConnectionPid) ->
sync_send_all_state_event(ConnectionPid, peer_certificate).
+%%--------------------------------------------------------------------
+%% Function:
+%%
+%% Description:
+%%--------------------------------------------------------------------
+renegotiation(ConnectionPid) ->
+ sync_send_all_state_event(ConnectionPid, renegotiate).
+
%%====================================================================
%% ssl_connection_sup API
%%====================================================================
@@ -224,7 +239,6 @@ start_link(Role, Host, Port, Socket, Options, User, CbInfo) ->
gen_fsm:start_link(?MODULE, [Role, Host, Port, Socket, Options,
User, CbInfo], []).
-
%%====================================================================
%% gen_fsm callbacks
%%====================================================================
@@ -243,12 +257,13 @@ init([Role, Host, Port, Socket, {SSLOpts, _} = Options,
Hashes0 = ssl_handshake:init_hashes(),
try ssl_init(SSLOpts, Role) of
- {ok, Ref, CacheRef, OwnCert, Key} ->
+ {ok, Ref, CacheRef, OwnCert, Key, DHParams} ->
State = State0#state{tls_handshake_hashes = Hashes0,
own_cert = OwnCert,
cert_db_ref = Ref,
session_cache = CacheRef,
- private_key = Key},
+ private_key = Key,
+ diffie_hellman_params = DHParams},
{ok, hello, State}
catch
throw:Error ->
@@ -261,18 +276,20 @@ init([Role, Host, Port, Socket, {SSLOpts, _} = Options,
%% {next_state, NextStateName,
%% NextState, Timeout} |
%% {stop, Reason, NewState}
-%% Description:There should be one instance of this function for each possible
-%% state name. Whenever a gen_fsm receives an event sent using
-%% gen_fsm:send_event/2, the instance of this function with the same name as
-%% the current state name StateName is called to handle the event. It is also
-%% called if a timeout occurs.
+%%
+%% Description:There should be one instance of this function for each
+%% possible state name. Whenever a gen_fsm receives an event sent
+%% using gen_fsm:send_event/2, the instance of this function with the
+%% same name as the current state name StateName is called to handle
+%% the event. It is also called if a timeout occurs.
%%--------------------------------------------------------------------
hello(socket_control, #state{host = Host, port = Port, role = client,
ssl_options = SslOpts,
transport_cb = Transport, socket = Socket,
connection_states = ConnectionStates}
= State0) ->
- Hello = ssl_handshake:client_hello(Host, Port, ConnectionStates, SslOpts),
+ Hello = ssl_handshake:client_hello(Host, Port,
+ ConnectionStates, SslOpts),
Version = Hello#client_hello.client_version,
Hashes0 = ssl_handshake:init_hashes(),
{BinMsg, CS2, Hashes1} =
@@ -289,7 +306,7 @@ hello(socket_control, #state{host = Host, port = Port, role = client,
hello(socket_control, #state{role = server} = State) ->
{next_state, hello, next_record(State)};
-hello(hello, #state{role = client} = State) ->
+hello(#hello_request{}, #state{role = client} = State) ->
{next_state, hello, State};
hello(#server_hello{cipher_suite = CipherSuite,
@@ -301,13 +318,14 @@ hello(#server_hello{cipher_suite = CipherSuite,
host = Host, port = Port,
session_cache = Cache,
session_cache_cb = CacheCb} = State0) ->
+
{Version, NewId, ConnectionStates1} =
ssl_handshake:hello(Hello, ConnectionStates0),
{KeyAlgorithm, _, _, _} =
ssl_cipher:suite_definition(CipherSuite),
- PremasterSecret = make_premaster_secret(ReqVersion),
+ PremasterSecret = make_premaster_secret(ReqVersion, KeyAlgorithm),
State = State0#state{key_algorithm = KeyAlgorithm,
negotiated_version = Version,
@@ -358,47 +376,45 @@ hello(Hello = #client_hello{client_version = ClientVersion},
abbreviated(socket_control, #state{role = server} = State) ->
{next_state, abbreviated, State};
-abbreviated(hello, State) ->
+abbreviated(#hello_request{}, State) ->
{next_state, certify, State};
abbreviated(Finished = #finished{},
#state{role = server,
negotiated_version = Version,
tls_handshake_hashes = Hashes,
- session = #session{master_secret = MasterSecret},
- from = From} = State) ->
+ session = #session{master_secret = MasterSecret}} =
+ State0) ->
case ssl_handshake:verify_connection(Version, Finished, client,
MasterSecret, Hashes) of
verified ->
- gen_fsm:reply(From, connected),
- {next_state, connection, next_record_if_active(State)};
- #alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ State = ack_connection(State0),
+ next_state_connection(State);
+ #alert{} = Alert ->
+ handle_own_alert(Alert, Version, abbreviated, State0),
+ {stop, normal, State0}
end;
abbreviated(Finished = #finished{},
#state{role = client, tls_handshake_hashes = Hashes0,
session = #session{master_secret = MasterSecret},
- from = From,
- negotiated_version = Version} = State) ->
+ negotiated_version = Version} = State0) ->
case ssl_handshake:verify_connection(Version, Finished, server,
MasterSecret, Hashes0) of
verified ->
- {ConnectionStates, Hashes} = finalize_client_handshake(State),
- gen_fsm:reply(From, connected),
- {next_state, connection,
- next_record_if_active(State#state{tls_handshake_hashes = Hashes,
- connection_states =
- ConnectionStates})};
+ {ConnectionStates, Hashes} = finalize_client_handshake(State0),
+ State = ack_connection(State0),
+ next_state_connection(State#state{tls_handshake_hashes = Hashes,
+ connection_states =
+ ConnectionStates});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, abbreviated, State0),
+ {stop, normal, State0}
end.
certify(socket_control, #state{role = server} = State) ->
{next_state, certify, State};
-certify(hello, State) ->
+certify(#hello_request{}, State) ->
{next_state, certify, State};
certify(#certificate{asn1_certificates = []},
@@ -415,52 +431,71 @@ certify(#certificate{asn1_certificates = []},
ssl_options = #ssl_options{verify = verify_peer,
fail_if_no_peer_cert = false}} =
State) ->
- {next_state, certify, next_record(State#state{client_certificate_requested = false})};
+ {next_state, certify,
+ next_record(State#state{client_certificate_requested = false})};
certify(#certificate{} = Cert,
- #state{session = Session,
- negotiated_version = Version,
+ #state{negotiated_version = Version,
+ role = Role,
cert_db_ref = CertDbRef,
- ssl_options = Opts} = State0) ->
+ ssl_options = Opts} = State) ->
case ssl_handshake:certify(Cert, CertDbRef, Opts#ssl_options.depth,
Opts#ssl_options.verify,
- Opts#ssl_options.verify_fun) of
+ Opts#ssl_options.verify_fun,
+ Opts#ssl_options.validate_extensions_fun, Role) of
{PeerCert, PublicKeyInfo} ->
- State = State0#state{session =
- Session#session{peer_certificate = PeerCert},
- public_key_info = PublicKeyInfo,
- client_certificate_requested = false
- },
- {next_state, certify, next_record(State)};
+ handle_peer_cert(PeerCert, PublicKeyInfo,
+ State#state{client_certificate_requested = false});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify_certificate, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify_certificate, State),
+ {stop, normal, State}
end;
certify(#server_key_exchange{} = KeyExchangeMsg,
- #state{role = client,
- key_algorithm = Alg} = State)
- when Alg == dhe_dss; Alg == dhe_rsa; Alg == dh_anon; Alg == krb5 ->
- NewState = handle_server_key(KeyExchangeMsg, State),
- {next_state, certify, NewState};
+ #state{role = client, negotiated_version = Version,
+ key_algorithm = Alg} = State0)
+	when Alg == dhe_dss; Alg == dhe_rsa -> %% Not implemented: Alg == dh_anon; Alg == krb5
+ case handle_server_key(KeyExchangeMsg, State0) of
+ #state{} = State ->
+ {next_state, certify, next_record(State)};
+ #alert{} = Alert ->
+ handle_own_alert(Alert, Version, certify_server_keyexchange,
+ State0),
+ {stop, normal, State0}
+ end;
certify(#server_key_exchange{},
State = #state{role = client, negotiated_version = Version,
key_algorithm = Alg})
- when Alg == rsa; Alg == dh_dss; Alg == dh_rsa ->
+ when Alg == rsa; Alg == dh_dss; Alg == dh_rsa ->
Alert = ?ALERT_REC(?FATAL, ?UNEXPECTED_MESSAGE),
handle_own_alert(Alert, Version, certify_server_key_exchange, State),
{stop, normal, State};
-certify(KeyExchangeMsg = #server_key_exchange{}, State =
- #state{role = server}) ->
- NewState = handle_clinet_key(KeyExchangeMsg, State),
- {next_state, cipher, NewState};
-
certify(#certificate_request{}, State) ->
NewState = State#state{client_certificate_requested = true},
{next_state, certify, next_record(NewState)};
+
+%% Master secret was determined with help of server-key exchange msg
+certify(#server_hello_done{},
+ #state{session = #session{master_secret = MasterSecret} = Session,
+ connection_states = ConnectionStates0,
+ negotiated_version = Version,
+ premaster_secret = undefined,
+ role = client} = State0) ->
+ case ssl_handshake:master_secret(Version, Session,
+ ConnectionStates0, client) of
+ {MasterSecret, ConnectionStates1} ->
+ State = State0#state{connection_states = ConnectionStates1},
+ client_certify_and_key_exchange(State);
+ #alert{} = Alert ->
+ handle_own_alert(Alert, Version,
+ certify_server_hello_done, State0),
+ {stop, normal, State0}
+ end;
+
+%% Master secret is calculated from premaster_secret
certify(#server_hello_done{},
#state{session = Session0,
connection_states = ConnectionStates0,
@@ -487,7 +522,8 @@ certify(#client_key_exchange{},
negotiated_version = Version}) ->
%% We expect a certificate here
Alert = ?ALERT_REC(?FATAL, ?UNEXPECTED_MESSAGE),
- handle_own_alert(Alert, Version, certify_server_waiting_certificate, State),
+ handle_own_alert(Alert, Version,
+ certify_server_waiting_certificate, State),
{stop, normal, State};
@@ -517,11 +553,41 @@ certify(#client_key_exchange{exchange_keys
handle_own_alert(Alert, Version, certify_client_key_exchange,
State0),
{stop, normal, State0}
+ end;
+
+certify(#client_key_exchange{exchange_keys = #client_diffie_hellman_public{
+ dh_public = ClientPublicDhKey}},
+ #state{negotiated_version = Version,
+ diffie_hellman_params = #'DHParameter'{prime = P,
+ base = G},
+ diffie_hellman_keys = {_, ServerDhPrivateKey},
+ role = Role,
+ session = Session,
+ connection_states = ConnectionStates0} = State0) ->
+
+ PMpint = crypto:mpint(P),
+ GMpint = crypto:mpint(G),
+ PremasterSecret = crypto:dh_compute_key(mpint_binary(ClientPublicDhKey),
+ ServerDhPrivateKey,
+ [PMpint, GMpint]),
+
+ case ssl_handshake:master_secret(Version, PremasterSecret,
+ ConnectionStates0, Role) of
+ {MasterSecret, ConnectionStates} ->
+ State = State0#state{session =
+ Session#session{master_secret
+ = MasterSecret},
+ connection_states = ConnectionStates},
+ {next_state, cipher, next_record(State)};
+ #alert{} = Alert ->
+ handle_own_alert(Alert, Version,
+ certify_client_key_exchange, State0),
+ {stop, normal, State0}
end.
cipher(socket_control, #state{role = server} = State) ->
{next_state, cipher, State};
-cipher(hello, State) ->
+cipher(#hello_request{}, State) ->
{next_state, cipher, State};
cipher(#certificate_verify{signature = Signature},
@@ -543,43 +609,41 @@ cipher(#certificate_verify{signature = Signature},
end;
cipher(#finished{} = Finished,
- State = #state{from = From,
- negotiated_version = Version,
- host = Host,
- port = Port,
- role = Role,
- session = #session{master_secret = MasterSecret}
- = Session0,
- tls_handshake_hashes = Hashes}) ->
-
+ #state{negotiated_version = Version,
+ host = Host,
+ port = Port,
+ role = Role,
+ session = #session{master_secret = MasterSecret}
+ = Session0,
+ tls_handshake_hashes = Hashes} = State0) ->
+
case ssl_handshake:verify_connection(Version, Finished,
opposite_role(Role),
MasterSecret, Hashes) of
verified ->
- gen_fsm:reply(From, connected),
+ State = ack_connection(State0),
Session = register_session(Role, Host, Port, Session0),
case Role of
client ->
- {next_state, connection,
- next_record_if_active(State#state{session = Session})};
+ next_state_connection(State#state{session = Session});
server ->
{NewConnectionStates, NewHashes} =
finalize_server_handshake(State#state{
session = Session}),
- NewState =
- State#state{connection_states = NewConnectionStates,
- session = Session,
- tls_handshake_hashes = NewHashes},
- {next_state, connection, next_record_if_active(NewState)}
+ next_state_connection(State#state{connection_states =
+ NewConnectionStates,
+ session = Session,
+ tls_handshake_hashes =
+ NewHashes})
end;
#alert{} = Alert ->
- handle_own_alert(Alert, Version, cipher, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, cipher, State0),
+ {stop, normal, State0}
end.
connection(socket_control, #state{role = server} = State) ->
{next_state, connection, State};
-connection(hello, State = #state{host = Host, port = Port,
+connection(#hello_request{}, State = #state{host = Host, port = Port,
socket = Socket,
ssl_options = SslOpts,
negotiated_version = Version,
@@ -592,42 +656,11 @@ connection(hello, State = #state{host = Host, port = Port,
{BinMsg, ConnectionStates1, Hashes1} =
encode_handshake(Hello, Version, ConnectionStates0, Hashes0),
Transport:send(Socket, BinMsg),
- {next_state, hello, State#state{connection_states = ConnectionStates1,
- tls_handshake_hashes = Hashes1}}.
-
-%%--------------------------------------------------------------------
-%% Function:
-%% state_name(Event, From, State) -> {next_state, NextStateName, NextState} |
-%% {next_state, NextStateName,
-%% NextState, Timeout} |
-%% {reply, Reply, NextStateName, NextState}|
-%% {reply, Reply, NextStateName,
-%% NextState, Timeout} |
-%% {stop, Reason, NewState}|
-%% {stop, Reason, Reply, NewState}
-%% Description: There should be one instance of this function for each
-%% possible state name. Whenever a gen_fsm receives an event sent using
-%% gen_fsm:sync_send_event/2,3, the instance of this function with the same
-%% name as the current state name StateName is called to handle the event.
-%%--------------------------------------------------------------------
-connection({application_data, Data0}, _From,
- State = #state{socket = Socket,
- negotiated_version = Version,
- transport_cb = Transport,
- connection_states = ConnectionStates0}) ->
- %% We should look into having a worker process to do this to
- %% parallize send and receive decoding and not block the receiver
- %% if sending is overloading the socket.
- try
- Data = encode_packet(Data0, State#state.socket_options),
- {Msgs, ConnectionStates1} = encode_data(Data, Version, ConnectionStates0),
- Result = Transport:send(Socket, Msgs),
- {reply, Result,
- connection, State#state{connection_states = ConnectionStates1}}
-
- catch throw:Error ->
- {reply, Error, connection, State}
- end.
+ {next_state, hello, next_record(State#state{connection_states =
+ ConnectionStates1,
+ tls_handshake_hashes = Hashes1})};
+connection(#client_hello{} = Hello, #state{role = server} = State) ->
+ hello(Hello, State).
%%--------------------------------------------------------------------
%% Function:
@@ -642,22 +675,37 @@ connection({application_data, Data0}, _From,
%%--------------------------------------------------------------------
handle_event(#ssl_tls{type = ?HANDSHAKE, fragment = Data},
StateName,
- State = #state{key_algorithm = KeyAlg,
- tls_handshake_buffer = Buf0,
- negotiated_version = Version}) ->
+ State0 = #state{key_algorithm = KeyAlg,
+ tls_handshake_buffer = Buf0,
+ negotiated_version = Version}) ->
Handle =
- fun({Packet, Raw}, {next_state, SName, AS=#state{tls_handshake_hashes=Hs0}}) ->
+ fun({#hello_request{} = Packet, _}, {next_state, connection = SName, State}) ->
+ %% This message should not be included in handshake
+ %% message hashes. Starts new handshake (renegotiation)
+ Hs0 = ssl_handshake:init_hashes(),
+ ?MODULE:SName(Packet, State#state{tls_handshake_hashes=Hs0,
+ renegotiation = {true, peer}});
+ ({#hello_request{} = Packet, _}, {next_state, SName, State}) ->
+ %% This message should not be included in handshake
+ %% message hashes. Already in negotiation so it will be ignored!
+ ?MODULE:SName(Packet, State);
+ ({#client_hello{} = Packet, Raw}, {next_state, connection = SName, State}) ->
+ Hs0 = ssl_handshake:init_hashes(),
+ Hs1 = ssl_handshake:update_hashes(Hs0, Raw),
+ ?MODULE:SName(Packet, State#state{tls_handshake_hashes=Hs1,
+ renegotiation = {true, peer}});
+ ({Packet, Raw}, {next_state, SName, State = #state{tls_handshake_hashes=Hs0}}) ->
Hs1 = ssl_handshake:update_hashes(Hs0, Raw),
- ?MODULE:SName(Packet, AS#state{tls_handshake_hashes=Hs1});
+ ?MODULE:SName(Packet, State#state{tls_handshake_hashes=Hs1});
(_, StopState) -> StopState
end,
try
{Packets, Buf} = ssl_handshake:get_tls_handshake(Data,Buf0, KeyAlg,Version),
- Start = {next_state, StateName, State#state{tls_handshake_buffer = Buf}},
+ Start = {next_state, StateName, State0#state{tls_handshake_buffer = Buf}},
lists:foldl(Handle, Start, Packets)
catch throw:#alert{} = Alert ->
- handle_own_alert(Alert, Version, StateName, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, StateName, State0),
+ {stop, normal, State0}
end;
handle_event(#ssl_tls{type = ?APPLICATION_DATA, fragment = Data},
@@ -686,7 +734,8 @@ handle_event(#ssl_tls{type = ?ALERT, fragment = Data}, StateName, State) ->
{next_state, StateName, State};
handle_event(#alert{level = ?FATAL} = Alert, connection,
- #state{from = From, user_application = {_Mon, Pid}, log_alert = Log,
+ #state{from = From, user_application = {_Mon, Pid},
+ log_alert = Log,
host = Host, port = Port, session = Session,
role = Role, socket_options = Opts} = State) ->
invalidate_session(Role, Host, Port, Session),
@@ -712,11 +761,21 @@ handle_event(#alert{level = ?WARNING, description = ?CLOSE_NOTIFY} = Alert,
_, #state{from = From, role = Role} = State) ->
alert_user(From, Alert, Role),
{stop, normal, State};
-handle_event(#alert{level = ?WARNING} = Alert, StateName,
+
+handle_event(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
+ #state{log_alert = Log, renegotiation = {true, internal}} = State) ->
+ log_alert(Log, StateName, Alert),
+ {stop, normal, State};
+
+handle_event(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
+ #state{log_alert = Log, renegotiation = {true, From}} = State) ->
+ log_alert(Log, StateName, Alert),
+ gen_fsm:reply(From, {error, renegotiation_rejected}),
+ {next_state, connection, next_record(State)};
+
+handle_event(#alert{level = ?WARNING, description = ?USER_CANCELED} = Alert, StateName,
#state{log_alert = Log} = State) ->
log_alert(Log, StateName, Alert),
-%%TODO: Could be user_canceled or no_negotiation should the latter be
- %% treated as fatal?!
{next_state, StateName, next_record(State)}.
%%--------------------------------------------------------------------
@@ -734,6 +793,43 @@ handle_event(#alert{level = ?WARNING} = Alert, StateName,
%% gen_fsm:sync_send_all_state_event/2,3, this function is called to handle
%% the event.
%%--------------------------------------------------------------------
+handle_sync_event({application_data, Data0}, From, connection,
+ #state{socket = Socket,
+ negotiated_version = Version,
+ transport_cb = Transport,
+ connection_states = ConnectionStates0,
+ send_queue = SendQueue,
+ socket_options = SockOpts,
+ ssl_options = #ssl_options{renegotiate_at = RenegotiateAt}}
+ = State) ->
+ %% We should look into having a worker process to do this to
+    %% parallelize send and receive decoding and not block the receiver
+ %% if sending is overloading the socket.
+ try
+ Data = encode_packet(Data0, SockOpts),
+ case encode_data(Data, Version, ConnectionStates0, RenegotiateAt) of
+ {Msgs, [], ConnectionStates} ->
+ Result = Transport:send(Socket, Msgs),
+ {reply, Result,
+ connection, State#state{connection_states = ConnectionStates}};
+ {Msgs, RestData, ConnectionStates} ->
+ if
+ Msgs =/= [] ->
+ Transport:send(Socket, Msgs);
+ true ->
+ ok
+ end,
+ renegotiate(State#state{connection_states = ConnectionStates,
+ send_queue = queue:in_r({From, RestData}, SendQueue),
+ renegotiation = {true, internal}})
+ end
+ catch throw:Error ->
+ {reply, Error, connection, State}
+ end;
+handle_sync_event({application_data, Data}, From, StateName,
+ #state{send_queue = Queue} = State) ->
+    %% During renegotiation the handshake takes priority; the data is sent when the handshake is finished
+ {next_state, StateName, State#state{send_queue = queue:in({From, Data}, Queue)}};
handle_sync_event(started, From, StateName, State) ->
{next_state, StateName, State#state{from = From}};
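
A tiny sketch of the send-queue discipline assumed by the two application_data clauses above: data that arrives while a renegotiation is in progress is appended at the rear with queue:in/2, while the unsent remainder that triggered the renegotiation is pushed back at the front with queue:in_r/2, so next_state_connection/1 flushes it first once the handshake completes.

queue_discipline_sketch() ->
    Q0 = queue:new(),
    Q1 = queue:in({from_later, <<"queued during renegotiation">>}, Q0),
    Q2 = queue:in_r({from_first, <<"unsent rest of earlier send">>}, Q1),
    %% The re-queued remainder comes out first.
    {{value, {from_first, _}}, _Q3} = queue:out(Q2),
    ok.
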
@@ -750,23 +846,14 @@ handle_sync_event({shutdown, How}, From, StateName,
{stop, normal, Error, State#state{from = From}}
end;
-%% TODO: but what does next_record do if it is e.g. renegotiate? maybe
-%% not good... worth thinking about!
-handle_sync_event({recv, N}, From, StateName,
- State0 = #state{user_data_buffer = Buffer}) ->
- State1 = State0#state{bytes_to_read = N, from = From},
- case Buffer of
- <<>> ->
- State = next_record(State1),
- {next_state, StateName, State};
- _ ->
- case application_data(<<>>, State1) of
- Stop = {stop, _, _} ->
- Stop;
- State ->
- {next_state, StateName, State}
- end
- end;
+handle_sync_event({recv, N}, From, connection = StateName, State0) ->
+ passive_receive(State0#state{bytes_to_read = N, from = From}, StateName);
+
+%% During renegotiation, postpone handling the recv request until the
+%% renegotiation is finished; it will then be handled by next_state_connection/1.
+handle_sync_event({recv, N}, From, StateName, State) ->
+ {next_state, StateName, State#state{bytes_to_read = N, from = From,
+ recv_during_renegotiation = true}};
handle_sync_event({new_user, User}, _From, StateName,
State =#state{user_application = {OldMon, _}}) ->
@@ -814,6 +901,12 @@ handle_sync_event({set_opts, Opts0}, _From, StateName,
end
end;
+handle_sync_event(renegotiate, From, connection, State) ->
+ renegotiate(State#state{renegotiation = {true, From}});
+
+handle_sync_event(renegotiate, _, StateName, State) ->
+ {reply, {error, already_renegotiating}, StateName, State};
+
handle_sync_event(info, _, StateName,
#state{negotiated_version = Version,
session = #session{cipher_suite = Suite}} = State) ->
@@ -834,7 +927,6 @@ handle_sync_event(peer_certificate, _, StateName,
= State) ->
{reply, {ok, Cert}, StateName, State}.
-
%%--------------------------------------------------------------------
%% Function:
%% handle_info(Info,StateName,State)-> {next_state, NextStateName, NextState}|
@@ -863,34 +955,6 @@ handle_info({Protocol, _, Data}, StateName, State =
{stop, normal, State}
end;
-%% %% This is the code for {packet,ssl} removed because it was slower
-%% %% than handling it in erlang.
-%% handle_info(Data = #ssl_tls{}, StateName,
-%% State = #state{tls_buffer = Buffer,
-%% socket = Socket,
-%% connection_states = ConnectionStates0}) ->
-%% case Buffer of
-%% buffer ->
-%% {next_state, StateName, State#state{tls_buffer = [Data]}};
-%% continue ->
-%% inet:setopts(Socket, [{active,once}]),
-%% {Plain, ConnectionStates} =
-%% ssl_record:decode_cipher_text(Data, ConnectionStates0),
-%% gen_fsm:send_all_state_event(self(), Plain),
-%% {next_state, StateName,
-%% State#state{tls_buffer = buffer,
-%% connection_states = ConnectionStates}};
-%% List when is_list(List) ->
-%% {next_state, StateName,
-%% State#state{tls_buffer = Buffer ++ [Data]}}
-%% end;
-
-%% handle_info(CloseMsg = {_, Socket}, StateName0,
-%% #state{socket = Socket,tls_buffer = [Msg]} = State0) ->
-%% %% Hmm we have a ssl_tls msg buffered, handle that first
-%% %% and it proberbly is a close alert
-%% {next_state, StateName0, State0#state{tls_buffer=[Msg,{ssl_close,CloseMsg}]}};
-
handle_info({CloseTag, Socket}, _StateName,
#state{socket = Socket, close_tag = CloseTag,
negotiated_version = Version, host = Host,
@@ -924,17 +988,23 @@ handle_info(A, StateName, State) ->
%% necessary cleaning up. When it returns, the gen_fsm terminates with
%% Reason. The return value is ignored.
%%--------------------------------------------------------------------
-terminate(_Reason, connection, _S=#state{negotiated_version = Version,
+terminate(_Reason, connection, #state{negotiated_version = Version,
connection_states = ConnectionStates,
transport_cb = Transport,
- socket = Socket}) ->
+ socket = Socket, send_queue = SendQueue,
+ renegotiation = Renegotiate}) ->
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate),
{BinAlert, _} = encode_alert(?ALERT_REC(?WARNING,?CLOSE_NOTIFY),
Version, ConnectionStates),
Transport:send(Socket, BinAlert),
Transport:close(Socket);
-terminate(_Reason, _StateName, _S=#state{transport_cb = Transport, socket = Socket}) ->
- Transport:close(Socket),
- ok.
+terminate(_Reason, _StateName, #state{transport_cb = Transport,
+ socket = Socket, send_queue = SendQueue,
+ renegotiation = Renegotiate}) ->
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate),
+ Transport:close(Socket).
%%--------------------------------------------------------------------
%% Function:
@@ -969,8 +1039,8 @@ ssl_init(SslOpts, Role) ->
PrivateKey =
init_private_key(SslOpts#ssl_options.key, SslOpts#ssl_options.keyfile,
SslOpts#ssl_options.password, Role),
- ?DBG_TERM(PrivateKey),
- {ok, CertDbRef, CacheRef, OwnCert, PrivateKey}.
+ DHParams = init_diffie_hellman(SslOpts#ssl_options.dhfile, Role),
+ {ok, CertDbRef, CacheRef, OwnCert, PrivateKey, DHParams}.
init_certificates(#ssl_options{cacertfile = CACertFile,
certfile = CertFile}, Role) ->
@@ -1005,12 +1075,14 @@ init_certificates(CertDbRef, CacheRef, CertFile, server) ->
catch
_E:{badmatch, _R={error,_}} ->
Report = io_lib:format("SSL: ~p: ~p:~p ~s~n ~p~n",
- [?LINE, _E,_R, CertFile, erlang:get_stacktrace()]),
+ [?LINE, _E,_R, CertFile,
+ erlang:get_stacktrace()]),
error_logger:error_report(Report),
throw(ecertfile);
_E:_R ->
Report = io_lib:format("SSL: ~p: ~p:~p ~s~n ~p~n",
- [?LINE, _E,_R, CertFile, erlang:get_stacktrace()]),
+ [?LINE, _E,_R, CertFile,
+ erlang:get_stacktrace()]),
error_logger:error_report(Report),
throw(ecertfile)
end.
@@ -1021,40 +1093,43 @@ init_private_key(undefined, KeyFile, Password, _) ->
try
{ok, List} = ssl_manager:cache_pem_file(KeyFile),
[Der] = [Der || Der = {PKey, _ , _} <- List,
- PKey =:= rsa_private_key orelse PKey =:= dsa_private_key],
+ PKey =:= rsa_private_key orelse
+ PKey =:= dsa_private_key],
{ok, Decoded} = public_key:decode_private_key(Der,Password),
Decoded
catch
_E:{badmatch, _R={error,_}} ->
Report = io_lib:format("SSL: ~p: ~p:~p ~s~n ~p~n",
- [?LINE, _E,_R, KeyFile, erlang:get_stacktrace()]),
+ [?LINE, _E,_R, KeyFile,
+ erlang:get_stacktrace()]),
error_logger:error_report(Report),
throw(ekeyfile);
_E:_R ->
Report = io_lib:format("SSL: ~p: ~p:~p ~s~n ~p~n",
- [?LINE, _E,_R, KeyFile, erlang:get_stacktrace()]),
+ [?LINE, _E,_R, KeyFile,
+ erlang:get_stacktrace()]),
error_logger:error_report(Report),
throw(ekeyfile)
end;
init_private_key(PrivateKey, _, _,_) ->
PrivateKey.
-send_event(FsmPid, Event) ->
- gen_fsm:send_event(FsmPid, Event).
-
-sync_send_event(FsmPid, Event, Timeout) ->
- try gen_fsm:sync_send_event(FsmPid, Event, Timeout) of
- Reply ->
- Reply
- catch
- exit:{noproc, _} ->
- {error, closed};
- exit:{timeout, _} ->
- {error, timeout};
- exit:{normal, _} ->
- {error, closed}
+init_diffie_hellman(_, client) ->
+ undefined;
+init_diffie_hellman(undefined, _) ->
+ ?DEFAULT_DIFFIE_HELLMAN_PARAMS;
+init_diffie_hellman(DHParamFile, server) ->
+ {ok, List} = ssl_manager:cache_pem_file(DHParamFile),
+ case [Der || Der = {dh_params, _ , _} <- List] of
+ [Der] ->
+ {ok, Decoded} = public_key:decode_dhparams(Der),
+ Decoded;
+ [] ->
+ ?DEFAULT_DIFFIE_HELLMAN_PARAMS
end.
+send_event(FsmPid, Event) ->
+ gen_fsm:send_event(FsmPid, Event).
send_all_state_event(FsmPid, Event) ->
@@ -1078,6 +1153,16 @@ sync_send_all_state_event(FsmPid, Event, Timeout) ->
alert_event(Alert) ->
send_all_state_event(self(), Alert).
+%% We currently do not support cipher suites that use fixed DH.
+%% If we want to implement that, code should be added here to
+%% extract the DH parameters from the certificate.
+handle_peer_cert(PeerCert, PublicKeyInfo,
+ #state{session = Session} = State0) ->
+ State = State0#state{session =
+ Session#session{peer_certificate = PeerCert},
+ public_key_info = PublicKeyInfo},
+ {next_state, certify, next_record(State)}.
+
certify_client(#state{client_certificate_requested = true, role = client,
connection_states = ConnectionStates0,
transport_cb = Transport,
@@ -1111,9 +1196,8 @@ verify_client_cert(#state{client_certificate_requested = true, role = client,
ignore -> %% No key or cert or fixed_diffie_hellman
State;
Verified ->
- SigAlg = ssl_handshake:sig_alg(KeyAlg),
{BinVerified, ConnectionStates1, Hashes1} =
- encode_handshake(Verified, SigAlg, Version,
+ encode_handshake(Verified, KeyAlg, Version,
ConnectionStates0, Hashes0),
Transport:send(Socket, BinVerified),
State#state{connection_states = ConnectionStates1,
@@ -1140,8 +1224,10 @@ do_server_hello(Type, #state{negotiated_version = Version,
{_, ConnectionStates1} ->
State1 = State#state{connection_states=ConnectionStates1,
session = Session},
- {ConnectionStates, Hashes} = finalize_server_handshake(State1),
- Resumed = State1#state{connection_states = ConnectionStates,
+ {ConnectionStates, Hashes} =
+ finalize_server_handshake(State1),
+ Resumed = State1#state{connection_states =
+ ConnectionStates,
tls_handshake_hashes = Hashes},
{next_state, abbreviated, next_record(Resumed)};
#alert{} = Alert ->
@@ -1252,14 +1338,15 @@ key_exchange(#state{role = server, key_algorithm = Algo} = State)
Algo == dh_rsa ->
State;
-key_exchange(#state{role = server, key_algorithm = rsa_export} = State) ->
+%key_exchange(#state{role = server, key_algorithm = rsa_export} = State) ->
%% TODO when the public key in the server certificate is
%% less than or equal to 512 bits in length dont send key_exchange
%% but do it otherwise
- State;
+% State;
key_exchange(#state{role = server, key_algorithm = Algo,
diffie_hellman_params = Params,
+ private_key = PrivateKey,
connection_states = ConnectionStates0,
negotiated_version = Version,
tls_handshake_hashes = Hashes0,
@@ -1270,26 +1357,28 @@ key_exchange(#state{role = server, key_algorithm = Algo,
Algo == dhe_dss_export;
Algo == dhe_rsa;
Algo == dhe_rsa_export ->
- Msg = ssl_handshake:key_exchange(server, Params),
- {BinMsg, ConnectionStates1, Hashes1} =
- encode_handshake(Msg, Version, ConnectionStates0, Hashes0),
- Transport:send(Socket, BinMsg),
- State#state{connection_states = ConnectionStates1,
- tls_handshake_hashes = Hashes1};
-key_exchange(#state{role = server, key_algorithm = dh_anon,
- connection_states = ConnectionStates0,
- negotiated_version = Version,
- tls_handshake_hashes = Hashes0,
- socket = Socket,
- transport_cb = Transport
- } = State) ->
- Msg = ssl_handshake:key_exchange(server, anonymous),
- {BinMsg, ConnectionStates1, Hashes1} =
+ Keys = public_key:gen_key(Params),
+ ConnectionState =
+ ssl_record:pending_connection_state(ConnectionStates0, read),
+ SecParams = ConnectionState#connection_state.security_parameters,
+ #security_parameters{client_random = ClientRandom,
+ server_random = ServerRandom} = SecParams,
+ Msg = ssl_handshake:key_exchange(server, {dh, Keys, Params,
+ Algo, ClientRandom,
+ ServerRandom,
+ PrivateKey}),
+ {BinMsg, ConnectionStates, Hashes1} =
encode_handshake(Msg, Version, ConnectionStates0, Hashes0),
Transport:send(Socket, BinMsg),
- State#state{connection_states = ConnectionStates1,
+ State#state{connection_states = ConnectionStates,
+ diffie_hellman_keys = Keys,
tls_handshake_hashes = Hashes1};
+
+
+%% key_algorithm = dh_anon is not supported. If support is implemented it
+%% should be disabled by default, and a key_exchange clause for it is
+%% needed here.
key_exchange(#state{role = client,
connection_states = ConnectionStates0,
@@ -1309,17 +1398,33 @@ key_exchange(#state{role = client,
key_exchange(#state{role = client,
connection_states = ConnectionStates0,
key_algorithm = Algorithm,
- public_key_info = PublicKeyInfo,
negotiated_version = Version,
- diffie_hellman_params = Params,
- own_cert = Cert,
+ diffie_hellman_keys = {DhPubKey, _},
socket = Socket, transport_cb = Transport,
tls_handshake_hashes = Hashes0} = State)
when Algorithm == dhe_dss;
Algorithm == dhe_dss_export;
Algorithm == dhe_rsa;
Algorithm == dhe_rsa_export ->
- Msg = dh_key_exchange(Cert, Params, PublicKeyInfo),
+ Msg = ssl_handshake:key_exchange(client, {dh, DhPubKey}),
+ {BinMsg, ConnectionStates1, Hashes1} =
+ encode_handshake(Msg, Version, ConnectionStates0, Hashes0),
+ Transport:send(Socket, BinMsg),
+ State#state{connection_states = ConnectionStates1,
+ tls_handshake_hashes = Hashes1};
+
+key_exchange(#state{role = client,
+ connection_states = ConnectionStates0,
+ key_algorithm = Algorithm,
+ negotiated_version = Version,
+ client_certificate_requested = ClientCertReq,
+ own_cert = OwnCert,
+ diffie_hellman_keys = DhKeys,
+ socket = Socket, transport_cb = Transport,
+ tls_handshake_hashes = Hashes0} = State)
+ when Algorithm == dh_dss;
+ Algorithm == dh_rsa ->
+ Msg = dh_key_exchange(OwnCert, DhKeys, ClientCertReq),
{BinMsg, ConnectionStates1, Hashes1} =
encode_handshake(Msg, Version, ConnectionStates0, Hashes0),
Transport:send(Socket, BinMsg),
@@ -1334,17 +1439,19 @@ rsa_key_exchange(PremasterSecret, PublicKeyInfo = {Algorithm, _, _})
ssl_handshake:key_exchange(client,
{premaster_secret, PremasterSecret,
PublicKeyInfo});
-
rsa_key_exchange(_, _) ->
throw (?ALERT_REC(?FATAL,?HANDSHAKE_FAILURE)).
-dh_key_exchange(OwnCert, Params, PublicKeyInfo) ->
+dh_key_exchange(OwnCert, DhKeys, true) ->
case public_key:pkix_is_fixed_dh_cert(OwnCert) of
true ->
ssl_handshake:key_exchange(client, fixed_diffie_hellman);
false ->
- ssl_handshake:key_exchange(client, {dh, Params, PublicKeyInfo})
- end.
+ {DhPubKey, _} = DhKeys,
+ ssl_handshake:key_exchange(client, {dh, DhPubKey})
+ end;
+dh_key_exchange(_, {DhPubKey, _}, false) ->
+ ssl_handshake:key_exchange(client, {dh, DhPubKey}).
request_client_cert(#state{ssl_options = #ssl_options{verify = verify_peer},
connection_states = ConnectionStates0,
@@ -1378,7 +1485,8 @@ finalize_client_handshake(#state{connection_states = ConnectionStates0}
finalize_server_handshake(State) ->
ConnectionStates0 = cipher_protocol(State),
ConnectionStates =
- ssl_record:activate_pending_connection_state(ConnectionStates0, write),
+ ssl_record:activate_pending_connection_state(ConnectionStates0,
+ write),
finished(State#state{connection_states = ConnectionStates}).
cipher_protocol(#state{connection_states = ConnectionStates,
@@ -1386,7 +1494,8 @@ cipher_protocol(#state{connection_states = ConnectionStates,
negotiated_version = Version,
transport_cb = Transport}) ->
{BinChangeCipher, NewConnectionStates} =
- encode_change_cipher(#change_cipher_spec{}, Version, ConnectionStates),
+ encode_change_cipher(#change_cipher_spec{},
+ Version, ConnectionStates),
Transport:send(Socket, BinChangeCipher),
NewConnectionStates.
@@ -1402,10 +1511,66 @@ finished(#state{role = Role, socket = Socket, negotiated_version = Version,
Transport:send(Socket, BinFinished),
{NewConnectionStates, NewHashes}.
-handle_server_key(_KeyExchangeMsg, State) ->
- State.
-handle_clinet_key(_KeyExchangeMsg, State) ->
- State.
+handle_server_key(
+ #server_key_exchange{params =
+ #server_dh_params{dh_p = P,
+ dh_g = G,
+ dh_y = ServerPublicDhKey},
+ signed_params = Signed},
+ #state{session = Session, negotiated_version = Version, role = Role,
+ public_key_info = PubKeyInfo,
+ key_algorithm = KeyAlgo,
+ connection_states = ConnectionStates0} = State) ->
+
+ PLen = size(P),
+ GLen = size(G),
+ YLen = size(ServerPublicDhKey),
+
+ ConnectionState =
+ ssl_record:pending_connection_state(ConnectionStates0, read),
+ SecParams = ConnectionState#connection_state.security_parameters,
+ #security_parameters{client_random = ClientRandom,
+ server_random = ServerRandom} = SecParams,
+ Hash = ssl_handshake:server_key_exchange_hash(KeyAlgo,
+ <<ClientRandom/binary,
+ ServerRandom/binary,
+ ?UINT16(PLen), P/binary,
+ ?UINT16(GLen), G/binary,
+ ?UINT16(YLen),
+ ServerPublicDhKey/binary>>),
+
+ case verify_dh_params(Signed, Hash, PubKeyInfo) of
+ true ->
+ PMpint = mpint_binary(P),
+ GMpint = mpint_binary(G),
+ Keys = {_, ClientDhPrivateKey} =
+ crypto:dh_generate_key([PMpint,GMpint]),
+ PremasterSecret =
+ crypto:dh_compute_key(mpint_binary(ServerPublicDhKey),
+ ClientDhPrivateKey, [PMpint, GMpint]),
+ case ssl_handshake:master_secret(Version, PremasterSecret,
+ ConnectionStates0, Role) of
+ {MasterSecret, ConnectionStates} ->
+ State#state{diffie_hellman_keys = Keys,
+ session =
+ Session#session{master_secret
+ = MasterSecret},
+ connection_states = ConnectionStates};
+ #alert{} = Alert ->
+ Alert
+ end;
+ false ->
+ ?ALERT_REC(?FATAL,?HANDSHAKE_FAILURE)
+ end.
+
+verify_dh_params(Signed, Hash, {?rsaEncryption, PubKey, _PubKeyparams}) ->
+ case public_key:decrypt_public(Signed, PubKey,
+ [{rsa_pad, rsa_pkcs1_padding}]) of
+ Hash ->
+ true;
+ _ ->
+ false
+ end.
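
A standalone sketch of the Diffie-Hellman agreement performed by handle_server_key/2 (client side) and the client_key_exchange clause of certify/2 (server side), using the same crypto calls as above; P and G are the integer prime and generator as found in #'DHParameter'{}, and both sides end up with the same premaster secret:

dh_agreement_sketch(P, G) ->
    PMpint = crypto:mpint(P),
    GMpint = crypto:mpint(G),
    {ServerPub, ServerPriv} = crypto:dh_generate_key([PMpint, GMpint]),
    {ClientPub, ClientPriv} = crypto:dh_generate_key([PMpint, GMpint]),
    PremasterSecret = crypto:dh_compute_key(ServerPub, ClientPriv,
                                            [PMpint, GMpint]),
    %% The other side computes the identical secret from the peer's public key.
    PremasterSecret = crypto:dh_compute_key(ClientPub, ServerPriv,
                                            [PMpint, GMpint]),
    PremasterSecret.
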
encode_alert(#alert{} = Alert, Version, ConnectionStates) ->
?DBG_TERM(Alert),
@@ -1442,8 +1607,8 @@ encode_size_packet(Bin, Size, Max) ->
false -> <<Len:Size, Bin/binary>>
end.
-encode_data(Data, Version, ConnectionStates) ->
- ssl_record:encode_data(Data, Version, ConnectionStates).
+encode_data(Data, Version, ConnectionStates, RenegotiateAt) ->
+ ssl_record:encode_data(Data, Version, ConnectionStates, RenegotiateAt).
decode_alerts(Bin) ->
decode_alerts(Bin, []).
@@ -1454,6 +1619,20 @@ decode_alerts(<<?BYTE(Level), ?BYTE(Description), Rest/binary>>, Acc) ->
decode_alerts(<<>>, Acc) ->
lists:reverse(Acc, []).
+passive_receive(State0 = #state{user_data_buffer = Buffer}, StateName) ->
+ case Buffer of
+ <<>> ->
+ State = next_record(State0),
+ {next_state, StateName, State};
+ _ ->
+ case application_data(<<>>, State0) of
+ Stop = {stop, _, _} ->
+ Stop;
+ State ->
+ {next_state, StateName, State}
+ end
+ end.
+
application_data(Data, #state{user_application = {_Mon, Pid},
socket_options = SOpts,
bytes_to_read = BytesToRead,
@@ -1504,19 +1683,49 @@ get_data(#socket_options{active=Active, packet=Raw}, BytesToRead, Buffer)
end;
get_data(#socket_options{packet=Type, packet_size=Size}, _, Buffer) ->
PacketOpts = [{packet_size, Size}],
- case erlang:decode_packet(Type, Buffer, PacketOpts) of
+ case decode_packet(Type, Buffer, PacketOpts) of
{more, _} ->
{ok, <<>>, Buffer};
Decoded ->
Decoded
end.
-deliver_app_data(SO = #socket_options{active=once}, Data, Pid, From) ->
- send_or_reply(once, Pid, From, format_reply(SO, Data)),
- SO#socket_options{active=false};
-deliver_app_data(SO= #socket_options{active=Active}, Data, Pid, From) ->
- send_or_reply(Active, Pid, From, format_reply(SO, Data)),
- SO.
+decode_packet({http, headers}, Buffer, PacketOpts) ->
+ decode_packet(httph, Buffer, PacketOpts);
+decode_packet({http_bin, headers}, Buffer, PacketOpts) ->
+ decode_packet(httph_bin, Buffer, PacketOpts);
+decode_packet(Type, Buffer, PacketOpts) ->
+ erlang:decode_packet(Type, Buffer, PacketOpts).
+
+%% Just like with gen_tcp sockets, an ssl socket that has been configured with
+%% {packet, http} (or {packet, http_bin}) will automatically switch to expect
+%% HTTP headers after it sees an HTTP Request or HTTP Response line. We
+%% represent the current state as follows:
+%% #socket_options.packet =:= http: Expect a HTTP Request/Response line
+%% #socket_options.packet =:= {http, headers}: Expect HTTP Headers
+%% Note that if the user has explicitly configured the socket to expect
+%% HTTP headers using the {packet, httph} option, we don't do any automatic
+%% switching of states.
+deliver_app_data(SOpts = #socket_options{active=Active, packet=Type},
+ Data, Pid, From) ->
+ send_or_reply(Active, Pid, From, format_reply(SOpts, Data)),
+ SO = case Data of
+ {P, _, _, _} when ((P =:= http_request) or (P =:= http_response)),
+ ((Type =:= http) or (Type =:= http_bin)) ->
+ SOpts#socket_options{packet={Type, headers}};
+ http_eoh when tuple_size(Type) =:= 2 ->
+ % End of headers - expect another Request/Response line
+ {Type1, headers} = Type,
+ SOpts#socket_options{packet=Type1};
+ _ ->
+ SOpts
+ end,
+ case Active of
+ once ->
+ SO#socket_options{active=false};
+ _ ->
+ SO
+ end.
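
A hypothetical passive-mode caller (helper names assumed, not part of the patch) showing what the automatic switch above means in practice for a socket opened with {packet, http_bin}: the first recv yields the request line, the following ones yield one header each, then http_eoh, after which the socket again expects a request/response line.

read_http_request(SslSocket) ->
    {ok, {http_request, Method, Uri, _Version}} = ssl:recv(SslSocket, 0),
    Headers = read_http_headers(SslSocket, []),
    {Method, Uri, Headers}.

read_http_headers(SslSocket, Acc) ->
    case ssl:recv(SslSocket, 0) of
        {ok, {http_header, _, Name, _, Value}} ->
            read_http_headers(SslSocket, [{Name, Value} | Acc]);
        {ok, http_eoh} ->
            lists:reverse(Acc)
    end.
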
format_reply(#socket_options{active=false, mode=Mode, header=Header}, Data) ->
{ok, format_reply(Mode, Header, Data)};
@@ -1557,34 +1766,6 @@ opposite_role(server) ->
send_user(Pid, Msg) ->
Pid ! Msg.
-%% %% This is the code for {packet,ssl} removed because it was slower
-%% %% than handling it in erlang.
-%% next_record(#state{socket = Socket,
-%% tls_buffer = [Msg|Rest],
-%% connection_states = ConnectionStates0} = State) ->
-%% Buffer =
-%% case Rest of
-%% [] ->
-%% inet:setopts(Socket, [{active,once}]),
-%% buffer;
-%% _ -> Rest
-%% end,
-%% case Msg of
-%% #ssl_tls{} ->
-%% {Plain, ConnectionStates} =
-%% ssl_record:decode_cipher_text(Msg, ConnectionStates0),
-%% gen_fsm:send_all_state_event(self(), Plain),
-%% State#state{tls_buffer=Buffer, connection_states = ConnectionStates};
-%% {ssl_close, Msg} ->
-%% self() ! Msg,
-%% State#state{tls_buffer=Buffer}
-%% end;
-%% next_record(#state{socket = Socket, tls_buffer = undefined} = State) ->
-%% inet:setopts(Socket, [{active,once}]),
-%% State#state{tls_buffer=continue};
-%% next_record(State) ->
-%% State#state{tls_buffer=continue}.
-
next_record(#state{tls_cipher_texts = [], socket = Socket} = State) ->
inet:setopts(Socket, [{active,once}]),
State;
@@ -1594,13 +1775,57 @@ next_record(#state{tls_cipher_texts = [CT | Rest],
gen_fsm:send_all_state_event(self(), Plain),
State#state{tls_cipher_texts = Rest, connection_states = ConnStates}.
+
next_record_if_active(State =
#state{socket_options =
#socket_options{active = false}}) ->
State;
+
next_record_if_active(State) ->
next_record(State).
+next_state_connection(#state{send_queue = Queue0,
+ negotiated_version = Version,
+ socket = Socket,
+ transport_cb = Transport,
+ connection_states = ConnectionStates0,
+ ssl_options = #ssl_options{renegotiate_at = RenegotiateAt}
+ } = State) ->
+ %% Send queued up data
+ case queue:out(Queue0) of
+ {{value, {From, Data}}, Queue} ->
+ case encode_data(Data, Version, ConnectionStates0, RenegotiateAt) of
+ {Msgs, [], ConnectionStates} ->
+ Result = Transport:send(Socket, Msgs),
+ gen_fsm:reply(From, Result),
+ next_state_connection(State#state{connection_states = ConnectionStates,
+ send_queue = Queue});
+ %% This is unlikely to happen. User configuration of the
+		 %% undocumented test option renegotiate_at can make it more likely.
+ {Msgs, RestData, ConnectionStates} ->
+ if
+ Msgs =/= [] ->
+ Transport:send(Socket, Msgs);
+ true ->
+ ok
+ end,
+ renegotiate(State#state{connection_states = ConnectionStates,
+ send_queue = queue:in_r({From, RestData}, Queue),
+ renegotiation = {true, internal}})
+ end;
+ {empty, Queue0} ->
+ next_state_is_connection(State)
+ end.
+
+next_state_is_connection(State =
+ #state{recv_during_renegotiation = true, socket_options =
+ #socket_options{active = false}}) ->
+ passive_receive(State#state{recv_during_renegotiation = false}, connection);
+
+next_state_is_connection(State) ->
+ {next_state, connection, next_record_if_active(State)}.
+
+
register_session(_, _, _, #session{is_resumable = true} = Session) ->
Session; %% Already registered
register_session(client, Host, Port, Session0) ->
@@ -1650,7 +1875,10 @@ initial_state(Role, Host, Port, Socket, {SSLOptions, SocketOptions}, User,
bytes_to_read = 0,
user_data_buffer = <<>>,
log_alert = true,
- session_cache_cb = SessionCacheCb
+ session_cache_cb = SessionCacheCb,
+ renegotiation = {false, first},
+ recv_during_renegotiation = false,
+ send_queue = queue:new()
}.
sslsocket(Pid) ->
@@ -1665,8 +1893,12 @@ get_socket_opts(Socket, [mode | Tags], SockOpts, Acc) ->
get_socket_opts(Socket, Tags, SockOpts,
[{mode, SockOpts#socket_options.mode} | Acc]);
get_socket_opts(Socket, [packet | Tags], SockOpts, Acc) ->
- get_socket_opts(Socket, Tags, SockOpts,
- [{packet, SockOpts#socket_options.packet} | Acc]);
+ case SockOpts#socket_options.packet of
+ {Type, headers} ->
+ get_socket_opts(Socket, Tags, SockOpts, [{packet, Type} | Acc]);
+ Type ->
+ get_socket_opts(Socket, Tags, SockOpts, [{packet, Type} | Acc])
+ end;
get_socket_opts(Socket, [header | Tags], SockOpts, Acc) ->
get_socket_opts(Socket, Tags, SockOpts,
[{header, SockOpts#socket_options.header} | Acc]);
@@ -1688,7 +1920,8 @@ set_socket_opts(Socket, [], SockOpts, Other) ->
inet:setopts(Socket, Other),
SockOpts;
set_socket_opts(Socket, [{mode, Mode}| Opts], SockOpts, Other) ->
- set_socket_opts(Socket, Opts, SockOpts#socket_options{mode = Mode}, Other);
+ set_socket_opts(Socket, Opts,
+ SockOpts#socket_options{mode = Mode}, Other);
set_socket_opts(Socket, [{packet, Packet}| Opts], SockOpts, Other) ->
set_socket_opts(Socket, Opts,
SockOpts#socket_options{packet = Packet}, Other);
@@ -1743,7 +1976,55 @@ handle_own_alert(Alert, Version, StateName,
catch _:_ ->
ok
end.
-
-make_premaster_secret({MajVer, MinVer}) ->
+make_premaster_secret({MajVer, MinVer}, Alg) when Alg == rsa;
+ Alg == dh_dss;
+ Alg == dh_rsa ->
Rand = crypto:rand_bytes(?NUM_OF_PREMASTERSECRET_BYTES-2),
- <<?BYTE(MajVer), ?BYTE(MinVer), Rand/binary>>.
+ <<?BYTE(MajVer), ?BYTE(MinVer), Rand/binary>>;
+make_premaster_secret(_, _) ->
+ undefined.
+
+mpint_binary(Binary) ->
+ Size = byte_size(Binary),
+ <<?UINT32(Size), Binary/binary>>.
+
+
+ack_connection(#state{renegotiation = {true, Initiater}} = State)
+ when Initiater == internal;
+ Initiater == peer ->
+ State#state{renegotiation = undefined};
+ack_connection(#state{renegotiation = {true, From}} = State) ->
+ gen_fsm:reply(From, ok),
+ State#state{renegotiation = undefined};
+ack_connection(#state{renegotiation = {false, first}, from = From} = State) ->
+ gen_fsm:reply(From, connected),
+ State#state{renegotiation = undefined}.
+
+renegotiate(#state{role = client} = State) ->
+ %% Handle same way as if server requested
+ %% the renegotiation
+ Hs0 = ssl_handshake:init_hashes(),
+ connection(#hello_request{}, State#state{tls_handshake_hashes = Hs0});
+renegotiate(#state{role = server,
+ socket = Socket,
+ transport_cb = Transport,
+ negotiated_version = Version,
+ connection_states = ConnectionStates0} = State) ->
+ HelloRequest = ssl_handshake:hello_request(),
+ Frag = ssl_handshake:encode_handshake(HelloRequest, Version, undefined),
+ Hs0 = ssl_handshake:init_hashes(),
+ {BinMsg, ConnectionStates} =
+ ssl_record:encode_handshake(Frag, Version, ConnectionStates0),
+ Transport:send(Socket, BinMsg),
+ {next_state, hello, next_record(State#state{connection_states =
+ ConnectionStates,
+ tls_handshake_hashes = Hs0})}.
+notify_senders(SendQueue) ->
+ lists:foreach(fun({From, _}) ->
+ gen_fsm:reply(From, {error, closed})
+ end, queue:to_list(SendQueue)).
+
+notify_renegotiater({true, From}) when not is_atom(From) ->
+ gen_fsm:reply(From, {error, closed});
+notify_renegotiater(_) ->
+ ok.
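
A hypothetical caller-side wrapper (name assumed) around the renegotiation/1 API added above: the request is synchronous, answered ok by ack_connection/1 once the new handshake completes, {error, renegotiation_rejected} if the peer responds with a no_renegotiation alert, and {error, already_renegotiating} if a handshake is already in progress.

force_renegotiate(ConnectionPid) ->
    case ssl_connection:renegotiation(ConnectionPid) of
        ok ->
            ok;
        {error, _Reason} = Error ->
            Error
    end.
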
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 8c598135ca..9f5ac7106a 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -31,11 +31,11 @@
-include("ssl_debug.hrl").
-include_lib("public_key/include/public_key.hrl").
--export([master_secret/4, client_hello/4, server_hello/3, hello/2,
- certify/5, certificate/3,
+-export([master_secret/4, client_hello/4, server_hello/3, hello/2,
+ hello_request/0, certify/7, certificate/3,
client_certificate_verify/6,
certificate_verify/6, certificate_request/2,
- key_exchange/2, finished/4,
+ key_exchange/2, server_key_exchange_hash/2, finished/4,
verify_connection/5,
get_tls_handshake/4,
server_hello_done/0, sig_alg/1,
@@ -97,6 +97,15 @@ server_hello(SessionId, Version, ConnectionStates) ->
}.
%%--------------------------------------------------------------------
+%% Function: hello_request() -> #hello_request{}
+%%
+%% Description: Creates a hello request message sent by server to
+%% trigger renegotiation.
+%%--------------------------------------------------------------------
+hello_request() ->
+ #hello_request{}.
+
+%%--------------------------------------------------------------------
%% Function: hello(Hello, Info) ->
%% {Version, Id, NewConnectionStates} |
%% #alert{}
@@ -152,10 +161,25 @@ hello(#client_hello{client_version = ClientVersion, random = Random} = Hello,
%% Description: Handles a certificate handshake message
%%--------------------------------------------------------------------
certify(#certificate{asn1_certificates = ASN1Certs}, CertDbRef,
- MaxPathLen, Verify, VerifyFun) ->
+ MaxPathLen, Verify, VerifyFun, ValidateFun, Role) ->
[PeerCert | _] = ASN1Certs,
VerifyBool = verify_bool(Verify),
-
+
+ ValidateExtensionFun =
+ case ValidateFun of
+ undefined ->
+ fun(Extensions, ValidationState, Verify0, AccError) ->
+ ssl_certificate:validate_extensions(Extensions, ValidationState,
+ [], Verify0, AccError, Role)
+ end;
+ Fun ->
+ fun(Extensions, ValidationState, Verify0, AccError) ->
+ {NewExtensions, NewValidationState, NewAccError}
+ = ssl_certificate:validate_extensions(Extensions, ValidationState,
+ [], Verify0, AccError, Role),
+ Fun(NewExtensions, NewValidationState, Verify0, NewAccError)
+ end
+ end,
try
%% Allow missing root_cert and check that with VerifyFun
ssl_certificate:trusted_cert_and_path(ASN1Certs, CertDbRef, false) of
@@ -165,6 +189,8 @@ certify(#certificate{asn1_certificates = ASN1Certs}, CertDbRef,
[{max_path_length,
MaxPathLen},
{verify, VerifyBool},
+ {validate_extensions_fun,
+ ValidateExtensionFun},
{acc_errors,
VerifyErrors}]),
case Result of
@@ -248,8 +274,10 @@ client_certificate_verify(OwnCert, MasterSecret, Version, Algorithm,
%% Description: Checks that the certificate_verify message is valid.
%%--------------------------------------------------------------------
certificate_verify(Signature, {_, PublicKey, _}, Version,
- MasterSecret, Algorithm, {_, Hashes0})
- when Algorithm =:= rsa; Algorithm =:= dh_rsa; Algorithm =:= dhe_rsa ->
+ MasterSecret, Algorithm, {_, Hashes0})
+ when Algorithm == rsa;
+ Algorithm == dh_rsa;
+ Algorithm == dhe_rsa ->
Hashes = calc_certificate_verify(Version, MasterSecret,
Algorithm, Hashes0),
case public_key:decrypt_public(Signature, PublicKey,
@@ -296,30 +324,32 @@ key_exchange(client, fixed_diffie_hellman) ->
#client_diffie_hellman_public{
dh_public = <<>>
}};
-key_exchange(client, {dh, PublicKey}) ->
- Len = byte_size(PublicKey),
+key_exchange(client, {dh, <<?UINT32(Len), PublicKey:Len/binary>>}) ->
#client_key_exchange{
- exchange_keys = #client_diffie_hellman_public{
- dh_public = <<?UINT16(Len), PublicKey/binary>>}
+ exchange_keys = #client_diffie_hellman_public{
+ dh_public = PublicKey}
};
-%% key_exchange(server, {{?'dhpublicnumber', _PublicKey,
-%% #'DomainParameters'{p = P, g = G, y = Y},
-%% SignAlgorithm, ClientRandom, ServerRandom}}) ->
-%% ServerDHParams = #server_dh_params{dh_p = P, dh_g = G, dh_y = Y},
-%% PLen = byte_size(P),
-%% GLen = byte_size(G),
-%% YLen = byte_size(Y),
-%% Hash = server_key_exchange_hash(SignAlgorithm, <<ClientRandom/binary,
-%% ServerRandom/binary,
-%% ?UINT16(PLen), P/binary,
-%% ?UINT16(GLen), G/binary,
-%% ?UINT16(YLen), Y/binary>>),
-%% Signed = digitally_signed(Hash, PrivateKey),
-%% #server_key_exchange{
-%% params = ServerDHParams,
-%% signed_params = Signed
-%% };
+key_exchange(server, {dh, {<<?UINT32(_), PublicKey/binary>>, _},
+ #'DHParameter'{prime = P, base = G},
+ KeyAlgo, ClientRandom, ServerRandom, PrivateKey}) ->
+ <<?UINT32(_), PBin/binary>> = crypto:mpint(P),
+ <<?UINT32(_), GBin/binary>> = crypto:mpint(G),
+ PLen = byte_size(PBin),
+ GLen = byte_size(GBin),
+ YLen = byte_size(PublicKey),
+ ServerDHParams = #server_dh_params{dh_p = PBin,
+ dh_g = GBin, dh_y = PublicKey},
+
+ Hash =
+ server_key_exchange_hash(KeyAlgo, <<ClientRandom/binary,
+ ServerRandom/binary,
+ ?UINT16(PLen), PBin/binary,
+ ?UINT16(GLen), GBin/binary,
+ ?UINT16(YLen), PublicKey/binary>>),
+ Signed = digitally_signed(Hash, PrivateKey),
+ #server_key_exchange{params = ServerDHParams,
+ signed_params = Signed};
key_exchange(_, _) ->
%%TODO : Real imp
#server_key_exchange{}.
@@ -417,7 +447,8 @@ server_hello_done() ->
%%
%% encode a handshake packet to binary
%%--------------------------------------------------------------------
-encode_handshake(Package, Version, SigAlg) ->
+encode_handshake(Package, Version, KeyAlg) ->
+ SigAlg = sig_alg(KeyAlg),
{MsgType, Bin} = enc_hs(Package, Version, SigAlg),
Len = byte_size(Bin),
[MsgType, ?uint24(Len), Bin].
@@ -437,32 +468,16 @@ get_tls_handshake(Data, Buffer, KeyAlg, Version) ->
get_tls_handshake_aux(list_to_binary([Buffer, Data]),
KeyAlg, Version, []).
-get_tls_handshake_aux(<<?BYTE(Type), ?UINT24(Length), Body:Length/binary,Rest/binary>>,
- KeyAlg, Version, Acc) ->
+get_tls_handshake_aux(<<?BYTE(Type), ?UINT24(Length),
+ Body:Length/binary,Rest/binary>>, KeyAlg,
+ Version, Acc) ->
Raw = <<?BYTE(Type), ?UINT24(Length), Body/binary>>,
- H = dec_hs(Type, Body, KeyAlg, Version),
+ H = dec_hs(Type, Body, key_exchange_alg(KeyAlg), Version),
get_tls_handshake_aux(Rest, KeyAlg, Version, [{H,Raw} | Acc]);
get_tls_handshake_aux(Data, _KeyAlg, _Version, Acc) ->
{lists:reverse(Acc), Data}.
%%--------------------------------------------------------------------
-%% Function: sig_alg(atom()) -> integer()
-%%
-%% Description: Convert from key exchange as atom to signature
-%% algorithm as a ?SIGNATURE_... constant
-%%--------------------------------------------------------------------
-
-sig_alg(dh_anon) ->
- ?SIGNATURE_ANONYMOUS;
-sig_alg(Alg) when Alg == dhe_rsa; Alg == rsa; Alg == dh_rsa ->
- ?SIGNATURE_RSA;
-sig_alg(Alg) when Alg == dh_dss; Alg == dhe_dss ->
- ?SIGNATURE_DSA;
-sig_alg(_) ->
- ?NULL.
-
-
-%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
verify_bool(verify_peer) ->
@@ -649,8 +664,8 @@ dec_hs(?SERVER_HELLO, <<?BYTE(Major), ?BYTE(Minor), Random:32/binary,
dec_hs(?CERTIFICATE, <<?UINT24(ACLen), ASN1Certs:ACLen/binary>>, _, _) ->
#certificate{asn1_certificates = certs_to_list(ASN1Certs)};
dec_hs(?SERVER_KEY_EXCHANGE, <<?UINT16(ModLen), Mod:ModLen/binary,
- ?UINT16(ExpLen), Exp:ExpLen/binary,
- Sig/binary>>,
+ ?UINT16(ExpLen), Exp:ExpLen/binary,
+ ?UINT16(_), Sig/binary>>,
?KEY_EXCHANGE_RSA, _) ->
#server_key_exchange{params = #server_rsa_params{rsa_modulus = Mod,
rsa_exponent = Exp},
@@ -658,9 +673,10 @@ dec_hs(?SERVER_KEY_EXCHANGE, <<?UINT16(ModLen), Mod:ModLen/binary,
dec_hs(?SERVER_KEY_EXCHANGE, <<?UINT16(PLen), P:PLen/binary,
?UINT16(GLen), G:GLen/binary,
?UINT16(YLen), Y:YLen/binary,
- Sig/binary>>,
+ ?UINT16(_), Sig/binary>>,
?KEY_EXCHANGE_DIFFIE_HELLMAN, _) ->
- #server_key_exchange{params = #server_dh_params{dh_p = P,dh_g = G, dh_y = Y},
+ #server_key_exchange{params = #server_dh_params{dh_p = P,dh_g = G,
+ dh_y = Y},
signed_params = Sig};
dec_hs(?CERTIFICATE_REQUEST,
<<?BYTE(CertTypesLen), CertTypes:CertTypesLen/binary,
@@ -672,18 +688,20 @@ dec_hs(?SERVER_HELLO_DONE, <<>>, _, _) ->
#server_hello_done{};
dec_hs(?CERTIFICATE_VERIFY,<<?UINT16(_), Signature/binary>>, _, _)->
#certificate_verify{signature = Signature};
-dec_hs(?CLIENT_KEY_EXCHANGE, PKEPMS, rsa, {3, 0}) ->
+dec_hs(?CLIENT_KEY_EXCHANGE, PKEPMS, ?KEY_EXCHANGE_RSA, {3, 0}) ->
PreSecret = #encrypted_premaster_secret{premaster_secret = PKEPMS},
#client_key_exchange{exchange_keys = PreSecret};
-dec_hs(?CLIENT_KEY_EXCHANGE, <<?UINT16(_), PKEPMS/binary>>, rsa, _) ->
+dec_hs(?CLIENT_KEY_EXCHANGE, <<?UINT16(_), PKEPMS/binary>>,
+ ?KEY_EXCHANGE_RSA, _) ->
PreSecret = #encrypted_premaster_secret{premaster_secret = PKEPMS},
#client_key_exchange{exchange_keys = PreSecret};
dec_hs(?CLIENT_KEY_EXCHANGE, <<>>, ?KEY_EXCHANGE_DIFFIE_HELLMAN, _) ->
%% TODO: Should check whether the cert already contains a suitable DH-key (7.4.7.2)
throw(?ALERT_REC(?FATAL, implicit_public_value_encoding));
-dec_hs(?CLIENT_KEY_EXCHANGE, <<?UINT16(DH_YCLen), DH_YC:DH_YCLen/binary>>,
+dec_hs(?CLIENT_KEY_EXCHANGE, <<?UINT16(DH_YLen), DH_Y:DH_YLen/binary>>,
?KEY_EXCHANGE_DIFFIE_HELLMAN, _) ->
- #client_diffie_hellman_public{dh_public = DH_YC};
+ #client_key_exchange{exchange_keys =
+ #client_diffie_hellman_public{dh_public = DH_Y}};
dec_hs(?FINISHED, VerifyData, _, _) ->
#finished{verify_data = VerifyData};
dec_hs(_, _, _, _) ->
@@ -756,12 +774,13 @@ enc_hs(#certificate{asn1_certificates = ASN1CertList}, _Version, _) ->
{?CERTIFICATE, <<?UINT24(ACLen), ASN1Certs:ACLen/binary>>};
enc_hs(#server_key_exchange{params = #server_rsa_params{rsa_modulus = Mod,
rsa_exponent = Exp},
- signed_params = SignedParams}, _Version, _) ->
+ signed_params = SignedParams}, _Version, _) ->
ModLen = byte_size(Mod),
ExpLen = byte_size(Exp),
- {?SERVER_KEY_EXCHANGE, <<?UINT16(ModLen), Mod/binary,
+ SignedLen = byte_size(SignedParams),
+ {?SERVER_KEY_EXCHANGE, <<?UINT16(ModLen),Mod/binary,
?UINT16(ExpLen), Exp/binary,
- SignedParams/binary>>
+ ?UINT16(SignedLen), SignedParams/binary>>
};
enc_hs(#server_key_exchange{params = #server_dh_params{
dh_p = P, dh_g = G, dh_y = Y},
@@ -769,10 +788,11 @@ enc_hs(#server_key_exchange{params = #server_dh_params{
PLen = byte_size(P),
GLen = byte_size(G),
YLen = byte_size(Y),
- {?SERVER_KEY_EXCHANGE, <<?UINT16(PLen), P:PLen/binary,
- ?UINT16(GLen), G:GLen/binary,
- ?UINT16(YLen), Y:YLen/binary,
- SignedParams/binary>>
+ SignedLen = byte_size(SignedParams),
+ {?SERVER_KEY_EXCHANGE, <<?UINT16(PLen), P/binary,
+ ?UINT16(GLen), G/binary,
+ ?UINT16(YLen), Y/binary,
+ ?UINT16(SignedLen), SignedParams/binary>>
};
enc_hs(#certificate_request{certificate_types = CertTypes,
certificate_authorities = CertAuths},
@@ -927,13 +947,40 @@ calc_certificate_verify({3, N}, _, Algorithm, Hashes)
when N == 1; N == 2 ->
ssl_tls1:certificate_verify(Algorithm, Hashes).
-%% server_key_exchange_hash(Algorithm, Value) when Algorithm == rsa;
-%% Algorithm == dh_rsa;
-%% Algorithm == dhe_rsa ->
-%% MD5 = crypto:md5_final(Value),
-%% SHA = crypto:sha_final(Value),
-%% <<MD5/binary, SHA/binary>>;
+server_key_exchange_hash(Algorithm, Value) when Algorithm == rsa;
+ Algorithm == dh_rsa;
+ Algorithm == dhe_rsa ->
+ MD5Context = crypto:md5_init(),
+ NewMD5Context = crypto:md5_update(MD5Context, Value),
+ MD5 = crypto:md5_final(NewMD5Context),
+
+ SHAContext = crypto:sha_init(),
+ NewSHAContext = crypto:sha_update(SHAContext, Value),
+ SHA = crypto:sha_final(NewSHAContext),
+
+ <<MD5/binary, SHA/binary>>;
+
+server_key_exchange_hash(Algorithm, Value) when Algorithm == dh_dss;
+ Algorithm == dhe_dss ->
+
+ SHAContext = crypto:sha_init(),
+ NewSHAContext = crypto:sha_update(SHAContext, Value),
+ crypto:sha_final(NewSHAContext).
-%% server_key_exchange_hash(Algorithm, Value) when Algorithm == dh_dss;
-%% Algorithm == dhe_dss ->
-%% crypto:sha_final(Value).
+
+sig_alg(dh_anon) ->
+ ?SIGNATURE_ANONYMOUS;
+sig_alg(Alg) when Alg == dhe_rsa; Alg == rsa; Alg == dh_rsa ->
+ ?SIGNATURE_RSA;
+sig_alg(Alg) when Alg == dh_dss; Alg == dhe_dss ->
+ ?SIGNATURE_DSA;
+sig_alg(_) ->
+ ?NULL.
+
+key_exchange_alg(rsa) ->
+ ?KEY_EXCHANGE_RSA;
+key_exchange_alg(Alg) when Alg == dhe_rsa; Alg == dhe_dss;
+ Alg == dh_dss; Alg == dh_rsa; Alg == dh_anon ->
+ ?KEY_EXCHANGE_DIFFIE_HELLMAN;
+key_exchange_alg(_) ->
+ ?NULL.
diff --git a/lib/ssl/src/ssl_handshake.hrl b/lib/ssl/src/ssl_handshake.hrl
index b2bdfa0934..889d39f2af 100644
--- a/lib/ssl/src/ssl_handshake.hrl
+++ b/lib/ssl/src/ssl_handshake.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -38,7 +38,8 @@
-define(NUM_OF_SESSION_ID_BYTES, 32). % TSL 1.1 & SSL 3
-define(NUM_OF_PREMASTERSECRET_BYTES, 48).
-
+-define(DEFAULT_DIFFIE_HELLMAN_GENERATOR, 2).
+-define(DEFAULT_DIFFIE_HELLMAN_PRIME, 16#FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Handshake protocol - RFC 4346 section 7.4
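
The two defines added above give the server a built-in fallback Diffie-Hellman group (the well-known 1024-bit MODP prime, Oakley group 2, with generator 2) for DHE cipher suites when no {dhfile, File} option is supplied. A minimal sketch of wrapping such defaults in the #'DHParameter'{} record used by key_exchange(server, {dh, ...}) is shown below; the helper name default_dh_params/0 is illustrative only and not part of this patch.

    %% Illustrative helper (not in the patch): package the default prime and
    %% generator in the public_key #'DHParameter'{} record expected by the
    %% server-side key exchange code.
    -include_lib("public_key/include/public_key.hrl").

    default_dh_params() ->
        #'DHParameter'{prime = ?DEFAULT_DIFFIE_HELLMAN_PRIME,
                       base  = ?DEFAULT_DIFFIE_HELLMAN_GENERATOR}.
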
diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl
index 23a5c93452..8d19abfe1e 100644
--- a/lib/ssl/src/ssl_internal.hrl
+++ b/lib/ssl/src/ssl_internal.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -57,12 +57,15 @@
verify_fun, % fun(CertVerifyErrors) -> boolean()
fail_if_no_peer_cert, % boolean()
verify_client_once, % boolean()
+ %% fun(Extensions, State, Verify, AccError) -> {Extensions, State, AccError}
+ validate_extensions_fun,
depth, % integer()
certfile, % file()
keyfile, % file()
key, %
password, %
cacertfile, % file()
+ dhfile, % file()
ciphers, %
%% Local policy for the server if it wants to reuse the session
%% or not. Defaults to always returning true.
@@ -71,6 +74,7 @@
%% If false sessions will never be reused, if true they
%% will be reused if possible.
reuse_sessions, % boolean()
+ renegotiate_at,
debug %
}).
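
The new validate_extensions_fun field holds a user callback with the signature noted in the comment above: fun(Extensions, ValidationState, Verify, AccError) -> {Extensions, ValidationState, AccError}. A minimal sketch of passing such a fun from an application (accepting whatever extensions the built-in validation left unhandled, as the test case later in this patch does) follows; Host, Port and OtherOpts are placeholders.

    %% Sketch only: accept any extensions the built-in validation did not handle.
    AcceptAll = fun(Extensions, ValidationState, _Verify, AccErrors) ->
                        {Extensions, ValidationState, AccErrors}
                end,
    {ok, Socket} = ssl:connect(Host, Port,
                               [{verify, verify_peer},
                                {validate_extensions_fun, AcceptAll} | OtherOpts]).
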
diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl
index 37a3d1b639..da48f049f6 100644
--- a/lib/ssl/src/ssl_record.erl
+++ b/lib/ssl/src/ssl_record.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -45,7 +45,7 @@
%% Encoding records
-export([encode_handshake/3, encode_alert_record/3,
- encode_change_cipher_spec/2, encode_data/3]).
+ encode_change_cipher_spec/2, encode_data/4]).
%% Decoding
-export([decode_cipher_text/2]).
@@ -474,19 +474,31 @@ split_bin(Bin, ChunkSize, Acc) ->
lists:reverse(Acc, [Bin])
end.
-encode_data(Frag, Version, ConnectionStates)
+encode_data(Frag, Version, ConnectionStates, RenegotiateAt)
when byte_size(Frag) < (?MAX_PLAIN_TEXT_LENGTH - 2048) ->
- encode_plain_text(?APPLICATION_DATA,Version,Frag,ConnectionStates);
-encode_data(Frag, Version, ConnectionStates) ->
+ case encode_plain_text(?APPLICATION_DATA,Version,Frag,ConnectionStates, RenegotiateAt) of
+ {renegotiate, Data} ->
+ {[], Data, ConnectionStates};
+ {Msg, CS} ->
+ {Msg, [], CS}
+ end;
+
+encode_data(Frag, Version, ConnectionStates, RenegotiateAt) when is_binary(Frag) ->
Data = split_bin(Frag, ?MAX_PLAIN_TEXT_LENGTH - 2048),
- {CS1, Acc} =
- lists:foldl(fun(B, {CS0, Acc}) ->
- {ET, CS1} =
- encode_plain_text(?APPLICATION_DATA,
- Version, B, CS0),
- {CS1, [ET | Acc]}
- end, {ConnectionStates, []}, Data),
- {lists:reverse(Acc), CS1}.
+ encode_data(Data, Version, ConnectionStates, RenegotiateAt);
+
+encode_data(Data, Version, ConnectionStates0, RenegotiateAt) when is_list(Data) ->
+ {ConnectionStates, EncodedMsg, NotEncdedData} =
+ lists:foldl(fun(B, {CS0, Encoded, Rest}) ->
+ case encode_plain_text(?APPLICATION_DATA,
+ Version, B, CS0, RenegotiateAt) of
+ {renegotiate, NotEnc} ->
+ {CS0, Encoded, [NotEnc | Rest]};
+ {Enc, CS1} ->
+ {CS1, [Enc | Encoded], Rest}
+ end
+ end, {ConnectionStates0, [], []}, Data),
+ {lists:reverse(EncodedMsg), lists:reverse(NotEncdedData), ConnectionStates}.
encode_handshake(Frag, Version, ConnectionStates) ->
encode_plain_text(?HANDSHAKE, Version, Frag, ConnectionStates).
@@ -499,6 +511,16 @@ encode_alert_record(#alert{level = Level, description = Description},
encode_change_cipher_spec(Version, ConnectionStates) ->
encode_plain_text(?CHANGE_CIPHER_SPEC, Version, <<1:8>>, ConnectionStates).
+encode_plain_text(Type, Version, Data, ConnectionStates, RenegotiateAt) ->
+ #connection_states{current_write =
+ #connection_state{sequence_number = Num}} = ConnectionStates,
+ case renegotiate(Num, RenegotiateAt) of
+ false ->
+ encode_plain_text(Type, Version, Data, ConnectionStates);
+ true ->
+ {renegotiate, Data}
+ end.
+
encode_plain_text(Type, Version, Data, ConnectionStates) ->
#connection_states{current_write=#connection_state{
compression_state=CompS0,
@@ -511,6 +533,11 @@ encode_plain_text(Type, Version, Data, ConnectionStates) ->
CTBin = encode_tls_cipher_text(Type, Version, CipherText),
{CTBin, ConnectionStates#connection_states{current_write = CS2}}.
+renegotiate(N, M) when N < M->
+ false;
+renegotiate(_,_) ->
+ true.
+
encode_tls_cipher_text(Type, {MajVer, MinVer}, Fragment) ->
Length = erlang:iolist_size(Fragment),
[<<?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer), ?UINT16(Length)>>, Fragment].
@@ -529,9 +556,6 @@ cipher(Type, Version, Fragment, CS0) ->
CS2 = CS1#connection_state{cipher_state=CipherS1},
{Ciphered, CS2}.
-decipher(TLS=#ssl_tls{type = ?CHANGE_CIPHER_SPEC}, CS) ->
- %% These are never encrypted
- {TLS, CS};
decipher(TLS=#ssl_tls{type=Type, version=Version, fragment=Fragment}, CS0) ->
SP = CS0#connection_state.security_parameters,
BCA = SP#security_parameters.bulk_cipher_algorithm, % or Cipher?
diff --git a/lib/ssl/src/ssl_record.hrl b/lib/ssl/src/ssl_record.hrl
index 7370e0f0b3..362b7039d4 100644
--- a/lib/ssl/src/ssl_record.hrl
+++ b/lib/ssl/src/ssl_record.hrl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -63,6 +63,13 @@
sequence_number
}).
+-define(MAX_SEQENCE_NUMBER, 18446744073709552000). %% math:pow(2, 64) - 1 = 1.8446744073709552e19
+%% Sequence numbers cannot wrap, so when the maximum is about to be reached we
+%% must renegotiate. We renegotiate a little earlier so that sequence numbers
+%% are left for the rehandshake and some remaining data.
+-define(MARGIN, 100).
+-define(DEFAULT_RENEGOTIATE_AT, ?MAX_SEQENCE_NUMBER - ?MARGIN).
+
%% ConnectionEnd
-define(SERVER, 0).
-define(CLIENT, 1).
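
The threshold defined above is compared with the current write sequence number in encode_plain_text/5 (see the ssl_record.erl hunk earlier in this diff); when the limit is hit, the data is handed back tagged renegotiate instead of letting the sequence number wrap. For testing, the threshold can be lowered through the renegotiate_at option, as the *_no_wrap_sequence_number test cases later in this patch do. A minimal client-side sketch, with Host, Port and Opts as placeholders:

    %% Sketch only: lower the renegotiation threshold far below
    %% ?DEFAULT_RENEGOTIATE_AT so a renegotiation is triggered after a
    %% handful of records have been sent.
    {ok, Socket} = ssl:connect(Host, Port,
                               [{reuse_sessions, false},
                                {renegotiate_at, 10} | Opts]),
    ok = ssl:send(Socket, <<"ping">>).
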
diff --git a/lib/ssl/src/ssl_ssl3.erl b/lib/ssl/src/ssl_ssl3.erl
index ab29ac64df..df809ce275 100644
--- a/lib/ssl/src/ssl_ssl3.erl
+++ b/lib/ssl/src/ssl_ssl3.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -182,13 +182,13 @@ setup_keys(export, MasterSecret, ServerRandom, ClientRandom,
suites() ->
[
%% TODO: uncomment when supported
- %% ?TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
%% ?TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- %% TODO: Funkar inte, borde: ?TLS_RSA_WITH_AES_256_CBC_SHA,
- %% ?TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ ?TLS_RSA_WITH_AES_256_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
%% ?TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
?TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- %% ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
%% ?TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
?TLS_RSA_WITH_AES_128_CBC_SHA,
%%?TLS_DHE_DSS_WITH_RC4_128_SHA, TODO: Support this?
diff --git a/lib/ssl/src/ssl_tls1.erl b/lib/ssl/src/ssl_tls1.erl
index e0013c48ac..ce9a135168 100644
--- a/lib/ssl/src/ssl_tls1.erl
+++ b/lib/ssl/src/ssl_tls1.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -136,13 +136,13 @@ mac_hash(Method, Mac_write_secret, Seq_num, Type, {Major, Minor},
suites() ->
[
%% TODO: uncomment when supported
- %% ?TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- %% ?TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- %% TODO: Funkar inte, borde: ?TLS_RSA_WITH_AES_256_CBC_SHA,
- %% ?TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ %%?TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ ?TLS_RSA_WITH_AES_256_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
%% ?TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
?TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- %% ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
%% ?TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
?TLS_RSA_WITH_AES_128_CBC_SHA,
%%?TLS_DHE_DSS_WITH_RC4_128_SHA, TODO: Support this?
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 3d9cec43dd..7f33efd7e1 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -26,10 +26,13 @@
-include("test_server.hrl").
-include("test_server_line.hrl").
+-include_lib("public_key/include/public_key.hrl").
-define('24H_in_sec', 86400).
-define(TIMEOUT, 60000).
-define(EXPIRE, 10).
+-define(SLEEP, 500).
+
-behaviour(ssl_session_cache_api).
@@ -150,19 +153,26 @@ all(doc) ->
all(suite) ->
[app, connection_info, controlling_process, controller_dies,
peercert, connect_dist,
- peername, sockname, socket_options, versions, cipher_suites,
+ peername, sockname, socket_options, misc_ssl_options, versions, cipher_suites,
upgrade, upgrade_with_timeout, tcp_connect,
ipv6, ekeyfile, ecertfile, ecacertfile, eoptions, shutdown,
shutdown_write, shutdown_both, shutdown_error, ciphers,
- send_close,
+ send_close, close_transport_accept, dh_params,
server_verify_peer_passive,
server_verify_peer_active, server_verify_peer_active_once,
server_verify_none_passive, server_verify_none_active,
- server_verify_none_active_once,
- server_verify_no_cacerts, client_verify_none_passive,
+ server_verify_none_active_once, server_verify_no_cacerts,
+ server_require_peer_cert_ok, server_require_peer_cert_fail,
+ server_verify_client_once_passive,
+ server_verify_client_once_active,
+ server_verify_client_once_active_once,
+ client_verify_none_passive,
client_verify_none_active, client_verify_none_active_once
%%, session_cache_process_list, session_cache_process_mnesia
- ,reuse_session, reuse_session_expired, server_does_not_want_to_reuse_session
+ ,reuse_session, reuse_session_expired, server_does_not_want_to_reuse_session,
+ client_renegotiate, server_renegotiate,
+ client_no_wrap_sequence_number, server_no_wrap_sequence_number,
+ extended_key_usage, validate_extensions_fun
].
%% Test cases starts here.
@@ -236,7 +246,7 @@ controlling_process(Config) when is_list(Config) ->
Port = ssl_test_lib:inet_port(Server),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
- {from, self()},
+ {from, self()},
{mfa, {?MODULE,
controlling_process_result, [self(),
ClientMsg]}},
@@ -267,7 +277,7 @@ controlling_process_result(Socket, Pid, Msg) ->
ok = ssl:controlling_process(Socket, Pid),
%% Make sure other side has evaluated controlling_process
%% before message is sent
- test_server:sleep(100),
+ test_server:sleep(?SLEEP),
ssl:send(Socket, Msg),
no_result_msg.
@@ -298,7 +308,7 @@ controller_dies(Config) when is_list(Config) ->
{options, ClientOpts}]),
test_server:format("Testcase ~p, Client ~p Server ~p ~n", [self(), Client, Server]),
- timer:sleep(200), %% so that they are connected
+ test_server:sleep(?SLEEP), %% so that they are connected
process_flag(trap_exit, true),
@@ -307,7 +317,7 @@ controller_dies(Config) when is_list(Config) ->
get_close(Client, ?LINE),
%% Test that clients die when process disappear
- Server ! listen, timer:sleep(200),
+ Server ! listen,
Tester = self(),
Connect = fun(Pid) ->
{ok, Socket} = ssl:connect(Hostname, Port,
@@ -321,7 +331,7 @@ controller_dies(Config) when is_list(Config) ->
get_close(Client2, ?LINE),
%% Test that clients die when the controlling process have changed
- Server ! listen, timer:sleep(200),
+ Server ! listen,
Client3 = spawn_link(fun() -> Connect(Tester) end),
Controller = spawn_link(fun() -> receive die_nice -> normal end end),
@@ -345,7 +355,7 @@ controller_dies(Config) when is_list(Config) ->
get_close(Controller, ?LINE),
%% Test that servers die
- Server ! listen, timer:sleep(200),
+ Server ! listen,
LastClient = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
{from, self()},
@@ -353,7 +363,7 @@ controller_dies(Config) when is_list(Config) ->
controller_dies_result, [self(),
ClientMsg]}},
{options, [{reuseaddr,true}|ClientOpts]}]),
- timer:sleep(200), %% so that they are connected
+ test_server:sleep(?SLEEP), %% so that they are connected
exit(Server, killed),
get_close(Server, ?LINE),
@@ -484,9 +494,9 @@ peername(Config) when is_list(Config) ->
Port = ssl_test_lib:inet_port(Server),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
- {from, self()},
- {mfa, {?MODULE, peername_result, []}},
- {options, [{port, 0} | ClientOpts]}]),
+ {from, self()},
+ {mfa, {?MODULE, peername_result, []}},
+ {options, [{port, 0} | ClientOpts]}]),
ClientPort = ssl_test_lib:inet_port(Client),
ServerIp = ssl_test_lib:node_to_hostip(ServerNode),
@@ -526,6 +536,7 @@ sockname(Config) when is_list(Config) ->
{from, self()},
{mfa, {?MODULE, sockname_result, []}},
{options, [{port, 0} | ClientOpts]}]),
+
ClientPort = ssl_test_lib:inet_port(Client),
ServerIp = ssl_test_lib:node_to_hostip(ServerNode),
ClientIp = ssl_test_lib:node_to_hostip(ClientNode),
@@ -602,6 +613,46 @@ socket_options_result(Socket, Options, DefaultValues, NewOptions, NewValues) ->
ok.
%%--------------------------------------------------------------------
+misc_ssl_options(doc) ->
+ ["Test what happens when we give valid options"];
+
+misc_ssl_options(suite) ->
+ [];
+
+misc_ssl_options(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ %% Check that ssl options not tested elsewhere are filtered away, i.e. not passed to inet.
+ TestOpts = [{depth, 1},
+ {key, undefined},
+ {password, []},
+ {reuse_session, fun(_,_,_,_) -> true end},
+ {debug, []},
+ {cb_info, {gen_tcp, tcp, tcp_closed}}],
+
+ Server =
+ ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, TestOpts ++ ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client =
+ ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, TestOpts ++ ClientOpts}]),
+
+ test_server:format("Testcase ~p, Client ~p Server ~p ~n",
+ [self(), Client, Server]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
versions(doc) ->
["Test API function versions/0"];
@@ -667,13 +718,71 @@ send_close(Config) when is_list(Config) ->
test_server:format("Testcase ~p, Client ~p Server ~p ~n",
[self(), self(), Server]),
- ok = ssl:send(SslS, "HejHopp"),
- {ok,<<"Hejhopp">>} = ssl:recv(SslS, 7),
+ ok = ssl:send(SslS, "Hello world"),
+ {ok,<<"Hello world">>} = ssl:recv(SslS, 11),
gen_tcp:close(TcpS),
- {error, _} = ssl:send(SslS, "HejHopp"),
+ {error, _} = ssl:send(SslS, "Hello world"),
ssl_test_lib:close(Server).
%%--------------------------------------------------------------------
+close_transport_accept(doc) ->
+ ["Tests closing ssl socket when waiting on ssl:transport_accept/1"];
+
+close_transport_accept(suite) ->
+ [];
+
+close_transport_accept(Config) when is_list(Config) ->
+ ServerOpts = ?config(server_opts, Config),
+ {_ClientNode, ServerNode, _Hostname} = ssl_test_lib:run_where(Config),
+
+ Port = 0,
+ Opts = [{active, false} | ServerOpts],
+ {ok, ListenSocket} = rpc:call(ServerNode, ssl, listen, [Port, Opts]),
+ spawn_link(fun() ->
+ test_server:sleep(?SLEEP),
+ rpc:call(ServerNode, ssl, close, [ListenSocket])
+ end),
+ case rpc:call(ServerNode, ssl, transport_accept, [ListenSocket]) of
+ {error, closed} ->
+ ok;
+ Other ->
+ exit({?LINE, Other})
+ end.
+
+%%--------------------------------------------------------------------
+dh_params(doc) ->
+ ["Test to specify DH-params file in server."];
+
+dh_params(suite) ->
+ [];
+
+dh_params(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ DataDir = ?config(data_dir, Config),
+ DHParamFile = filename:join(DataDir, "dHParam.pem"),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, [{dhfile, DHParamFile} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options,
+ [{ciphers,[{dhe_rsa,aes_256_cbc,sha,ignore}]} |
+ ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
upgrade(doc) ->
["Test that you can upgrade an tcp connection to an ssl connection"];
@@ -710,11 +819,11 @@ upgrade(Config) when is_list(Config) ->
ssl_test_lib:close(Client).
upgrade_result(Socket) ->
- ok = ssl:send(Socket, "Hejhopp"),
+ ok = ssl:send(Socket, "Hello world"),
%% Make sure binary is inherited from tcp socket and that we do
%% not get the list default!
receive
- {ssl, _, <<"Hejhopp">>} ->
+ {ssl, _, <<"Hello world">>} ->
ok
end.
@@ -763,15 +872,14 @@ tcp_connect(suite) ->
[];
tcp_connect(Config) when is_list(Config) ->
- ClientOpts = ?config(client_opts, Config),
ServerOpts = ?config(server_opts, Config),
- {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ {_, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
TcpOpts = [binary, {reuseaddr, true}],
Server = ssl_test_lib:start_upgrade_server([{node, ServerNode}, {port, 0},
{from, self()},
{timeout, 5000},
- {mfa, {?MODULE, should_close, []}},
+ {mfa, {?MODULE, dummy, []}},
{tcp_options, TcpOpts},
{ssl_options, ServerOpts}]),
Port = ssl_test_lib:inet_port(Server),
@@ -780,18 +888,20 @@ tcp_connect(Config) when is_list(Config) ->
test_server:format("Testcase ~p connected to Server ~p ~n", [self(), Server]),
gen_tcp:send(Socket, "<SOME GARBLED NON SSL MESSAGE>"),
- ssl_test_lib:check_result(Server, {error,esslerrssl}, tcp_closed, Socket),
-
+ receive
+ {tcp_closed, Socket} ->
+ receive
+ {Server, {error, Error}} ->
+ test_server:format("Error ~p", [Error])
+ end
+ end,
ssl_test_lib:close(Server).
-should_close(Socket) ->
- receive
- {ssl, Socket, closed} ->
- server_closed;
- Other ->
- exit({?LINE, Other})
- end.
+dummy(_Socket) ->
+ %% Should not happen as the ssl connection will not be established
+ %% due to fatal handshake failure
+ exit(kill).
%%--------------------------------------------------------------------
ipv6(doc) ->
@@ -843,12 +953,14 @@ ekeyfile(Config) when is_list(Config) ->
ClientOpts = ?config(client_opts, Config),
BadOpts = ?config(server_bad_key, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
- Port = ssl_test_lib:inet_port(ServerNode),
-
+
Server =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
+ ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
{from, self()},
{options, BadOpts}]),
+
+ Port = ssl_test_lib:inet_port(Server),
+
Client =
ssl_test_lib:start_client_error([{node, ClientNode},
{port, Port}, {host, Hostname},
@@ -869,19 +981,21 @@ ecertfile(Config) when is_list(Config) ->
ClientOpts = ?config(client_opts, Config),
ServerBadOpts = ?config(server_bad_cert, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
- Port = ssl_test_lib:inet_port(ServerNode),
- Server0 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
+ Server =
+ ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
{from, self()},
{options, ServerBadOpts}]),
- Client0 =
+
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client =
ssl_test_lib:start_client_error([{node, ClientNode},
{port, Port}, {host, Hostname},
{from, self()},
{options, ClientOpts}]),
- ssl_test_lib:check_result(Server0, {error, ecertfile}, Client0,
+ ssl_test_lib:check_result(Server, {error, ecertfile}, Client,
{error, closed}).
@@ -896,15 +1010,18 @@ ecacertfile(Config) when is_list(Config) ->
ClientOpts = [{reuseaddr, true}|?config(client_opts, Config)],
ServerBadOpts = [{reuseaddr, true}|?config(server_bad_ca, Config)],
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
- Port = ssl_test_lib:inet_port(ServerNode),
Server0 =
ssl_test_lib:start_server_error([{node, ServerNode},
- {port, Port}, {from, self()},
+ {port, 0}, {from, self()},
{options, ServerBadOpts}]),
+
+ Port0 = ssl_test_lib:inet_port(Server0),
+
+
Client0 =
ssl_test_lib:start_client_error([{node, ClientNode},
- {port, Port}, {host, Hostname},
+ {port, Port0}, {host, Hostname},
{from, self()},
{options, ClientOpts}]),
@@ -917,11 +1034,14 @@ ecacertfile(Config) when is_list(Config) ->
Server1 =
ssl_test_lib:start_server_error([{node, ServerNode},
- {port, Port}, {from, self()},
+ {port, 0}, {from, self()},
{options, ServerBadOpts1}]),
+
+ Port1 = ssl_test_lib:inet_port(Server1),
+
Client1 =
ssl_test_lib:start_client_error([{node, ClientNode},
- {port, Port}, {host, Hostname},
+ {port, Port1}, {host, Hostname},
{from, self()},
{options, ClientOpts}]),
@@ -942,198 +1062,58 @@ eoptions(Config) when is_list(Config) ->
ClientOpts = ?config(client_opts, Config),
ServerOpts = ?config(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
- Port = ssl_test_lib:inet_port(ServerNode),
-
- %% Emulated opts
- Server0 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{active, trice} | ServerOpts]}]),
- Client0 =
- ssl_test_lib:start_client_error([{node, ClientNode},
- {port, Port}, {host, Hostname},
- {from, self()},
- {options, [{active, trice} | ClientOpts]}]),
- ssl_test_lib:check_result(Server0, {error, {eoptions, {active,trice}}},
- Client0, {error, {eoptions, {active,trice}}}),
-
- test_server:sleep(500),
-
- Server1 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{header, a} | ServerOpts]}]),
- Client1 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{header, a} | ClientOpts]}]),
- ssl_test_lib:check_result(Server1, {error, {eoptions, {header, a}}},
- Client1, {error, {eoptions, {header, a}}}),
-
- test_server:sleep(500),
-
-
- Server2 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{mode, a} | ServerOpts]}]),
-
- Client2 =
- ssl_test_lib:start_client_error([{node, ClientNode},
- {port, Port}, {host, Hostname},
- {from, self()},
- {options, [{mode, a} | ClientOpts]}]),
- ssl_test_lib:check_result(Server2, {error, {eoptions, {mode, a}}},
- Client2, {error, {eoptions, {mode, a}}}),
-
-
- test_server:sleep(500),
-
- Server3 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{packet, 8.0} | ServerOpts]}]),
- Client3 =
- ssl_test_lib:start_client_error([{node, ClientNode},
- {port, Port}, {host, Hostname},
- {from, self()},
- {options, [{packet, 8.0} | ClientOpts]}]),
- ssl_test_lib:check_result(Server3, {error, {eoptions, {packet, 8.0}}},
- Client3, {error, {eoptions, {packet, 8.0}}}),
-
- test_server:sleep(500),
-
- %% ssl
- Server4 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{verify, 4} | ServerOpts]}]),
- Client4 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{verify, 4} | ClientOpts]}]),
- ssl_test_lib:check_result(Server4, {error, {eoptions, {verify, 4}}},
- Client4, {error, {eoptions, {verify, 4}}}),
-
- test_server:sleep(500),
-
- Server5 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{depth, four} | ServerOpts]}]),
- Client5 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{depth, four} | ClientOpts]}]),
- ssl_test_lib:check_result(Server5, {error, {eoptions, {depth, four}}},
- Client5, {error, {eoptions, {depth, four}}}),
-
- test_server:sleep(500),
-
- Server6 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{cacertfile, ""} | ServerOpts]}]),
- Client6 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{cacertfile, ""} | ClientOpts]}]),
- ssl_test_lib:check_result(Server6, {error, {eoptions, {cacertfile, ""}}},
- Client6, {error, {eoptions, {cacertfile, ""}}}),
-
-
- test_server:sleep(500),
-
- Server7 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{certfile, 'cert.pem'} | ServerOpts]}]),
- Client7 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{certfile, 'cert.pem'} | ClientOpts]}]),
- ssl_test_lib:check_result(Server7,
- {error, {eoptions, {certfile, 'cert.pem'}}},
- Client7, {error, {eoptions, {certfile, 'cert.pem'}}}),
-
- test_server:sleep(500),
-
- Server8 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{keyfile,'key.pem' } | ServerOpts]}]),
- Client8 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()}, {options, [{keyfile, 'key.pem'}
- | ClientOpts]}]),
- ssl_test_lib:check_result(Server8,
- {error, {eoptions, {keyfile, 'key.pem'}}},
- Client8, {error, {eoptions, {keyfile, 'key.pem'}}}),
-
- test_server:sleep(500),
-
- Server9 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{key, 'key.pem' } | ServerOpts]}]),
- Client9 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()}, {options, [{key, 'key.pem'}
- | ClientOpts]}]),
- ssl_test_lib:check_result(Server9, {error, {eoptions, {key, 'key.pem'}}},
- Client9, {error, {eoptions, {key, 'key.pem'}}}),
+ Check = fun(Client, Server, {versions, [sslv2, sslv3]} = Option) ->
+ ssl_test_lib:check_result(Server,
+ {error, {eoptions, {sslv2, Option}}},
+ Client,
+ {error, {eoptions, {sslv2, Option}}});
+ (Client, Server, Option) ->
+ ssl_test_lib:check_result(Server,
+ {error, {eoptions, Option}},
+ Client,
+ {error, {eoptions, Option}})
+ end,
- test_server:sleep(500),
-
- Server10 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{password, foo} | ServerOpts]}]),
- Client10 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{password, foo} | ClientOpts]}]),
- ssl_test_lib:check_result(Server10, {error, {eoptions, {password, foo}}},
- Client10, {error, {eoptions, {password, foo}}}),
-
- test_server:sleep(500),
-
- %% Misc
- Server11 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{ssl_imp, cool} | ServerOpts]}]),
- Client11 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{ssl_imp, cool} | ClientOpts]}]),
- ssl_test_lib:check_result(Server11, {error, {eoptions, {ssl_imp, cool}}},
- Client11, {error, {eoptions, {ssl_imp, cool}}}),
-
-
- test_server:sleep(500),
-
- Server12 =
- ssl_test_lib:start_server_error([{node, ServerNode}, {port, Port},
- {from, self()},
- {options, [{debug, cool} | ServerOpts]}]),
- Client12 =
- ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {options, [{debug, cool} | ClientOpts]}]),
- ssl_test_lib:check_result(Server12, {error, {eoptions, {debug, cool}}},
- Client12, {error, {eoptions, {debug, cool}}}).
+ TestOpts = [{versions, [sslv2, sslv3]},
+ {ssl_imp, cool},
+ {verify, 4},
+ {verify_fun, function},
+ {fail_if_no_peer_cert, 0},
+ {verify_client_once, 1},
+ {validate_extensions_fun, function},
+ {depth, four},
+ {certfile, 'cert.pem'},
+ {keyfile,'key.pem' },
+ {password, foo},
+ {cacertfile, ""},
+ {dhfile,'dh.pem' },
+ {ciphers, [{foo, bar, sha, ignore}]},
+ {reuse_session, foo},
+ {reuse_sessions, 0},
+ {renegotiate_at, "10"},
+ {debug, 1},
+ {mode, depech},
+ {packet, 8.0},
+ {packet_size, "2"},
+ {header, a},
+ {active, trice},
+ {key, 'key.pem' }],
+
+ [begin
+ Server =
+ ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {options, [TestOpt | ServerOpts]}]),
+ %% Will never reach a point where port is used.
+ Client =
+ ssl_test_lib:start_client_error([{node, ClientNode}, {port, 0},
+ {host, Hostname}, {from, self()},
+ {options, [TestOpt | ClientOpts]}]),
+ Check(Client, Server, TestOpt),
+ ok
+ end || TestOpt <- TestOpts],
+ ok.
%%--------------------------------------------------------------------
shutdown(doc) ->
@@ -1203,7 +1183,7 @@ shutdown_write(Config) when is_list(Config) ->
ssl_test_lib:check_result(Server, ok, Client, {error, closed}).
shutdown_write_result(Socket, server) ->
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
ssl:shutdown(Socket, write);
shutdown_write_result(Socket, client) ->
ssl:recv(Socket, 0).
@@ -1233,7 +1213,7 @@ shutdown_both(Config) when is_list(Config) ->
ssl_test_lib:check_result(Server, ok, Client, {error, closed}).
shutdown_both_result(Socket, server) ->
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
ssl:shutdown(Socket, read_write);
shutdown_both_result(Socket, client) ->
ssl:recv(Socket, 0).
@@ -1339,7 +1319,7 @@ reuse_session(Config) when is_list(Config) ->
Client0 =
ssl_test_lib:start_client([{node, ClientNode},
{port, Port}, {host, Hostname},
- {mfa, {?MODULE, no_result, []}},
+ {mfa, {ssl_test_lib, no_result, []}},
{from, self()}, {options, ClientOpts}]),
SessionInfo =
receive
@@ -1350,7 +1330,7 @@ reuse_session(Config) when is_list(Config) ->
Server ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client1 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -1410,7 +1390,7 @@ reuse_session(Config) when is_list(Config) ->
Server1 ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client4 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -1457,7 +1437,7 @@ reuse_session_expired(Config) when is_list(Config) ->
Client0 =
ssl_test_lib:start_client([{node, ClientNode},
{port, Port}, {host, Hostname},
- {mfa, {?MODULE, no_result, []}},
+ {mfa, {ssl_test_lib, no_result, []}},
{from, self()}, {options, ClientOpts}]),
SessionInfo =
receive
@@ -1468,7 +1448,7 @@ reuse_session_expired(Config) when is_list(Config) ->
Server ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client1 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -1530,7 +1510,7 @@ server_does_not_want_to_reuse_session(Config) when is_list(Config) ->
Client0 =
ssl_test_lib:start_client([{node, ClientNode},
{port, Port}, {host, Hostname},
- {mfa, {?MODULE, no_result, []}},
+ {mfa, {ssl_test_lib, no_result, []}},
{from, self()}, {options, ClientOpts}]),
SessionInfo =
receive
@@ -1541,7 +1521,7 @@ server_does_not_want_to_reuse_session(Config) when is_list(Config) ->
Server ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client1 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -1725,8 +1705,122 @@ server_verify_none_active_once(Config) when is_list(Config) ->
ssl_test_lib:check_result(Server, ok, Client, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+%%--------------------------------------------------------------------
+
+server_verify_client_once_passive(doc) ->
+ ["Test server option verify_client_once"];
+
+server_verify_client_once_passive(suite) ->
+ [];
+
+server_verify_client_once_passive(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_verification_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result, []}},
+ {options, [{active, false}, {verify, verify_peer},
+ {verify_client_once, true}
+ | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client0 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result, []}},
+ {options, [{active, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client0, ok),
+ ssl_test_lib:close(Client0),
+ Server ! listen,
+ Client1 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, result_ok, []}},
+ {options, [{active, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Client1, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client1).
+
+%%--------------------------------------------------------------------
+
+server_verify_client_once_active(doc) ->
+ ["Test server option verify_client_once"];
+
+server_verify_client_once_active(suite) ->
+ [];
+
+server_verify_client_once_active(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_verification_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, [{active, once}, {verify, verify_peer},
+ {verify_client_once, true}
+ | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client0 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, [{active, true} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client0, ok),
+ ssl_test_lib:close(Client0),
+ Server ! listen,
+ Client1 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, result_ok, []}},
+ {options, [{active, true} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Client1, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client1).
+
+%%--------------------------------------------------------------------
+
+server_verify_client_once_active_once(doc) ->
+ ["Test server option verify_client_once"];
+
+server_verify_client_once_active_once(suite) ->
+ [];
+server_verify_client_once_active_once(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_verification_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active_once, []}},
+ {options, [{active, once}, {verify, verify_peer},
+ {verify_client_once, true}
+ | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client0 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active_once, []}},
+ {options, [{active, once} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client0, ok),
+ ssl_test_lib:close(Client0),
+ Server ! listen,
+
+ Client1 = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, result_ok, []}},
+ {options, [{active, once} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Client1, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client1).
+
%%--------------------------------------------------------------------
server_verify_no_cacerts(doc) ->
@@ -1744,7 +1838,68 @@ server_verify_no_cacerts(Config) when is_list(Config) ->
| ServerOpts]}]),
ssl_test_lib:check_result(Server, {error, {eoptions, {cacertfile, ""}}}).
+
+%%--------------------------------------------------------------------
+
+server_require_peer_cert_ok(doc) ->
+ ["Test server option fail_if_no_peer_cert when peer sends cert"];
+
+server_require_peer_cert_ok(suite) ->
+ [];
+
+server_require_peer_cert_ok(Config) when is_list(Config) ->
+ ServerOpts = [{verify, verify_peer}, {fail_if_no_peer_cert, true}
+ | ?config(server_verification_opts, Config)],
+ ClientOpts = ?config(client_verification_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result, []}},
+ {options, [{active, false} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result, []}},
+ {options, [{active, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+
+server_require_peer_cert_fail(doc) ->
+ ["Test server option fail_if_no_peer_cert when peer doesn't send cert"];
+
+server_require_peer_cert_fail(suite) ->
+ [];
+
+server_require_peer_cert_fail(Config) when is_list(Config) ->
+ ServerOpts = [{verify, verify_peer}, {fail_if_no_peer_cert, true}
+ | ?config(server_verification_opts, Config)],
+ BadClientOpts = ?config(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, no_result, []}},
+ {options, [{active, false} | ServerOpts]}]),
+
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, no_result, []}},
+ {options, [{active, false} | BadClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, {error, esslaccept},
+ Client, {error, esslconnect}),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
%%--------------------------------------------------------------------
client_verify_none_passive(doc) ->
@@ -1849,31 +2004,289 @@ client_verify_none_active_once(Config) when is_list(Config) ->
ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+client_renegotiate(doc) ->
+ ["Test ssl:renegotiate/1 on client."];
+
+client_renegotiate(suite) ->
+ [];
+
+client_renegotiate(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Data = "From erlang to erlang",
+
+ Server =
+ ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, erlang_ssl_receive, [Data]}},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE,
+ renegotiate, [Data]}},
+ {options, [{reuse_sessions, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Client, ok, Server, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client),
+ process_flag(trap_exit, false),
+ ok.
+%%--------------------------------------------------------------------
+server_renegotiate(doc) ->
+ ["Test ssl:renegotiate/1 on server."];
+
+server_renegotiate(suite) ->
+ [];
+
+server_renegotiate(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Data = "From erlang to erlang",
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE,
+ renegotiate, [Data]}},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, erlang_ssl_receive, [Data]}},
+ {options, [{reuse_sessions, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client),
+ ok.
+
+%%--------------------------------------------------------------------
+client_no_wrap_sequence_number(doc) ->
+ ["Test that the Erlang client will renegotiate the session when the",
+  "max sequence number ceiling is about to be reached. In this test case",
+  "the test option renegotiate_at is used to lower the threshold",
+  "substantially."];
+
+client_no_wrap_sequence_number(suite) ->
+ [];
+
+client_no_wrap_sequence_number(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ ErlData = "From erlang to erlang",
+ N = 10,
+
+ Server =
+ ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib,
+ trigger_renegotiate, [[ErlData, N+2]]}},
+ {options, [{reuse_sessions, false},
+ {renegotiate_at, N} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client),
+ process_flag(trap_exit, false),
+ ok.
+%%--------------------------------------------------------------------
+server_no_wrap_sequence_number(doc) ->
+ ["Test that the Erlang server will renegotiate the session when the",
+  "max sequence number ceiling is about to be reached. In this test case",
+  "the test option renegotiate_at is used to lower the threshold",
+  "substantially."];
+
+server_no_wrap_sequence_number(suite) ->
+ [];
+
+server_no_wrap_sequence_number(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Data = "From erlang to erlang",
+ N = 10,
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib,
+ trigger_renegotiate, [[Data, N+2]]}},
+ {options, [{renegotiate_at, N} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, [{reuse_sessions, false} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client),
+ ok.
+
+%%--------------------------------------------------------------------
+extended_key_usage(doc) ->
+ ["Test cert that has a critical extended_key_usage extension"];
+
+extended_key_usage(suite) ->
+ [];
+
+extended_key_usage(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ PrivDir = ?config(priv_dir, Config),
+
+ CertFile = proplists:get_value(certfile, ServerOpts),
+ KeyFile = proplists:get_value(keyfile, ServerOpts),
+ NewCertFile = filename:join(PrivDir, "cert.pem"),
+
+ {ok, [{cert, DerCert, _}]} = public_key:pem_to_der(CertFile),
+
+ {ok, [KeyInfo]} = public_key:pem_to_der(KeyFile),
+
+ {ok, Key} = public_key:decode_private_key(KeyInfo),
+
+ {ok, OTPCert} = public_key:pkix_decode_cert(DerCert, otp),
+
+ ExtKeyUsageExt = {'Extension', ?'id-ce-extKeyUsage', true, [?'id-kp-serverAuth']},
+
+ OTPTbsCert = OTPCert#'OTPCertificate'.tbsCertificate,
+
+ Extensions = OTPTbsCert#'OTPTBSCertificate'.extensions,
+
+ NewOTPTbsCert = OTPTbsCert#'OTPTBSCertificate'{extensions = [ExtKeyUsageExt |Extensions]},
+
+ NewDerCert = public_key:sign(NewOTPTbsCert, Key),
+
+ public_key:der_to_pem(NewCertFile, [{cert, NewDerCert}]),
+
+ NewServerOpts = [{certfile, NewCertFile} | proplists:delete(certfile, ServerOpts)],
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, NewServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, ClientOpts}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+validate_extensions_fun(doc) ->
+ ["Test that it is possible to specify a validate_extensions_fun"];
+
+validate_extensions_fun(suite) ->
+ [];
+
+validate_extensions_fun(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_verification_opts, Config),
+ ServerOpts = ?config(server_verification_opts, Config),
+
+ Fun = fun(Extensions, State, _, AccError) ->
+ {Extensions, State, AccError}
+ end,
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options, [{validate_extensions_fun, Fun},
+ {verify, verify_peer} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, send_recv_result_active, []}},
+ {options,[{validate_extensions_fun, Fun} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
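+%% A minimal usage sketch, assuming only the fun shape and the
+%% {validate_extensions_fun, Fun} option name shown in the test case
+%% above: the same pass-through fun handed straight to ssl:connect/3.
+%% Host, Port and ClientOpts are placeholders supplied by the caller.
+validate_extensions_fun_sketch(Host, Port, ClientOpts) ->
+    Fun = fun(Extensions, State, _, AccError) ->
+                  {Extensions, State, AccError}
+          end,
+    ssl:connect(Host, Port,
+                [{validate_extensions_fun, Fun},
+                 {verify, verify_peer} | ClientOpts]).
+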
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
send_recv_result(Socket) ->
- ssl:send(Socket, "Hejhopp"),
- test_server:sleep(100),
- {ok,"Hejhopp"} = ssl:recv(Socket, 7),
+ ssl:send(Socket, "Hello world"),
+ {ok,"Hello world"} = ssl:recv(Socket, 11),
ok.
send_recv_result_active(Socket) ->
- ssl:send(Socket, "Hejhopp"),
- test_server:sleep(100),
+ ssl:send(Socket, "Hello world"),
receive
- {ssl, Socket, "Hejhopp"} ->
+ {ssl, Socket, "Hello world"} ->
ok
end.
send_recv_result_active_once(Socket) ->
- ssl:send(Socket, "Hejhopp"),
- test_server:sleep(100),
+ ssl:send(Socket, "Hello world"),
receive
- {ssl, Socket, "Hejhopp"} ->
+ {ssl, Socket, "Hello world"} ->
ok
end.
+result_ok(_Socket) ->
+ ok.
+
+renegotiate(Socket, Data) ->
+ test_server:format("Renegotiating ~n", []),
+ Result = ssl:renegotiate(Socket),
+ test_server:format("Result ~p~n", [Result]),
+ ssl:send(Socket, Data),
+ case Result of
+ ok ->
+ ok;
+ %% It is not an error in erlang ssl
+ %% if peer rejects renegotiation.
+ %% The connection will stay up.
+ {error, renegotiation_rejected} ->
+ ok;
+ Other ->
+ Other
+ end.
+
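+%% A hypothetical test-case sketch (name and data invented for
+%% illustration, following the start_server/start_client pattern used
+%% elsewhere in this suite) showing how renegotiate/2 above can be
+%% driven as the client mfa:
+client_renegotiate_sketch(Config) ->
+    ServerOpts = ?config(server_opts, Config),
+    ClientOpts = ?config(client_opts, Config),
+    {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+    Data = "From erlang to erlang",
+    Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+                                        {from, self()},
+                                        {mfa, {ssl_test_lib, no_result, []}},
+                                        {options, ServerOpts}]),
+    Port = ssl_test_lib:inet_port(Server),
+    Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+                                        {host, Hostname},
+                                        {from, self()},
+                                        {mfa, {?MODULE, renegotiate, [Data]}},
+                                        {options, ClientOpts}]),
+    ssl_test_lib:check_result(Client, ok),
+    ssl_test_lib:close(Server),
+    ssl_test_lib:close(Client).
+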
session_cache_process_list(doc) ->
["Test reuse of sessions (short handshake)"];
@@ -1909,7 +2322,7 @@ session_cache_process(Type,Config) when is_list(Config) ->
Client0 =
ssl_test_lib:start_client([{node, ClientNode},
{port, Port}, {host, Hostname},
- {mfa, {?MODULE, no_result, []}},
+ {mfa, {ssl_test_lib, no_result, []}},
{from, self()}, {options, ClientOpts}]),
SessionInfo =
receive
@@ -1920,7 +2333,7 @@ session_cache_process(Type,Config) when is_list(Config) ->
Server ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client1 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -1963,7 +2376,7 @@ session_cache_process(Type,Config) when is_list(Config) ->
Server1 ! listen,
%% Make sure session is registered
- test_server:sleep(500),
+ test_server:sleep(?SLEEP),
Client4 =
ssl_test_lib:start_client([{node, ClientNode},
@@ -2112,3 +2525,14 @@ session_loop(Sess) ->
session_loop(Sess)
end.
+erlang_ssl_receive(Socket, Data) ->
+ receive
+ {ssl, Socket, Data} ->
+ io:format("Received ~p~n",[Data]),
+ ok;
+ Other ->
+ test_server:fail({unexpected_message, Other})
+ after ?SLEEP * 3 ->
+ test_server:fail({did_not_get, Data})
+ end.
+
diff --git a/lib/ssl/test/ssl_basic_SUITE_data/dHParam.pem b/lib/ssl/test/ssl_basic_SUITE_data/dHParam.pem
new file mode 100644
index 0000000000..feb581da30
--- /dev/null
+++ b/lib/ssl/test/ssl_basic_SUITE_data/dHParam.pem
@@ -0,0 +1,5 @@
+-----BEGIN DH PARAMETERS-----
+MIGHAoGBAMY5VmCZ22ZEy/KO8kjt94PH7ZtSG0Z0zitlMlvd4VsNkDzXsVeu+wkH
+FGDC3h3vgv6iwXGCbmrSOVk/FPZbzLhwZ8aLnkUFOBbOvVvb1JptQwOt8mf+eScG
+M2gGBktheQV5Nf1IrzOctG7VGt+neiqb/Y86uYCcDdL+M8++0qnLAgEC
+-----END DH PARAMETERS-----
diff --git a/lib/ssl/test/ssl_packet_SUITE.erl b/lib/ssl/test/ssl_packet_SUITE.erl
index f031552457..1bcb9a657b 100644
--- a/lib/ssl/test/ssl_packet_SUITE.erl
+++ b/lib/ssl/test/ssl_packet_SUITE.erl
@@ -144,7 +144,9 @@ all(suite) ->
packet_wait_passive, packet_wait_active,
packet_baddata_passive, packet_baddata_active,
packet_size_passive, packet_size_active,
- packet_erl_decode
+ packet_erl_decode,
+ packet_http_decode,
+ packet_http_bin_decode_multi
].
%% Test cases starts here.
@@ -1466,6 +1468,173 @@ client_packet_decode(Socket, CDR) ->
end,
ok.
+%%--------------------------------------------------------------------
+packet_http_decode(doc) ->
+ ["Test setting the packet option {packet, http}"];
+packet_http_decode(suite) ->
+ [];
+
+packet_http_decode(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Request = "GET / HTTP/1.1\r\n"
+ "host: www.example.com\r\n"
+ "user-agent: HttpTester\r\n"
+ "\r\n",
+ Response = "HTTP/1.1 200 OK\r\n"
+ "\r\n"
+ "Hello!",
+
+ Server = ssl_test_lib:start_server([{node, ClientNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, server_http_decode, [Response]}},
+ {options, [{active, true}, binary, {packet, http} |
+ ServerOpts]}]),
+
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ServerNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, client_http_decode, [Request]}},
+ {options, [{active, true}, binary, {packet, http} |
+ ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+
+server_http_decode(Socket, HttpResponse) ->
+ assert_packet_opt(Socket, http),
+ receive
+ {ssl, Socket, {http_request, 'GET', _, {1,1}}} -> ok;
+ Other1 -> exit({?LINE, Other1})
+ end,
+ assert_packet_opt(Socket, http),
+ receive
+ {ssl, Socket, {http_header, _, 'Host', _, "www.example.com"}} -> ok;
+ Other2 -> exit({?LINE, Other2})
+ end,
+ assert_packet_opt(Socket, http),
+ receive
+ {ssl, Socket, {http_header, _, 'User-Agent', _, "HttpTester"}} -> ok;
+ Other3 -> exit({?LINE, Other3})
+ end,
+ assert_packet_opt(Socket, http),
+ receive
+ {ssl, Socket, http_eoh} -> ok;
+ Other4 -> exit({?LINE, Other4})
+ end,
+ assert_packet_opt(Socket, http),
+ ok = ssl:send(Socket, HttpResponse),
+ ok.
+
+client_http_decode(Socket, HttpRequest) ->
+ ok = ssl:send(Socket, HttpRequest),
+ receive
+ {ssl, Socket, {http_response, {1,1}, 200, "OK"}} -> ok;
+ Other1 -> exit({?LINE, Other1})
+ end,
+ receive
+ {ssl, Socket, http_eoh} -> ok;
+ Other2 -> exit({?LINE, Other2})
+ end,
+ ok = ssl:setopts(Socket, [{packet, 0}]),
+ receive
+ {ssl, Socket, <<"Hello!">>} -> ok;
+ Other3 -> exit({?LINE, Other3})
+ end,
+ ok.
+
+%%--------------------------------------------------------------------
+packet_http_bin_decode_multi(doc) ->
+ ["Test setting the packet option {packet, http_bin} with multiple requests"];
+packet_http_bin_decode_multi(suite) ->
+ [];
+
+packet_http_bin_decode_multi(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Request = <<"GET / HTTP/1.1\r\n"
+ "host: www.example.com\r\n"
+ "user-agent: HttpTester\r\n"
+ "\r\n">>,
+ Response = <<"HTTP/1.1 200 OK\r\n"
+ "\r\n"
+ "Hello!">>,
+ NumMsgs = 3,
+
+ Server = ssl_test_lib:start_server([{node, ClientNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, server_http_bin_decode, [Response, NumMsgs]}},
+ {options, [{active, true}, binary, {packet, http_bin} |
+ ServerOpts]}]),
+
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ServerNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, client_http_bin_decode, [Request, NumMsgs]}},
+ {options, [{active, true}, binary, {packet, http_bin} |
+ ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+
+server_http_bin_decode(Socket, HttpResponse, Count) when Count > 0 ->
+ assert_packet_opt(Socket, http_bin),
+ receive
+ {ssl, Socket, {http_request, 'GET', _, {1,1}}} -> ok;
+ Other1 -> exit({?LINE, Other1})
+ end,
+ assert_packet_opt(Socket, http_bin),
+ receive
+ {ssl, Socket, {http_header, _, 'Host', _, <<"www.example.com">>}} -> ok;
+ Other2 -> exit({?LINE, Other2})
+ end,
+ assert_packet_opt(Socket, http_bin),
+ receive
+ {ssl, Socket, {http_header, _, 'User-Agent', _, <<"HttpTester">>}} -> ok;
+ Other3 -> exit({?LINE, Other3})
+ end,
+ assert_packet_opt(Socket, http_bin),
+ receive
+ {ssl, Socket, http_eoh} -> ok;
+ Other4 -> exit({?LINE, Other4})
+ end,
+ assert_packet_opt(Socket, http_bin),
+ ok = ssl:send(Socket, HttpResponse),
+ server_http_bin_decode(Socket, HttpResponse, Count - 1);
+server_http_bin_decode(_, _, _) ->
+ ok.
+
+client_http_bin_decode(Socket, HttpRequest, Count) when Count > 0 ->
+ ok = ssl:send(Socket, HttpRequest),
+ receive
+ {ssl, Socket, {http_response, {1,1}, 200, <<"OK">>}} -> ok;
+ Other1 -> exit({?LINE, Other1})
+ end,
+ receive
+ {ssl, Socket, http_eoh} -> ok;
+ Other2 -> exit({?LINE, Other2})
+ end,
+ ok = ssl:setopts(Socket, [{packet, 0}]),
+ receive
+ {ssl, Socket, <<"Hello!">>} -> ok;
+ Other3 -> exit({?LINE, Other3})
+ end,
+ ok = ssl:setopts(Socket, [{packet, http_bin}]),
+ client_http_bin_decode(Socket, HttpRequest, Count - 1);
+client_http_bin_decode(_, _, _) ->
+ ok.
%%--------------------------------------------------------------------
%% Internal functions
@@ -1572,3 +1741,6 @@ active_packet(Socket, Data, N) ->
Other ->
{other, Other, ssl:session_info(Socket),N}
end.
+
+assert_packet_opt(Socket, Type) ->
+ {ok, [{packet, Type}]} = ssl:getopts(Socket, [packet]).
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 2df2e70679..00c5350ad0 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -26,6 +26,7 @@
%% Note: This directive should only be used in test suites.
-compile(export_all).
+-record(sslsocket, { fd = nil, pid = nil}).
timetrap(Time) ->
Mul = try
@@ -52,7 +53,11 @@ node_to_hostip(Node) ->
Address.
start_server(Args) ->
- spawn_link(?MODULE, run_server, [Args]).
+ Result = spawn_link(?MODULE, run_server, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
run_server(Opts) ->
Node = proplists:get_value(node, Opts),
@@ -61,23 +66,14 @@ run_server(Opts) ->
Pid = proplists:get_value(from, Opts),
test_server:format("ssl:listen(~p, ~p)~n", [Port, Options]),
{ok, ListenSocket} = rpc:call(Node, ssl, listen, [Port, Options]),
- case Port of
- 0 ->
- {ok, {_, NewPort}} = ssl:sockname(ListenSocket),
- Pid ! {self(), {port, NewPort}};
- _ ->
- ok
- end,
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
run_server(ListenSocket, Opts).
run_server(ListenSocket, Opts) ->
+ AcceptSocket = connect(ListenSocket, Opts),
Node = proplists:get_value(node, Opts),
Pid = proplists:get_value(from, Opts),
- test_server:format("ssl:transport_accept(~p)~n", [ListenSocket]),
- {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
- [ListenSocket]),
- test_server:format("ssl:ssl_accept(~p)~n", [AcceptSocket]),
- ok = rpc:call(Node, ssl, ssl_accept, [AcceptSocket]),
{Module, Function, Args} = proplists:get_value(mfa, Opts),
test_server:format("Server: apply(~p,~p,~p)~n",
[Module, Function, [AcceptSocket | Args]]),
@@ -85,6 +81,7 @@ run_server(ListenSocket, Opts) ->
no_result_msg ->
ok;
Msg ->
+ test_server:format("Msg: ~p ~n", [Msg]),
Pid ! {self(), Msg}
end,
receive
@@ -94,8 +91,44 @@ run_server(ListenSocket, Opts) ->
ok = rpc:call(Node, ssl, close, [AcceptSocket])
end.
+%%% To enable testing with s_client -reconnect
+connect(ListenSocket, Opts) ->
+ Node = proplists:get_value(node, Opts),
+ ReconnectTimes = proplists:get_value(reconnect_times, Opts, 0),
+ AcceptSocket = connect(ListenSocket, Node, 1 + ReconnectTimes, dummy),
+ case ReconnectTimes of
+ 0 ->
+ AcceptSocket;
+ _ ->
+ remove_close_msg(ReconnectTimes),
+ AcceptSocket
+ end.
+
+connect(_, _, 0, AcceptSocket) ->
+ AcceptSocket;
+connect(ListenSocket, Node, N, _) ->
+ test_server:format("ssl:transport_accept(~p)~n", [ListenSocket]),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ test_server:format("ssl:ssl_accept(~p)~n", [AcceptSocket]),
+ ok = rpc:call(Node, ssl, ssl_accept, [AcceptSocket]),
+ connect(ListenSocket, Node, N-1, AcceptSocket).
+
+remove_close_msg(0) ->
+ ok;
+remove_close_msg(ReconnectTimes) ->
+ receive
+ {ssl_closed, _} ->
+ remove_close_msg(ReconnectTimes -1)
+ end.
+
+
start_client(Args) ->
- spawn_link(?MODULE, run_client, [Args]).
+ Result = spawn_link(?MODULE, run_client, [Args]),
+ receive
+ connected ->
+ Result
+ end.
run_client(Opts) ->
Node = proplists:get_value(node, Opts),
@@ -106,14 +139,11 @@ run_client(Opts) ->
test_server:format("ssl:connect(~p, ~p, ~p)~n", [Host, Port, Options]),
case rpc:call(Node, ssl, connect, [Host, Port, Options]) of
{ok, Socket} ->
+ Pid ! connected,
test_server:format("Client: connected~n", []),
- case proplists:get_value(port, Options) of
- 0 ->
- {ok, {_, NewPort}} = ssl:sockname(Socket),
- Pid ! {self(), {port, NewPort}};
- _ ->
- ok
- end,
+ %% In special cases we want to know the client port; it will
+ %% be indicated by passing {port, 0} in the options list!
+ send_selected_port(Pid, proplists:get_value(port, Options), Socket),
{Module, Function, Args} = proplists:get_value(mfa, Opts),
test_server:format("Client: apply(~p,~p,~p)~n",
[Module, Function, [Socket | Args]]),
@@ -178,6 +208,26 @@ check_result(Pid, Msg) ->
test_server:fail(Reason)
end.
+check_result_ignore_renegotiation_reject(Pid, Msg) ->
+ receive
+ {Pid, fail_session_fatal_alert_during_renegotiation} ->
+ test_server:comment("Server rejected old renegotiation"),
+ ok;
+ {ssl_error, _, esslconnect} ->
+ test_server:comment("Server rejected old renegotiation"),
+ ok;
+ {Pid, Msg} ->
+ ok;
+ {Port, {data,Debug}} when is_port(Port) ->
+ io:format("openssl ~s~n",[Debug]),
+ check_result(Pid,Msg);
+ Unexpected ->
+ Reason = {{expected, {Pid, Msg}},
+ {got, Unexpected}},
+ test_server:fail(Reason)
+ end.
+
+
wait_for_result(Server, ServerMsg, Client, ClientMsg) ->
receive
{Server, ServerMsg} ->
@@ -237,7 +287,7 @@ cert_options(Config) ->
"badcert.pem"]),
BadKeyFile = filename:join([?config(priv_dir, Config),
"badkey.pem"]),
- [{client_opts, [{ssl_imp, new}]},
+ [{client_opts, [{ssl_imp, new},{reuseaddr, true}]},
{client_verification_opts, [{cacertfile, ClientCaCertFile},
{certfile, ClientCertFile},
{keyfile, ClientKeyFile},
@@ -269,7 +319,11 @@ cert_options(Config) ->
start_upgrade_server(Args) ->
- spawn_link(?MODULE, run_upgrade_server, [Args]).
+ Result = spawn_link(?MODULE, run_upgrade_server, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
run_upgrade_server(Opts) ->
Node = proplists:get_value(node, Opts),
@@ -281,15 +335,8 @@ run_upgrade_server(Opts) ->
test_server:format("gen_tcp:listen(~p, ~p)~n", [Port, TcpOptions]),
{ok, ListenSocket} = rpc:call(Node, gen_tcp, listen, [Port, TcpOptions]),
-
- case Port of
- 0 ->
- {ok, {_, NewPort}} = inet:sockname(ListenSocket),
- Pid ! {self(), {port, NewPort}};
- _ ->
- ok
- end,
-
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
test_server:format("gen_tcp:accept(~p)~n", [ListenSocket]),
{ok, AcceptSocket} = rpc:call(Node, gen_tcp, accept, [ListenSocket]),
@@ -331,14 +378,8 @@ run_upgrade_client(Opts) ->
test_server:format("gen_tcp:connect(~p, ~p, ~p)~n",
[Host, Port, TcpOptions]),
{ok, Socket} = rpc:call(Node, gen_tcp, connect, [Host, Port, TcpOptions]),
-
- case proplists:get_value(port, Opts) of
- 0 ->
- {ok, {_, NewPort}} = inet:sockname(Socket),
- Pid ! {self(), {port, NewPort}};
- _ ->
- ok
- end,
+
+ send_selected_port(Pid, Port, Socket),
test_server:format("ssl:connect(~p, ~p)~n", [Socket, SslOptions]),
{ok, SslSocket} = rpc:call(Node, ssl, connect, [Socket, SslOptions]),
@@ -354,7 +395,11 @@ run_upgrade_client(Opts) ->
end.
start_server_error(Args) ->
- spawn_link(?MODULE, run_server_error, [Args]).
+ Result = spawn_link(?MODULE, run_server_error, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
run_server_error(Opts) ->
Node = proplists:get_value(node, Opts),
@@ -364,8 +409,10 @@ run_server_error(Opts) ->
test_server:format("ssl:listen(~p, ~p)~n", [Port, Options]),
case rpc:call(Node, ssl, listen, [Port, Options]) of
{ok, ListenSocket} ->
- test_server:sleep(2000), %% To make sure error_client will
+ %% To make sure error_client will
%% get {error, closed} and not {error, connection_refused}
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
test_server:format("ssl:transport_accept(~p)~n", [ListenSocket]),
case rpc:call(Node, ssl, transport_accept, [ListenSocket]) of
{error, _} = Error ->
@@ -376,6 +423,9 @@ run_server_error(Opts) ->
Pid ! {self(), Error}
end;
Error ->
+ %% Not really true but as this is an error test
+ %% this is what we want.
+ Pid ! {listen, up},
Pid ! {self(), Error}
end.
@@ -400,8 +450,8 @@ inet_port(Pid) when is_pid(Pid)->
inet_port(Node) ->
{Port, Socket} = do_inet_port(Node),
- rpc:call(Node, gen_tcp, close, [Socket]),
- Port.
+ rpc:call(Node, gen_tcp, close, [Socket]),
+ Port.
do_inet_port(Node) ->
{ok, Socket} = rpc:call(Node, gen_tcp, listen, [0, [{reuseaddr, true}]]),
@@ -410,3 +460,37 @@ do_inet_port(Node) ->
no_result(_) ->
no_result_msg.
+
+trigger_renegotiate(Socket, [ErlData, N]) ->
+ [{session_id, Id} | _ ] = ssl:session_info(Socket),
+ trigger_renegotiate(Socket, ErlData, N, Id).
+
+trigger_renegotiate(Socket, _, 0, Id) ->
+ test_server:sleep(1000),
+ case ssl:session_info(Socket) of
+ [{session_id, Id} | _ ] ->
+ fail_session_not_renegotiated;
+ %% Tests that use this function will not reuse
+ %% sessions, so if we get a new session id the
+ %% renegotiation has succeeded.
+ [{session_id, _} | _ ] ->
+ ok;
+ {error, closed} ->
+ fail_session_fatal_alert_during_renegotiation;
+ {error, timeout} ->
+ fail_timeout
+ end;
+
+trigger_renegotiate(Socket, ErlData, N, Id) ->
+ ssl:send(Socket, ErlData),
+ trigger_renegotiate(Socket, ErlData, N-1, Id).
+
+
+send_selected_port(Pid, 0, #sslsocket{} = Socket) ->
+ {ok, {_, NewPort}} = ssl:sockname(Socket),
+ Pid ! {self(), {port, NewPort}};
+send_selected_port(Pid, 0, Socket) ->
+ {ok, {_, NewPort}} = inet:sockname(Socket),
+ Pid ! {self(), {port, NewPort}};
+send_selected_port(_,_,_) ->
+ ok.
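+
+%% A hypothetical sketch (caller names invented for illustration) of the
+%% {port, 0} convention mentioned in run_client/1 above: the caller puts
+%% {port, 0} in the ssl option list and then picks up the {port, Port}
+%% message emitted by send_selected_port/3.
+client_selected_port_sketch(ClientNode, Hostname, ServerPort, ClientOpts) ->
+    Client = start_client([{node, ClientNode}, {port, ServerPort},
+                           {host, Hostname},
+                           {from, self()},
+                           {mfa, {?MODULE, no_result, []}},
+                           {options, [{port, 0} | ClientOpts]}]),
+    receive
+        {Client, {port, ClientPort}} ->
+            {Client, ClientPort}
+    end.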
diff --git a/lib/ssl/test/ssl_to_openssl_SUITE.erl b/lib/ssl/test/ssl_to_openssl_SUITE.erl
index c079e12b83..cbf0447bf0 100644
--- a/lib/ssl/test/ssl_to_openssl_SUITE.erl
+++ b/lib/ssl/test/ssl_to_openssl_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -30,6 +30,9 @@
-define(TIMEOUT, 120000).
-define(SLEEP, 1000).
+-define(OPENSSL_RENEGOTIATE, "r\n").
+-define(OPENSSL_QUIT, "Q\n").
+-define(OPENSSL_GARBAGE, "P\n").
%% Test server callback functions
%%--------------------------------------------------------------------
@@ -114,6 +117,11 @@ all(doc) ->
all(suite) ->
[erlang_client_openssl_server,
erlang_server_openssl_client,
+ erlang_server_openssl_client_reuse_session,
+ erlang_client_openssl_server_renegotiate,
+ erlang_client_openssl_server_no_wrap_sequence_number,
+ erlang_server_openssl_client_no_wrap_sequence_number,
+ erlang_client_openssl_server_no_server_ca_cert,
ssl3_erlang_client_openssl_server,
ssl3_erlang_server_openssl_client,
ssl3_erlang_client_openssl_server_client_cert,
@@ -124,7 +132,8 @@ all(suite) ->
tls1_erlang_client_openssl_server_client_cert,
tls1_erlang_server_openssl_client_client_cert,
tls1_erlang_server_erlang_client_client_cert,
- ciphers
+ ciphers,
+ erlang_client_bad_openssl_server
].
%% Test cases starts here.
@@ -148,13 +157,13 @@ erlang_client_openssl_server(Config) when is_list(Config) ->
KeyFile = proplists:get_value(keyfile, ServerOpts),
Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
- " -cert " ++ CertFile ++ " -key " ++ KeyFile,
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile,
test_server:format("openssl cmd: ~p~n", [Cmd]),
OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -193,14 +202,199 @@ erlang_server_openssl_client(Config) when is_list(Config) ->
{options, ServerOpts}]),
Port = ssl_test_lib:inet_port(Server),
+ Cmd = "openssl s_client -port " ++ integer_to_list(Port) ++
+ " -host localhost",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpenSslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+ port_command(OpenSslPort, Data),
+
+ ssl_test_lib:check_result(Server, ok),
+
+ ssl_test_lib:close(Server),
+
+ close_port(OpenSslPort),
+ process_flag(trap_exit, false),
+ ok.
+
+%%--------------------------------------------------------------------
+
+erlang_server_openssl_client_reuse_session(doc) ->
+ ["Test erlang server with openssl client that reconnects with the"
+ "same session id, to test reusing of sessions."];
+erlang_server_openssl_client_reuse_session(suite) ->
+ [];
+erlang_server_openssl_client_reuse_session(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+
+ {_, ServerNode, _} = ssl_test_lib:run_where(Config),
+
+ Data = "From openssl to erlang",
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, erlang_ssl_receive, [Data]}},
+ {reconnect_times, 5},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Cmd = "openssl s_client -port " ++ integer_to_list(Port) ++
+ " -host localhost -reconnect",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpenSslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
+ port_command(OpenSslPort, Data),
+
+ ssl_test_lib:check_result(Server, ok),
+
+ ssl_test_lib:close(Server),
+
+ close_port(OpenSslPort),
+ process_flag(trap_exit, false),
+ ok.
+
+%%--------------------------------------------------------------------
+
+erlang_client_openssl_server_renegotiate(doc) ->
+ ["Test erlang client when openssl server issuses a renegotiate"];
+erlang_client_openssl_server_renegotiate(suite) ->
+ [];
+erlang_client_openssl_server_renegotiate(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, _, Hostname} = ssl_test_lib:run_where(Config),
+
+ ErlData = "From erlang to openssl",
+ OpenSslData = "From openssl to erlang",
+
+ Port = ssl_test_lib:inet_port(node()),
+ CertFile = proplists:get_value(certfile, ServerOpts),
+ KeyFile = proplists:get_value(keyfile, ServerOpts),
+
+ Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ " -msg",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
+ wait_for_openssl_server(),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE,
+ delayed_send, [[ErlData, OpenSslData]]}},
+ {options, ClientOpts}]),
+
+ port_command(OpensslPort, ?OPENSSL_RENEGOTIATE),
test_server:sleep(?SLEEP),
+ port_command(OpensslPort, OpenSslData),
+
+ %%ssl_test_lib:check_result(Client, ok),
+ %% Currently allow the test case not to fail
+ %% if the server requires secure renegotiation (RFC 5746).
+ %% This should be removed as soon as we have implemented it.
+ ssl_test_lib:check_result_ignore_renegotiation_reject(Client, ok),
+
+ %% Clean close down! The server needs to be closed first!
+ close_port(OpensslPort),
+
+ ssl_test_lib:close(Client),
+ process_flag(trap_exit, false),
+ ok.
+
+%%--------------------------------------------------------------------
+
+erlang_client_openssl_server_no_wrap_sequence_number(doc) ->
+ ["Test that erlang client will renegotiate session when",
+ "max sequence number celing is about to be reached. Although"
+ "in the testcase we use the test option renegotiate_at"
+ " to lower treashold substantially."];
+erlang_client_openssl_server_no_wrap_sequence_number(suite) ->
+ [];
+erlang_client_openssl_server_no_wrap_sequence_number(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, _, Hostname} = ssl_test_lib:run_where(Config),
+
+ ErlData = "From erlang to openssl\n",
+ N = 10,
+
+ Port = ssl_test_lib:inet_port(node()),
+ CertFile = proplists:get_value(certfile, ServerOpts),
+ KeyFile = proplists:get_value(keyfile, ServerOpts),
+ Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ " -msg",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
+ wait_for_openssl_server(),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib,
+ trigger_renegotiate, [[ErlData, N+2]]}},
+ {options, [{reuse_sessions, false},
+ {renegotiate_at, N} | ClientOpts]}]),
+
+ %%ssl_test_lib:check_result(Client, ok),
+ %% Currently allow the test case not to fail
+ %% if the server requires secure renegotiation (RFC 5746).
+ %% This should be removed as soon as we have implemented it.
+ ssl_test_lib:check_result_ignore_renegotiation_reject(Client, ok),
+
+ %% Clean close down! The server needs to be closed first!
+ close_port(OpensslPort),
+
+ ssl_test_lib:close(Client),
+ process_flag(trap_exit, false),
+ ok.
+%%--------------------------------------------------------------------
+erlang_server_openssl_client_no_wrap_sequence_number(doc) ->
+ ["Test that erlang client will renegotiate session when",
+ "max sequence number celing is about to be reached. Although"
+ "in the testcase we use the test option renegotiate_at"
+ " to lower treashold substantially."];
+
+erlang_server_openssl_client_no_wrap_sequence_number(suite) ->
+ [];
+erlang_server_openssl_client_no_wrap_sequence_number(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+
+ {_, ServerNode, _} = ssl_test_lib:run_where(Config),
+
+ Data = "From openssl to erlang",
+
+ N = 10,
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib,
+ trigger_renegotiate, [[Data, N+2]]}},
+ {options, [{renegotiate_at, N} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+
Cmd = "openssl s_client -port " ++ integer_to_list(Port) ++
- " -host localhost",
+ " -host localhost -msg",
test_server:format("openssl cmd: ~p~n", [Cmd]),
OpenSslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
port_command(OpenSslPort, Data),
ssl_test_lib:check_result(Server, ok),
@@ -210,6 +404,53 @@ erlang_server_openssl_client(Config) when is_list(Config) ->
close_port(OpenSslPort),
process_flag(trap_exit, false),
ok.
+%%--------------------------------------------------------------------
+
+erlang_client_openssl_server_no_server_ca_cert(doc) ->
+ ["Test erlang client when openssl server sends a cert chain not"
+ "including the ca cert. Explicitly test this even if it is"
+ "implicitly tested eleswhere."];
+erlang_client_openssl_server_no_server_ca_cert(suite) ->
+ [];
+erlang_client_openssl_server_no_server_ca_cert(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_opts, Config),
+ ClientOpts = ?config(client_opts, Config),
+
+ {ClientNode, _, Hostname} = ssl_test_lib:run_where(Config),
+
+ Data = "From openssl to erlang",
+
+ Port = ssl_test_lib:inet_port(node()),
+ CertFile = proplists:get_value(certfile, ServerOpts),
+ KeyFile = proplists:get_value(keyfile, ServerOpts),
+
+ Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ " -msg",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
+ wait_for_openssl_server(),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE,
+ erlang_ssl_receive, [Data]}},
+ {options, ClientOpts}]),
+
+ port_command(OpensslPort, Data),
+
+ ssl_test_lib:check_result(Client, ok),
+
+ %% Clean close down! The server needs to be closed first!
+ close_port(OpensslPort),
+
+ ssl_test_lib:close(Client),
+ process_flag(trap_exit, false),
+ ok.
%%--------------------------------------------------------------------
ssl3_erlang_client_openssl_server(doc) ->
@@ -233,7 +474,7 @@ ssl3_erlang_client_openssl_server(Config) when is_list(Config) ->
OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -268,8 +509,6 @@ ssl3_erlang_server_openssl_client(Config) when is_list(Config) ->
{options,
[{versions, [sslv3]} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
-
- test_server:sleep(?SLEEP),
Cmd = "openssl s_client -port " ++ integer_to_list(Port) ++
" -host localhost -ssl3",
@@ -300,8 +539,8 @@ ssl3_erlang_client_openssl_server_client_cert(Config) when is_list(Config) ->
Data = "From openssl to erlang",
Port = ssl_test_lib:inet_port(node()),
- CaCertFile = proplists:get_value(cacertfile, ServerOpts),
CertFile = proplists:get_value(certfile, ServerOpts),
+ CaCertFile = proplists:get_value(cacertfile, ServerOpts),
KeyFile = proplists:get_value(keyfile, ServerOpts),
Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
@@ -312,7 +551,7 @@ ssl3_erlang_client_openssl_server_client_cert(Config) when is_list(Config) ->
OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -354,8 +593,6 @@ ssl3_erlang_server_openssl_client_client_cert(Config) when is_list(Config) ->
| ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- test_server:sleep(?SLEEP),
-
CaCertFile = proplists:get_value(cacertfile, ClientOpts),
CertFile = proplists:get_value(certfile, ClientOpts),
KeyFile = proplists:get_value(keyfile, ClientOpts),
@@ -402,8 +639,6 @@ ssl3_erlang_server_erlang_client_client_cert(Config) when is_list(Config) ->
| ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- test_server:sleep(?SLEEP),
-
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
{from, self()},
@@ -439,14 +674,13 @@ tls1_erlang_client_openssl_server(Config) when is_list(Config) ->
KeyFile = proplists:get_value(keyfile, ServerOpts),
Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
- " -cert " ++ CertFile
- ++ " -key " ++ KeyFile ++ " -tls1",
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ " -tls1",
test_server:format("openssl cmd: ~p~n", [Cmd]),
OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -484,8 +718,6 @@ tls1_erlang_server_openssl_client(Config) when is_list(Config) ->
[{versions, [tlsv1]} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- test_server:sleep(?SLEEP),
-
Cmd = "openssl s_client -port " ++ integer_to_list(Port) ++
" -host localhost -tls1",
@@ -529,7 +761,7 @@ tls1_erlang_client_openssl_server_client_cert(Config) when is_list(Config) ->
OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -571,8 +803,6 @@ tls1_erlang_server_openssl_client_client_cert(Config) when is_list(Config) ->
| ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- test_server:sleep(?SLEEP),
-
CaCertFile = proplists:get_value(cacertfile, ClientOpts),
CertFile = proplists:get_value(certfile, ClientOpts),
KeyFile = proplists:get_value(keyfile, ClientOpts),
@@ -617,8 +847,6 @@ tls1_erlang_server_erlang_client_client_cert(Config) when is_list(Config) ->
| ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- test_server:sleep(?SLEEP),
-
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
{from, self()},
@@ -668,14 +896,13 @@ cipher(CipherSuite, Version, Config) ->
KeyFile = proplists:get_value(keyfile, ServerOpts),
Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
- " -cert " ++ CertFile
- ++ " -key " ++ KeyFile ++ "",
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ "",
test_server:format("openssl cmd: ~p~n", [Cmd]),
OpenSslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
- test_server:sleep(?SLEEP),
+ wait_for_openssl_server(),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
{host, Hostname},
@@ -707,6 +934,52 @@ cipher(CipherSuite, Version, Config) ->
Return.
%%--------------------------------------------------------------------
+erlang_client_bad_openssl_server(doc) ->
+ [""];
+erlang_client_bad_openssl_server(suite) ->
+ [];
+erlang_client_bad_openssl_server(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ ServerOpts = ?config(server_verification_opts, Config),
+ ClientOpts = ?config(client_verification_opts, Config),
+
+ {ClientNode, _, Hostname} = ssl_test_lib:run_where(Config),
+
+ Port = ssl_test_lib:inet_port(node()),
+ CertFile = proplists:get_value(certfile, ServerOpts),
+ KeyFile = proplists:get_value(keyfile, ServerOpts),
+
+ Cmd = "openssl s_server -accept " ++ integer_to_list(Port) ++
+ " -cert " ++ CertFile ++ " -key " ++ KeyFile ++ "",
+
+ test_server:format("openssl cmd: ~p~n", [Cmd]),
+
+ OpensslPort = open_port({spawn, Cmd}, [stderr_to_stdout]),
+
+ wait_for_openssl_server(),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, server_sent_garbage, []}},
+ {options,
+ [{versions, [tlsv1]} | ClientOpts]}]),
+
+ %% Send garbage
+ port_command(OpensslPort, ?OPENSSL_GARBAGE),
+
+ test_server:sleep(?SLEEP),
+
+ Client ! server_sent_garbage,
+
+ ssl_test_lib:check_result(Client, true),
+
+ ssl_test_lib:close(Client),
+ %% Clean close down!
+ close_port(OpensslPort),
+ process_flag(trap_exit, false),
+ ok.
+%%--------------------------------------------------------------------
erlang_ssl_receive(Socket, Data) ->
test_server:format("Connection info: ~p~n",
@@ -738,8 +1011,14 @@ connection_info(Socket, Version) ->
connection_info_result(Socket) ->
ssl:connection_info(Socket).
+
+delayed_send(Socket, [ErlData, OpenSslData]) ->
+ test_server:sleep(?SLEEP),
+ ssl:send(Socket, ErlData),
+ erlang_ssl_receive(Socket, OpenSslData).
+
close_port(Port) ->
- port_command(Port, "Q\n"),
+ port_command(Port, ?OPENSSL_QUIT),
%%catch port_command(Port, "quit\n"),
close_loop(Port, 500, false).
@@ -770,3 +1049,22 @@ close_loop(Port, Time, SentClose) ->
io:format("Timeout~n",[])
end
end.
+
+
+server_sent_garbage(Socket) ->
+ receive
+ server_sent_garbage ->
+ {error, closed} == ssl:send(Socket, "data")
+ end.
+
+wait_for_openssl_server() ->
+ receive
+ {Port, {data, Debug}} when is_port(Port) ->
+ io:format("openssl ~s~n",[Debug]),
+ %% openssl has started; make sure
+ %% it has reached accept. Parsing the
+ %% output is too error prone. (Even
+ %% more so than sleep!)
+ test_server:sleep(?SLEEP)
+ end.
+
diff --git a/lib/ssl/vsn.mk b/lib/ssl/vsn.mk
index 7c038e5818..337ea4b380 100644
--- a/lib/ssl/vsn.mk
+++ b/lib/ssl/vsn.mk
@@ -17,9 +17,18 @@
# %CopyrightEnd%
#
-SSL_VSN = 3.10.8
+SSL_VSN = 3.11
-TICKETS = OTP-8372 OTP-8441 OTP-8459
+TICKETS = OTP-8517 \
+ OTP-7046 \
+ OTP-8557 \
+ OTP-8560 \
+ OTP-8545 \
+ OTP-8554
+
+#TICKETS_3.10.9 = OTP-8510
+
+#TICKETS_3.10.8 = OTP-8372 OTP-8441 OTP-8459
#TICKETS_3.10.7 = OTP-8260 OTP-8218 OTP-8250
#TICKETS_3.10.6 = OTP-8275