-rw-r--r--  erts/doc/src/erlang.xml  7
-rw-r--r--  erts/doc/src/escript.xml  10
-rw-r--r--  erts/emulator/Makefile.in  2
-rw-r--r--  erts/emulator/beam/beam_emu.c  14
-rwxr-xr-x  erts/emulator/beam/erl_bif_info.c  25
-rw-r--r--  erts/emulator/beam/erl_init.c  4
-rw-r--r--  erts/emulator/internal_doc/CarrierMigration.md  201
-rw-r--r--  erts/emulator/internal_doc/CodeLoading.md  186
-rw-r--r--  erts/emulator/internal_doc/DelayedDealloc.md  175
-rw-r--r--  erts/emulator/internal_doc/PTables.md  356
-rw-r--r--  erts/emulator/internal_doc/PortSignals.md  267
-rw-r--r--  erts/emulator/internal_doc/ProcessManagementOptimizations.md  172
-rw-r--r--  erts/emulator/internal_doc/ThreadProgress.md  308
-rw-r--r--  erts/emulator/internal_doc/Tracing.md  220
-rw-r--r--  erts/emulator/sys/unix/sys.c  19
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl  2
-rwxr-xr-x  erts/emulator/utils/make_version  4
-rw-r--r--  erts/epmd/src/epmd_cli.c  4
-rw-r--r--  erts/epmd/test/epmd_SUITE.erl  23
-rw-r--r--  erts/etc/common/erlexec.c  30
-rw-r--r--  erts/etc/win32/erlang.ico  bin 1398 -> 99678 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam  bin 97872 -> 97916 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl  1
-rw-r--r--  erts/vsn.mk  6
-rw-r--r--  lib/compiler/src/sys_core_fold.erl  35
-rw-r--r--  lib/compiler/test/andor_SUITE.erl  6
-rw-r--r--  lib/crypto/c_src/crypto.c  138
-rw-r--r--  lib/crypto/doc/src/crypto.xml  45
-rw-r--r--  lib/crypto/src/Makefile  1
-rw-r--r--  lib/crypto/src/crypto.app.src  1
-rw-r--r--  lib/crypto/src/crypto.erl  15
-rw-r--r--  lib/crypto/src/crypto_ec_curves.erl  1215
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl  93
-rw-r--r--  lib/debugger/src/dbg_ieval.erl  34
-rw-r--r--  lib/debugger/test/int_eval_SUITE_data/my_int_eval_module.erl  4
-rw-r--r--  lib/hipe/icode/hipe_icode.erl  1
-rw-r--r--  lib/odbc/c_src/odbcserver.c  1
-rw-r--r--  lib/public_key/asn1/Makefile  5
-rw-r--r--  lib/public_key/asn1/OTP-PUB-KEY.set.asn  1
-rw-r--r--  lib/public_key/asn1/RFC5639.asn1  27
-rw-r--r--  lib/public_key/src/pubkey_cert_records.erl  30
-rw-r--r--  lib/ssh/src/ssh_cli.erl  2
-rw-r--r--  lib/ssl/src/tls_v1.erl  28
-rw-r--r--  lib/stdlib/src/orddict.erl  8
-rw-r--r--  lib/test_server/src/configure.in  57
-rw-r--r--  lib/test_server/src/ts_install.erl  14
-rw-r--r--  lib/tools/emacs/erlang.el  5
-rwxr-xr-x  otp_build  2
48 files changed, 3575 insertions(+), 229 deletions(-)
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 711473afd2..124302a2cb 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -6008,6 +6008,13 @@ ok
<seealso marker="#system_info_multi_scheduling">erlang:system_info(multi_scheduling)</seealso>, and
<seealso marker="#system_info_schedulers">erlang:system_info(schedulers)</seealso>.</p>
</item>
+ <tag><marker id="system_info_otp_correction_package"><c>otp_correction_package</c></marker></tag>
+ <item>
+ <p>Returns a string containing the OTP correction package version
+        number that the currently executing VM is part of. Note that other
+ OTP applications in the system may be part of other OTP correction
+ packages.</p>
+ </item>
<tag><marker id="system_info_otp_release"><c>otp_release</c></marker></tag>
<item>
<p>Returns a string containing the OTP release number.</p>
diff --git a/erts/doc/src/escript.xml b/erts/doc/src/escript.xml
index 180447cac4..d2b09d4515 100644
--- a/erts/doc/src/escript.xml
+++ b/erts/doc/src/escript.xml
@@ -44,6 +44,7 @@
<p><c>escript</c> runs a script written in Erlang.</p>
<p>Here follows an example.</p>
<pre>
+$ <input>chmod u+x factorial</input>
$ <input>cat factorial</input>
#!/usr/bin/env escript
%% -*- erlang -*-
@@ -66,12 +67,13 @@ usage() ->
fac(0) -> 1;
fac(N) -> N * fac(N-1).
-$ <input>factorial 5</input>
+$ <input>./factorial 5</input>
factorial 5 = 120
-$ <input>factorial</input>
+$ <input>./factorial</input>
usage: factorial integer
-$ <input>factorial five</input>
-usage: factorial integer </pre>
+$ <input>./factorial five</input>
+usage: factorial integer
+ </pre>
<p>The header of the Erlang script in the example differs from
a normal Erlang module. The first line is intended to be the
interpreter line, which invokes <c>escript</c>. However if you
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 5638683f88..b270099566 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -575,7 +575,7 @@ GENERATE += $(TTF_DIR)/erl_alloc_types.h
# version include file
$(TARGET)/erl_version.h: ../vsn.mk
- $(gen_verbose)LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(VSN)$(SERIALNO) $(TARGET)
+ $(gen_verbose)LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(SYSTEM_CP_VSN) $(VSN)$(SERIALNO) $(TARGET)
GENERATE += $(TARGET)/erl_version.h
# driver table
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 78ab6fa30f..592cfe273f 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -4326,7 +4326,19 @@ void process_main(void)
flags = Arg(2);
BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
if (size >= SMALL_BITS) {
- Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ Uint wordsneeded;
+ /* check bits size before potential gc.
+ * We do not want a gc and then realize we don't need
+ * the allocated space (i.e. if the op fails)
+ *
+ * remember to reacquire the matchbuffer after gc.
+ */
+
+ mb = ms_matchbuffer(tmp_arg1);
+ if (mb->size - mb->offset < size) {
+ ClauseFail();
+ }
+ wordsneeded = 1+WSIZE(NBYTES((Uint) size));
TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
}
mb = ms_matchbuffer(tmp_arg1);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 414ae2f046..e0b654cb22 100755
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -64,8 +64,10 @@ static Export *gather_gc_info_res_trap;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+static char otp_correction_package[] = ERLANG_OTP_CORRECTION_PACKAGE;
/* Keep erts_system_version as a global variable for easy access from a core */
static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
+ "%s"
" [erts-" ERLANG_VERSION "]"
#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
" [no-c-stack-objects]"
@@ -304,11 +306,28 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail)
int
erts_print_system_version(int to, void *arg, Process *c_p)
{
+ int i, rc = -1;
+ char *rc_str = "";
+ char rc_buf[100];
+ char *ocp = otp_correction_package;
#ifdef ERTS_SMP
Uint total, online, active;
(void) erts_schedulers_state(&total, &online, &active, 0);
#endif
- return erts_print(to, arg, erts_system_version
+ for (i = 0; i < sizeof(otp_correction_package)-4; i++) {
+ if (ocp[i] == '-' && ocp[i+1] == 'r' && ocp[i+2] == 'c')
+ rc = atoi(&ocp[i+3]);
+ }
+ if (rc >= 0) {
+ if (rc == 0)
+ rc_str = " [DEVELOPMENT]";
+ else {
+ erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
+ rc_str = rc_buf;
+ }
+ }
+ return erts_print(to, arg, erts_system_version,
+ rc_str
#ifdef ERTS_SMP
, total, online
#endif
@@ -2417,6 +2436,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
DECL_AM(unknown);
BIF_RET(AM_unknown);
}
+ } else if (ERTS_IS_ATOM_STR("otp_correction_package", BIF_ARG_1)) {
+ int n = sizeof(ERLANG_OTP_CORRECTION_PACKAGE)-1;
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_CORRECTION_PACKAGE, n, NIL));
} else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
int n = sizeof(ERLANG_OTP_RELEASE)-1;
hp = HAlloc(BIF_P, 2*n);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 8c4fffa75b..1af80dd04b 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -553,8 +553,8 @@ void erts_usage(void)
erts_fprintf(stderr, " numbers is %d\n",
ERTS_MAX_NO_OF_SCHEDULERS);
erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
- erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
- erts_fprintf(stderr, " processors available, respectively\n");
+ erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
+ erts_fprintf(stderr, " processors available, respectively\n");
erts_fprintf(stderr, "-t size set the maximum number of atoms the "
"emulator can handle\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md
new file mode 100644
index 0000000000..b93c11c6ec
--- /dev/null
+++ b/erts/emulator/internal_doc/CarrierMigration.md
@@ -0,0 +1,201 @@
+Carrier Migration
+=================
+
+The ERTS memory allocators manage memory blocks in two types of raw
+memory chunks. We call these chunks of raw memory
+*carriers*: singleblock carriers, which contain only one large block,
+and multiblock carriers, which contain multiple blocks. A carrier is
+typically created using `mmap()` on unix systems. However, how a
+carrier is created is of minor importance. An allocator instance
+typically manages a mixture of single- and multiblock carriers.
+
+Problem
+-------
+
+When a carrier is empty, i.e. contains only one large free block, it
+is deallocated. Since multiblock carriers can contain both allocated
+blocks and free blocks at the same time, an allocator instance might
+be stuck with a large amount of poorly utilized carriers if the memory
+load decreases. After a peak in memory usage it is expected that not
+all memory can be returned, since the blocks still allocated are likely
+to be dispersed over multiple carriers. Such poorly utilized carriers
+can usually be reused if the memory load increases again. However,
+since each scheduler thread manages its own set of allocator
+instances, and memory load is not necessarily connected to CPU load, we
+might get into a situation where there are lots of poorly utilized
+multiblock carriers on some allocator instances while we need to
+allocate new multiblock carriers on other allocator instances. In
+scenarios like this, the demand for multiblock carriers in the system
+might increase at the same time as the actual memory demand in the
+system has decreased which is both unwanted and quite unexpected for
+the end user.
+
+Solution
+--------
+
+In order to prevent scenarios like this we've implemented support for
+migration of multiblock carriers between allocator instances of the
+same type.
+
+### Management of Free Blocks ###
+
+In order to be able to remove a carrier from one allocator instance
+and add it to another we need to be able to move references to the
+free blocks of the carrier between the allocator instances. The
+allocator instance specific data structure referring to the free
+blocks it manages often refers to the same carrier from multiple
+places. For example, when the address order bestfit strategy is used
+this data structure is a binary search tree spanning all carriers that
+the allocator instance manages. Free blocks in one specific carrier
+can be referred to from potentially every other carrier that is
+managed, and the amount of such references can be huge. That is, the
+work of removing the free blocks of such a carrier from the search
+tree will be huge. One way of solving this could be to not migrate
+carriers that contain lots of free blocks, but this would prevent us
+from migrating carriers that potentially need to be migrated in order
+to solve the problem we set out to solve.
+
+By using one data structure of free blocks in each carrier and an
+allocator instance wide data structure of carriers managed by the
+allocator instance, the work needed in order to remove and add
+carriers can be kept to a minimum. When migration of carriers is
+enabled on a specific allocator type, we require that an allocation
+strategy with such an implementation is used. Currently we've
+implemented this for three different allocation strategies. All of
+these strategies use a search tree of carriers sorted so that we can
+find the carrier with the lowest address that can satisfy the
+request. Internally in carriers we use yet another search tree that
+either implements address order first fit, address order best fit,
+or best fit. The abbreviations used for these different allocation
+strategies are `aoff`, `aoffcaobf`, and `aoffcbf`.
+
+### Carrier Pool ###
+
+In order to migrate carriers between allocator instances we move them
+through a pool of carriers. In order for a carrier migration to
+complete, one scheduler needs to move the carrier into the pool, and
+another scheduler needs to take the carrier out of the pool.
+
+The pool is implemented as a lock-free, circular, doubly linked
+list. The list contains a sentinel which is used as the starting point
+when inserting to, or fetching from the pool. Carriers in the pool are
+elements in this list.
+
+The list can be modified by all scheduler threads
+simultaneously. During modifications the double linked list is allowed
+to get a bit "out of shape". For example, following the `next` pointer
+to the next element and then following the `prev` pointer does not
+always take you back to where you started. The following is however
+always true:
+
+* Repeatedly following `next` pointers will eventually take you to the
+ sentinel.
+* Repeatedly following `prev` pointers will eventually take you to the
+ sentinel.
+* Following a `next` or a `prev` pointer will take you to either an
+ element in the pool, or an element that used to be in the pool.
+
+When inserting a new element we search for a place to insert the
+element by only following `next` pointers, and we always begin by
+skipping the first element encountered. When trying to fetch an
+element we do the same thing, but instead only follow `prev` pointers.
+
+By going different directions when inserting and fetching, we avoid
+contention between threads inserting and threads fetching as much as
+possible. By skipping one element when we begin searching, we preserve
+the sentinel unmodified as much as possible. This is beneficial since
+all search operations need to read the content of the sentinel. If we
+were to modify the sentinel, the cache line containing the sentinel
+would unnecessarily be bounced between processors.
+
+The `prev` and `next` fields in the elements of the list contain the
+value of the pointer, a modification marker, and a deleted
+marker. Memory operations on these fields are done using atomic memory
+operations. When a thread has set the modification marker in a field,
+no-one except the thread that set the marker is allowed to modify the
+field. If multiple modification markers need to be set, we always
+begin with `next` fields followed by `prev` fields in the order
+following the actual pointers. This guarantees that no deadlocks will
+occur.
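+
+As a rough sketch of the field layout (hypothetical names; the real
+implementation uses the ERTS atomic API rather than C11 atomics),
+the pointer and the two markers can share one atomic word, which is
+possible since the elements are aligned to at least four bytes:
+
+    #include <stdatomic.h>
+    #include <stdint.h>
+
+    #define FLG_MOD  ((uintptr_t) 1)         /* modification marker */
+    #define FLG_DEL  ((uintptr_t) 2)         /* deleted marker */
+    #define PTR_MASK (~(FLG_MOD | FLG_DEL))
+
+    typedef struct pool_elem {
+        _Atomic uintptr_t next;              /* pointer | marker bits */
+        _Atomic uintptr_t prev;              /* pointer | marker bits */
+    } pool_elem_t;
+
+    /* Try to become the only thread allowed to modify `field`. */
+    static int try_set_mod_marker(_Atomic uintptr_t *field,
+                                  uintptr_t expected)
+    {
+        /* fails if another thread set a marker or changed the
+         * pointer in the meantime */
+        return atomic_compare_exchange_strong(field, &expected,
+                                              expected | FLG_MOD);
+    }
+
+    static pool_elem_t *pool_ptr(uintptr_t field_val)
+    {
+        return (pool_elem_t *) (field_val & PTR_MASK);
+    }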
+
+When a carrier is being removed from a pool, we mark it with a thread
+progress value that needs to be reached before we are allowed to
+modify the `next` and `prev` fields. That is, until we reach this
+thread progress we are not allowed to insert the carrier into the pool
+again, and we are not allowed to deallocate the carrier. This ensures
+that threads inspecting the pool always will be able to traverse the
+pool and reach valid elements. Once we have reached the thread
+progress value that the carrier was tagged with, we know that no
+threads may have references to it via the pool.
+
+### Migration ###
+
+There exists one pool for each allocator type, enabling migration of
+carriers between scheduler specific allocator instances of the same
+allocator type.
+
+Each allocator instance keeps track of the current utilization of its
+multiblock carriers. When the utilization falls below the "abandon
+carrier utilization limit" it starts to inspect the utilization of the
+current carrier when deallocations are made. If the utilization
+of the carrier also falls below the "abandon carrier utilization limit", it
+unlinks the carrier from its data structure of available free blocks
+and inserts the carrier into the pool.
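+
+A minimal sketch of that decision, made on each deallocation once the
+instance as a whole is below the limit (all names and fields here are
+illustrative; the actual logic lives in `erl_alloc_util.c`, and the
+limit corresponds to the `+M<S>acul` parameter):
+
+    typedef struct { unsigned acul; /* ... */ } Allctr_t;
+    typedef struct {
+        unsigned long allocated_bytes;
+        unsigned long total_bytes;
+        /* ... */
+    } Carrier_t;
+
+    extern void unlink_free_blocks(Allctr_t *, Carrier_t *);
+    extern void pool_insert(Allctr_t *, Carrier_t *);
+
+    static void check_abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
+    {
+        /* carrier utilization in percent vs. the acul limit */
+        if (100 * crr->allocated_bytes < allctr->acul * crr->total_bytes) {
+            unlink_free_blocks(allctr, crr); /* no further allocations */
+            pool_insert(allctr, crr);        /* now up for grabs */
+        }
+    }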
+
+Since the carrier has been unlinked from the data structure of
+available free blocks, no more allocations will be made in the
+carrier. The allocator instance putting the carrier into the pool,
+however, still has the responsibility of performing deallocations in
+it while it remains in the pool.
+
+Each carrier has a flag field containing information about the allocator
+instance owning the carrier, a flag indicating if the carrier is in
+the pool or not, and a flag indicating if it is busy or not. When the
+carrier is in the pool, the owning allocator instance needs to mark it
+as busy while operating on it. If another thread inspects it in order
+to try to fetch it from the pool, it will abort the fetch if it is
+busy. When fetching the carrier from the pool, ownership will be changed
+and further deallocations in the carrier will be redirected to the new
+owner using the delayed dealloc functionality.
+
+If a carrier in the pool becomes empty, it will be withdrawn from the
+pool. All carriers that become empty are also always passed to their
+originating allocator instance for deallocation using the delayed
+dealloc functionality. Since carriers this way always will be
+deallocated by the allocator instance that allocated the carrier, the
+underlying functionality of allocating and deallocating carriers can
+remain simple and doesn't have to bother about multiple threads. In a
+NUMA system we will also not mix carriers originating from multiple
+NUMA nodes.
+
+When an allocator instance needs more carrier space, it always begins
+by inspecting its own carriers that are waiting for thread progress
+before they can be deallocated. If no such carrier could be found, it
+then inspects the pool. If no carrier could be fetched from the pool,
+it will allocate a new carrier. Regardless of where the allocator
+instance gets the carrier from, it just links the carrier into
+its data structure of free blocks.
+
+### Result ###
+
+The use of this strategy of abandoning carriers with poor utilization
+and reusing these in allocator instances with an increased carrier
+demand is extremely effective and completely eliminates the problems
+that otherwise sometimes occurred when CPU load dropped while memory
+load did not.
+
+When using the `aoffcaobf` or `aoff` strategies compared to `gf` or
+`bf`, we lose some performance since we get more modifications in the
+data structure of free blocks. This performance penalty is however
+reduced using the `aoffcbf` strategy. A tradeoff between memory
+consumption and performance is however inevitable, and it is up to
+the user to decide what is most important.
+
+Further work
+------------
+
+It would be quite easy to extend this to allow migration of multiblock
+carriers between all allocator types. More or less the only obstacle
+is maintenance of the statistics information.
+
+
diff --git a/erts/emulator/internal_doc/CodeLoading.md b/erts/emulator/internal_doc/CodeLoading.md
new file mode 100644
index 0000000000..151b9cd57c
--- /dev/null
+++ b/erts/emulator/internal_doc/CodeLoading.md
@@ -0,0 +1,186 @@
+Non-Blocking Code Loading
+=========================
+
+Introduction
+------------
+
+Before OTP R16 when an Erlang code module was loaded, all other
+execution in the VM was halted while the load operation was carried
+out in single threaded mode. This might not be a big problem for
+initial loading of modules during VM boot, but it can be a severe
+problem for availability when upgrading modules or adding new code on
+a VM with running payload. This problem grows with the number of cores,
+as both the time it takes to wait for all schedulers to stop and the
+potential amount of halted ongoing work increase.
+
+In OTP R16, modules are loaded without blocking the VM.
+Erlang processes may continue executing undisturbed in parallel during
+the entire load operation. The code loading is carried out by a normal
+Erlang process that is scheduled like all the others. The load
+operation is completed by making the loaded code visible to all
+processes in a consistent way with one single atomic
+instruction. Non-blocking code loading will improve real-time
+characteristics when modules are loaded/upgraded on a running SMP
+system.
+
+
+The Load Phases
+---------------
+
+The loading of a module is divided into two phases: a *prepare phase*
+and a *finishing phase*. The prepare phase contains reading the BEAM
+file format and all the preparations of the loaded code that can
+easily be done without interference with the running code. The
+finishing phase will make the loaded (and prepared) code accessible
+from the running code. Old module versions (replaced or deleted) will
+also be made inaccessible by the finishing phase.
+
+The prepare phase is designed to allow several "loader" processes to
+prepare separate modules in parallel while the finishing phase can
+only be done by one loader process at a time. A second loader process
+trying to enter finishing phase will be suspended until the first
+loader is done. This only blocks the process; the scheduler is
+free to schedule other work while the second loader is waiting. (See
+`erts_try_seize_code_write_permission` and
+`erts_release_code_write_permission`).
+
+The ability to prepare several modules in parallel is not currently
+used as almost all code loading is serialized by the code_server
+process. The BIF interface is however prepared for this.
+
+ erlang:prepare_loading(Module, Code) -> LoaderState
+ erlang:finish_loading([LoaderState])
+
+The idea is that `prepare_loading` could be called in parallel for
+different modules and returns a "magic binary" containing the internal
+state of each prepared module. Function `finish_loading` could take a
+list of such states and do the finishing of all of them in one go.
+
+Currently we use the legacy BIF `erlang:load_module`, which is now
+implemented in Erlang by calling the above two functions in
+sequence. Function `finish_loading` is limited to accepting a list
+with only one module state, as we do not yet use the multi-module
+loading feature.
+
+
+The Finishing Sequence
+----------------------
+
+During VM execution, code is accessed through a number of data
+structures. These *code access structures* are
+
+* Export table. One entry for every exported function.
+* Module table. One entry for each loaded module.
+* "beam_catches". Identifies jump destinations for catch instructions.
+* "beam_ranges". Map code address to function and line in source file.
+
+The most frequently used of these structures is the export table that
+is accessed in run time for every executed external function call to
+get the address of the callee. For performance reasons, we want to
+access all these structures without any overhead from thread
+synchronization. Earlier this was solved with an emergency brake: stop
+the entire VM to mutate these code access structures, otherwise treat
+them as read-only.
+
+The solution in R16 is instead to *replicate* the code access
+structures. We have one set of active structures read by the running
+code. When new code is loaded the active structures are copied, the
+copy is updated to include the newly loaded module and then a switch
+is made to make the updated copy the new active set. The active set is
+identified by a single global atomic variable
+`the_active_code_index`. The switch can thus be made by a single
+atomic write operation. The running code has to read this atomic
+variable when using the active access structures, which means one
+atomic read operation per external function call for example. The
+performance penalty from this extra atomic read is however very small
+as it can be done without any memory barriers at all (as described
+below). With this solution we also preserve the transactional feature
+of a load operation. Running code will never see the intermediate
+result of a half loaded module.
+
+The finishing phase is carried out in the following sequence by the
+BIF `erlang:finish_loading`:
+
+1. Seize exclusive code write permission (suspend process if needed
+ until we get it).
+
+2. Make a full copy of all the active access structures. This copy is
+ called the staging area and is identified by the global atomic
+ variable `the_staging_code_index`.
+
+3. Update all access structures in the staging area to include the
+ newly prepared module.
+
+4. Schedule a thread progress event. That is a time in the future when
+ all schedulers have yielded and executed a full memory barrier.
+
+5. Suspend the loader process.
+
+6. After thread progress, commit the staging area by assigning
+ `the_staging_code_index` to `the_active_code_index`.
+
+7. Release the code write permission allowing other processes to stage
+ new code.
+
+8. Resume the loader process allowing it to return from
+ `erlang:finish_loading`.
+
+
+### Thread Progress
+
+The waiting for thread progress in 4-6 is necessary in order for
+processes to read `the_active_code_index` atomic during normal
+execution without any expensive memory barriers. When we write a new
+value into `the_active_code_index` in step 6, we know that all
+schedulers will see an updated and consistent view of all the new
+active access structures once they become reachable through
+`the_active_code_index`.
+
+The total lack of memory barrier when reading `the_active_code_index`
+has one interesting consequence however. Different processes may see
+the new code at different points in time depending on when different
+cores happen to refresh their hardware caches. This may sound unsafe
+but it actually does not matter. The only property we must guarantee
+is that the ability to see the new code must spread with process
+communication. After receiving a message that was triggered by new
+code, the receiver must be guaranteed to also see the new code. This
+will be guaranteed as all types of process communication involves
+memory barriers in order for the receiver to be sure to read what the
+sender has written. This implicit memory barrier will then also make
+sure that the receiver reads the new value of `the_active_code_index`
+and thereby also sees the new code. This is true for all kinds of
+inter process communication (TCP, ETS, process name registering,
+tracing, drivers, NIFs, etc) not just Erlang messages.
+
+### Code Index Reuse
+
+To optimize the copy operation in step 2, code access structures are
+reused. In the current solution we have three sets of code access
+structures, identified by a code index of 0, 1 and 2. These indexes
+are used in a round robin fashion. Instead of having to initialize a
+completely new copy of all access structures for every load operation
+we just have to update with the changes that have happened since the
+last two code load operations. We could get by with only two code
+indexes (0 and 1), but that would require yet another round of waiting
+for thread progress before step 2 in the `finish_loading` sequence. We
+cannot start reusing a code index as staging area until we know that
+no lingering scheduler thread is still using it as the active code
+index. With three generations of code indexes, the waiting for thread
+progress in step 4-6 will give this guarantee for us. Thread progress
+will wait for all running schedulers to reschedule at least one
+time. No ongoing execution reading code access structures reached from
+an old value of `the_active_code_index` can exist after a second round
+of thread progress.
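+
+As a minimal illustration of the round robin reuse (`NUM_CODE_IX`
+and `next_staging_ix` are illustrative names, not the actual
+internal API), the staging index can be derived from the active one:
+
+    /* Three generations of code access structures. */
+    #define NUM_CODE_IX 3
+
+    static int next_staging_ix(int active_ix)
+    {
+        /* the "oldest" index: no longer active, and no scheduler
+         * can linger on it after the wait for thread progress */
+        return (active_ix + 1) % NUM_CODE_IX;
+    }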
+
+The design choice between two or three generations of code access
+structures is a trade-off between memory consumption and code loading
+latency.
+
+### A Consistent Code View
+
+Some native BIFs may need to get a consistent snapshot view of the
+active code. To do this it is important to only read
+`the_active_code_index` one time and then use that index value for all
+code accessing during the BIF. If a load operation is executed in
+parallel, reading `the_active_code_index` a second time might result
+in a different value, and thereby a different view of the code.
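+
+A minimal sketch of this rule (hypothetical names; the real code
+uses the ERTS atomic API rather than C11 atomics):
+
+    #include <stdatomic.h>
+
+    extern _Atomic int the_active_code_index;
+    extern void lookup_export_entry(int code_ix /* , ... */);
+    extern void lookup_module(int code_ix /* , ... */);
+
+    void bif_with_consistent_code_view(void)
+    {
+        /* one relaxed read, no memory barrier needed */
+        int code_ix = atomic_load_explicit(&the_active_code_index,
+                                           memory_order_relaxed);
+
+        /* all code accesses below use code_ix; re-reading
+         * the_active_code_index could observe a concurrent commit
+         * and yield an inconsistent view */
+        lookup_export_entry(code_ix);
+        lookup_module(code_ix);
+    }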
diff --git a/erts/emulator/internal_doc/DelayedDealloc.md b/erts/emulator/internal_doc/DelayedDealloc.md
new file mode 100644
index 0000000000..b7d87b839f
--- /dev/null
+++ b/erts/emulator/internal_doc/DelayedDealloc.md
@@ -0,0 +1,175 @@
+Delayed Dealloc
+===============
+
+Problem
+-------
+
+An easy way to handle memory allocation in a multi-threaded
+environment is to protect the memory allocator with a global lock
+which threads performing memory allocations or deallocations have to
+have locked during the whole operation. This solution of course scales
+very poorly, due to heavy lock contention. An improved solution of
+this scheme is to use multiple thread specific instances of such an
+allocator. That is, each thread allocates in its own allocator
+instance which is protected by a lock. In the general case references
+to memory need to be passed between threads. In the case where a
+thread that needs to deallocate memory that originates from another
+threads allocator instance a lock conflict is possible. In a system as
+the Erlang VM where memory allocation/deallocation is frequent and
+references to memory also are passed around between threads this
+solution will also scale poorly due to lock contention.
+
+Functionality Used to Address This Problem
+-----------------------------------------
+
+In order to reduce contention due to locking of allocator instances we
+introduced completely lock free instances tied to each scheduler
+thread, and an extra locked instance for other threads. The scheduler
+threads in the system are expected to do the major part of the
+work. Other threads may still be needed but should not perform any
+major and/or time critical work. The limited amount of contention that
+appears on the locked allocator instance can more or less be
+disregarded.
+
+Since we still need to be able to pass references to memory between
+scheduler threads we need some way to manage this. An allocator
+instance belonging to one scheduler thread is only allowed to be
+manipulated by that scheduler thread. When other threads need to
+deallocate memory originating from a foreign allocator instance, they
+only pass the memory block to a "message box" containing deallocation
+jobs attached to the originating allocator instance. When a scheduler
+thread detects such a deallocation job, it performs the actual
+deallocation.
+
+The "message box" is implemented using a lock free single linked list
+through the memory blocks to deallocate. The order of the elements in
+this list is not important. Insertion of new free blocks will be made
+somewhere near the end of this list. Requiring that the new blocks
+need to be inserted at the end would cause unnecessary contention when
+large amounts of memory blocks are inserted simultaneously by multiple
+threads.
+
+The data structure referring to this singly linked list covers two cache
+lines. One cache line containing information about the head of the
+list, and one cache line containing information about the tail of the
+list. This is in order to reduce cache line ping-ponging of this data
+structure. The head of the list will only be manipulated by the thread
+owning the allocator instance, and the tail will be manipulated by
+other threads inserting deallocation jobs.
+
+### Tail ###
+
+In the tail part of the data structure we find a pointer to the last
+element of the list, or at least something that is near the end of the
+list. In the uncontended case it will point to the end of the list,
+but when simultaneous insert operations are performed it will point to
+something near the end of the list.
+
+When inserting an element one will try to write a pointer to the new
+element in the next pointer of the element pointed to by the last
+pointer. This is done using an atomic compare and swap that expects
+the next pointer to be `NULL`. If this succeeds the thread performing
+this operation moves the last pointer to point to the newly inserted
+element.
+
+If the atomic compare and swap described above failed, the last
+pointer didn't point to the last element. In this case we need to
+insert the new element somewhere in between the element that the last
+pointer pointed to and the actual last element. If we do it this way
+the last pointer will eventually end up at the last element when
+threads stop adding new elements. When trying to insert somewhere near
+the end and failing to do so, the inserting thread sometimes moves to
+the next element and sometimes tries with the same element again. This
+is in order to spread the inserted elements during heavy contention. That
+is, we try to spread the modifications of memory to different
+locations instead of letting all threads continue to try to modify the
+same location in memory.
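+
+A minimal sketch of this insert path (hypothetical names, C11
+atomics instead of the ERTS atomic API, and `rand()` standing in
+for the real spreading heuristic):
+
+    #include <stdatomic.h>
+    #include <stdlib.h>
+
+    typedef struct dd_elem {
+        struct dd_elem *_Atomic next;
+    } dd_elem_t;
+
+    typedef struct {
+        dd_elem_t *_Atomic last;   /* points near the end of the list */
+    } dd_tail_t;
+
+    static void insert_dealloc_job(dd_tail_t *tail, dd_elem_t *new_el)
+    {
+        dd_elem_t *last = atomic_load(&tail->last);
+        atomic_store(&new_el->next, NULL);
+        for (;;) {
+            dd_elem_t *expected = NULL;
+            if (atomic_compare_exchange_strong(&last->next,
+                                               &expected, new_el)) {
+                /* linked in; move the last pointer forward */
+                atomic_store(&tail->last, new_el);
+                return;
+            }
+            /* `expected` now holds the element linked in ahead of
+             * us; sometimes step forward to it, sometimes retry at
+             * the same element, to spread the contention */
+            if (rand() & 1)
+                last = expected;
+        }
+    }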
+
+### Head ###
+
+The head contains pointers to the beginning of the list (`head.first`), and
+to the first block which other threads may refer to
+(`head.unref_end`). Blocks between these pointers are only referred to
+by the head part of the data structure which is only used by the
+thread owning the allocator instance. When these two pointers are not
+equal, the thread owning the allocator instance deallocates block after
+block until `head.first` reaches `head.unref_end`.
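+
+A sketch of this consumption loop, reusing `dd_elem_t` from the
+sketch above (hypothetical names; the real code lives in
+`erl_alloc_util.c`):
+
+    typedef struct {
+        dd_elem_t *first;        /* only touched by the owner */
+        dd_elem_t *unref_end;    /* first block others may still see */
+    } dd_head_t;
+
+    extern void dealloc_block(dd_elem_t *blk);
+
+    static void handle_delayed_dealloc(dd_head_t *head)
+    {
+        /* blocks between head->first and head->unref_end cannot be
+         * referenced by any other thread */
+        while (head->first != head->unref_end) {
+            dd_elem_t *blk = head->first;
+            head->first = atomic_load(&blk->next);
+            dealloc_block(blk);
+        }
+    }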
+
+We of course periodically need to move the `head.unref_end` closer to
+the end in order to be able to continue deallocating memory
+blocks. Since all threads inserting new elements in the linked list
+will enter the list using the last pointer we can use this
+knowledge. If we call `erts_thr_progress_later()` and wait until we
+have reached that thread progress we know that no managed threads can
+refer to the elements up to the element pointed to by the last pointer at
+the time when we called `erts_thr_progress_later()`. This is because all
+managed threads must have left the code implementing this at least
+once, and they always enter the list via the last pointer. The
+`tail.next` field contains information about next `head.unref_end`
+pointer and thread progress that needs to be reached before we can
+move `head.unref_end`.
+
+Unfortunately not only threads managed by the thread progress
+functionality may insert memory blocks. Other threads also need to be
+taken care of. Other threads will not be as frequent users of this
+functionality as managed threads, so using a less efficient scheme for
+them is not that big of a problem. In order to handle unmanaged
+threads we use two reference counters. When an unmanaged thread enters
+this implementation it increments the reference counter currently
+used, and when it leaves this implementation it decrements the same
+reference counter. When the consumer thread calls
+`erts_thr_progress_later()` in order to determine when it is safe to
+move `head.unref_end`, it also swaps reference counters for unmanaged
+threads. The previous current represents outstanding references from
+the time up to this point. The new current represents future references
+following this point. When the consumer thread detects that we have
+both reached the desired thread progress and that the previous current
+reference counter has reached zero, it is safe to move `head.unref_end`.
+
+The reason for using two reference counters is that we need to know
+that the reference counter eventually will reach zero. If we only used
+one reference counter it would potentially be held above zero forever
+by different unmanaged threads.
+
+### Empty List ###
+
+If no new memory blocks are inserted into the list, it should
+eventually be emptied. All pointers to the list however expect to
+always point to something. This is solved by inserting an empty
+"marker" element, which only has to purpose of being there in the
+absense of other elements. That is when the list is empty it only
+contains this "marker" element.
+
+### Contention ###
+
+When elements are continuously inserted by threads not owning the
+allocator instance, the thread owning the allocator instance will be
+able to work more or less undisturbed by other threads at the head end
+of the list. At the tail end large amounts of simultaneous inserts may
+cause contention, but we reduce such contention by spreading inserts
+of new elements near the end instead of requiring all new elements to
+be inserted at the end.
+
+### Schedulers and The Locked Allocator Instance ###
+
+The locked allocator instance for use by non-scheduler threads also
+has a message box for deallocation jobs just as all the other
+allocator instances. The reason for this is that other threads may
+allocate memory and pass it to a scheduler that then needs to deallocate
+it. We do not want the scheduler to have to wait for the lock on this
+locked instance. Since locked instances also have message boxes for
+deallocation jobs, the scheduler can just insert the job and avoid the
+locking.
+
+
+### A Benchmark Result ###
+
+When running the ehb benchmark, large amounts of messages are passed
+around between schedulers. All message passing will in one way or
+another cause memory allocation and deallocation. Since messages are
+passed between different schedulers we will get contention on the
+allocator instances where messages were allocated. By the introduction
+of the delayed dealloc feature, we got a speedup of 25-45%,
+depending on configuration of the benchmark, when running on a
+relatively new machine with an Intel i7 quad core processor with
+hyper-threading using 8 schedulers.
\ No newline at end of file
diff --git a/erts/emulator/internal_doc/PTables.md b/erts/emulator/internal_doc/PTables.md
new file mode 100644
index 0000000000..6fe0e7665d
--- /dev/null
+++ b/erts/emulator/internal_doc/PTables.md
@@ -0,0 +1,356 @@
+Process and Port Tables
+=======================
+
+Problems
+--------
+
+The process table is a mapping from process identifiers to process
+structure pointers. The process structure contains miscellaneous
+information about a process, as for example pointers to its heap,
+message queue, etc. When the runtime system needs to operate on a
+process, it looks up the process structure in the process table using
+the process identifier. An example of this is when passing a message
+to a process.
+
+The process table has for a very long time just been an array of
+pointers to process structures. Since process identifiers internally
+in the runtime system are 28-bit integers it is quite easy to map a
+process identifier to index into the array. The 28-bits were divided
+into two sets. The least significant set of bits was used as index
+into the array. The most significant set of bits was only used to be
+able to distinguish between a number of identifiers which map to
+the same index in the array. As long as a process table size of a power
+of two was used, we had 2^28 unique process identifiers.
+
+When the first SMP support was implemented, the table still was kept
+more or less the same way, but protected by two types of locks. One
+lock that protected the whole table against modifications and an array
+of locks protecting different parts of the table. The exact locking
+strategy previously used isn't interesting. What is interesting is
+that it suffered from heavy lock contention especially when lots of
+modifications were being made, but also when only performing lookups.
+
+In order to be able to detect when it is safe to deallocate a
+previously used process structure, reference counting of the structure
+was used. This was also problematic, since simultaneous lookups needed
+to modify the reference counter which also caused contention on the
+cache line where the reference counter was located. This is because all
+modifications need to be communicated between all involved
+processors.
+
+The port table is very similar to the process table. The major
+difference, at least in concept, is that it is a mapping from port
+identifiers to port structures. It had a similar implementation, but
+with some differences. Instead of being an array of pointers it was an
+array of structures, and instead of being protected by two types of
+locks it was only protected by one global lock. This table also
+suffered from lock contention in various situations.
+
+Solution
+--------
+
+The process table was the major problem to address since processes are
+much more frequently used than ports. The first implementation only
+implemented this for processes, but since the port table is very
+similar and very similar problems occur on the port table, the process
+table implementation was later generalized so that it could also be
+used implementing the port table. For simplicity I will only talk
+about the process table in the following text, but the same will apply
+to the port table unless otherwise stated.
+
+If we disregard the locking issues, the original solution is very
+appealing. The mapping from process identifier to index into the array
+is very fast, and this property is something we would like to
+keep. The vast majority of operations on these tables are lookups so
+optimizing for lookups is what we want to do.
+
+### Lookup ###
+
+Using a set of bits in the process identifier as index into an array
+seems hard to beat. By replacing the array of pointers with an array
+of our pointer sized atomic data type, a lookup will consist of the
+following:
+
+1. Mapping the 28-bit integer to an index into the array.
+
+ More about this mapping later.
+
+2. Read the pointer using an atomic memory operation at the determined
+   index in the array.
+
+ On all platforms that we provide atomic memory operations, this is
+   just a `volatile` read, preventing the compiler from using values in
+   registers and forcing a read from memory.
+
+3. Depending on use, issue appropriate memory barrier.
+
+ A common barrier used is a barrier with acquire semantics. On
+ x86/x86_64 this maps to a compiler barrier preventing the compiler
+ to reorder instructions, but on other hardware often some kind of
+ light weight hardware memory barrier is also needed.
+
+ When comparing with a locked approach, at least one heavy weight
+ memory barrier will be issued when locking the lock on most, if
+ not all, hardware architectures (including x86/x86_64), and often
+ some kind of light weight memory barrier will be issued when
+ unlocking the lock.
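+
+Put together, a lookup can be sketched like this (hypothetical
+names and illustrative types; the real code uses the ERTS atomic
+API and lives in `erl_ptab.h`):
+
+    #include <stdatomic.h>
+
+    typedef unsigned long Eterm;                  /* illustrative */
+    typedef struct { Eterm id; /* ... */ } Process;
+
+    extern _Atomic(Process *) proc_table[];  /* one slot per index */
+    extern int pid2index(Eterm pid);         /* id bits -> index */
+
+    Process *proc_lookup(Eterm pid)
+    {
+        int ix = pid2index(pid);                     /* step 1 */
+        /* steps 2 and 3: atomic read with acquire semantics */
+        Process *p = atomic_load_explicit(&proc_table[ix],
+                                          memory_order_acquire);
+        /* the slot may meanwhile have been reused by a new process */
+        if (p && p->id != pid)
+            return NULL;
+        return p;
+    }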
+
+When looking at this very simple solution with very little overhead
+you might wonder why we didn't implement it this way from the
+beginning. It all boils down to the read operation of the pointer. We
+need some way to know that it is safe to access the memory pointed
+to. One way of doing this is to place a reference counter in the
+process structure. Increment of the reference counter at lookup needs
+to be done atomically with the lookup. A lock can typically provide
+this service for us, which was the approach we previously
+used. Another approach could be to co-locate the reference counter
+with the pointer in the table. The major problem with this approach is
+the modifications of the reference counter. This is because these
+modifications would have to be communicated between all involved
+processors, causing contention on the cache line containing the reference
+counter. The new lookup approach above is possible since we can use
+the "thread progress" functionality in order to determine when it is
+safe to deallocate the process structure. We'll get back to this when
+describing deletion in the table.
+
+Using this new lookup approach we won't modify any memory at all, which
+is important. A lookup conceptually only reads memory; now this is true
+in the implementation as well, which is important from a scalability
+perspective. The previous implementation modified the cache line
+containing the reference counter two times, and the cache line
+containing the corresponding lock two times at each lookup.
+
+### Modifications of the Table ###
+
+A lightweight lookup in the table was the most important feature, but
+we also wanted to improve modifications of the table. The process
+table is modified when a new process is spawned, i.e. a new pointer is
+inserted into the table, and when a process terminates, i.e. a pointer
+is deleted in the table.
+
+Assuming that we spawn fewer processes than the maximum amount of
+unique process identifiers in the system, one has always been able to
+determine the order of process creation just by comparing process
+identifiers. If PidX is larger than PidY, then PidX was created after
+PidY assuming both identifiers originates from the same node. However,
+since we have a quite limited amount of unique identifiers today
+(2^28), this property cannot be relied upon if we create large amount
+of processes. But never the less, this is a property the system always
+have had.
+
+If we had had a huge amount of unique identifiers available, it
+would have been tempting to drop or modify this ordering property as
+described above. The ordering property could for example be based on
+the scheduler performing the spawn operation. It would have been
+possible to reserve large ranges of identifiers exclusively for each
+scheduler thread, which could be used to minimize the need for
+communication when allocating identifiers. The amount of identifiers
+we have to work with today is, however, not even close to being enough for
+such an approach.
+
+Since we have a limited amount of unique identifiers, we need to be
+careful not to waste them. If previously used identifiers are reused
+too quickly, identifiers originating from terminated processes will
+refer to newly created processes, and mix-ups will occur. The
+previously used approach was quite good at not wasting
+identifiers. Using a modified version of the same approach also lets
+us keep the ordering property that we have always had.
+
+#### Insert ####
+
+The original approach is more or less to search for the next free index or
+slot in the array. The search starts from the last slot allocated. If
+we reach the end of the array we increase a "wrapped counter" and then
+continue the search. The process identifier is constructed by writing
+the index to the least significant set of bits, and the "wrapped
+counter" to the most significant set of bits. The amount of bits in
+each set of bits is decided at boot time, so that maximum index will
+just fit into the least significant set of bits.
+
+In the modified lock free version of this approach we more or less do
+it the same way, but with some important modifications trying to avoid
+unnecessary contention when multiple schedulers create processes
+simultaneously. Since multiple threads might be trying to search for
+the next free slot at the same time from the same starting point we
+want subsequent slots to be located in different cache lines. Multiple
+schedulers simultaneously writing new pointers into the table are
+therefore very likely to write into adjacent slots. If adjacent slots
+are located in the same cache line, all modifications of this cache line
+need to be communicated between all involved processors, which will be
+very expensive and scale very poorly. By locating adjacent slots in
+different cache lines only true conflicts will trigger communication
+between involved processors, i.e., avoiding false sharing.
+
+A cache line is larger than a pointer, typically 8 or 16 times larger,
+so using one cache line for each slot only containing one pointer
+would be a waste of space. Each cache line will be able to hold a
+fixed amount of slots. The first slot of the table will be the first
+slot of the first cache line, the second slot of the table will be the
+first slot of the second cache line until we reach the end of the
+array. The next slot after that will be the second slot of the first
+cache line, etc, moving forward one cache line internal slot each time
+we wrap. This way we will be able to fit the same amount of pointers
+into an array of the same size while always keeping adjacent slots in
+different cache lines.
+
+The mapping from identifier to slot or index into the array gets a bit
+more complicated than before. Instead of a `shift` and a bitwise
+`and`, we get two `shift`s, two bitwise `and`s, and an `add` (see
+implementation of `erts_ptab_data2pix()` in `erl_ptab.h`). However, by
+storing this information optimized for lookup we only need a `shift`
+and a bitwise `and` on 32-bit platforms. On 64-bit platforms we have
+enough room for the 28-bit identifier in the least significant
+halfword, and the index in the most significant halfword, in other
+words, we just need to read the most significant halfword to get the
+index. That is, this operation is as fast, or faster than before. The
+downside is that on 32-bit platforms we need to convert this
+information into the 28-bit identifier number when printing, or when
+ordering identifiers from the same node. These operations are,
+however, extremely infrequent compared to lookups.
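+
+A sketch of the slot mapping (the real function is
+`erts_ptab_data2pix()` in `erl_ptab.h`; the bit widths here are
+illustrative). Consecutive identifiers land in different cache
+lines, and the slot within a line advances by one on each wrap:
+
+    /* cl_bits  = log2(slots per cache line)
+     * ncl_bits = log2(number of cache lines)
+     * table size = 1 << (cl_bits + ncl_bits) */
+    static unsigned data2pix(unsigned data,
+                             unsigned cl_bits, unsigned ncl_bits)
+    {
+        unsigned n    = data & ((1u << (cl_bits + ncl_bits)) - 1);
+        unsigned line = n & ((1u << ncl_bits) - 1);   /* cache line   */
+        unsigned slot = n >> ncl_bits;                /* slot in line */
+        return (line << cl_bits) + slot;              /* array index  */
+    }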
+
+When we insert a new element in the table we do the following:
+
+1. We begin by reserving space in the table by atomically
+ incrementing a counter of processes in the table. If our increment
+ brings the counter above the maximum size of the table, the
+   operation fails and a `system_limit` exception is raised.
+
+2. The table contains a 64-bit atomic variable of the last identifier
+ used. Only the least significant bits will be used when actually
+ creating the identifier. This identifier is where the search
+   begins.
+
+3. We increment the last identifier value used. In order to determine the
+ slot that corresponds to this identifier we call
+ `erts_ptab_data2pix()` that maps identifier to slot. We read the
+ content of the slot. If the slot is free we try to write a
+ reservation marker using an atomic compare and swap. If this fails
+ we repeat this step until it succeeds.
+
+4. Change the table variable of last identifier used. Since multiple
+ writes might occur at the same time this value may already have
+   been changed to an identifier larger than the one we got. In
+ this case we can continue; otherwise, we need to change it to the
+ identifier we got.
+
+5. We now do some initializations of the process structure that
+ cannot be done before we know the process identifier, and have to
+ be done before we publish the structure in the table. This, for
+ example, includes storing the identifier in the process structure.
+
+6. Now we can publish the structure in the table by writing the
+ pointer to the process structure in the slot previously reserved
+ in 3.
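+
+Steps 2 to 4 of this sequence can be sketched as follows
+(hypothetical names, C11 atomics instead of the ERTS atomic API;
+`Process`, `proc_table` and `data2pix()` are reused from the
+sketches above, and the marker value is illustrative):
+
+    #include <stdint.h>
+
+    #define RESERVED ((Process *) 1)       /* reservation marker */
+
+    extern _Atomic uint64_t last_id;       /* last identifier used */
+
+    static unsigned reserve_slot(uint64_t *id_out)
+    {
+        uint64_t id = atomic_load(&last_id);            /* step 2 */
+        unsigned pix;
+        for (;;) {                                      /* step 3 */
+            id++;
+            pix = data2pix((unsigned) id, 3, 14); /* illustrative bits */
+            Process *expected = NULL;
+            if (atomic_compare_exchange_strong(&proc_table[pix],
+                                               &expected, RESERVED))
+                break;                     /* slot reserved for us */
+        }
+        /* step 4: move last_id forward, unless another thread has
+         * already moved it past the identifier we got */
+        uint64_t seen = atomic_load(&last_id);
+        while (seen < id &&
+               !atomic_compare_exchange_weak(&last_id, &seen, id))
+            ;
+        *id_out = id;
+        return pix;            /* the pointer is published here later */
+    }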
+
+Using this approach we keep the properties like identifier ordering,
+and identifier reuse while improving performance and scalability. It
+has one flaw, though. There is no guarantee that the operation will
+terminate. This can quite easily be fixed, however, and will be fixed in
+the next release. We will get back to this below.
+
+#### Delete ####
+
+When a process terminates, we mark the process as terminated in the
+process structure, the counter of the number of processes in the table is
+decreased, and the reference to the process structure is removed by
+writing a `NULL` pointer into the corresponding slot. The scheduler
+thread performing this then schedules a thread progress later job which
+will do the final cleanup and deallocate the process structure. The
+thread progress functionality will make sure that this job will not
+execute until it is certain that all managed threads have dropped all
+references to the process structure.
+
+### BIF Iterating Over the Table ###
+
+The `erlang:processes/0` and `erlang:ports/0` BIFs iterate over the
+tables and return corresponding identifiers. These BIFs should return a
+consistent snapshot of the table content during some time when the BIF
+is executing. In order to implement this we use locking in a strange
+way. We use an "inverted rwlock".
+
+When performing lookups in the table we do not need to bother about
+the locking at all, but when modifying the table we read lock the
+rwlock protecting the table which allows for multiple writers during
+normal operation. When the BIF that iterates over the table needs
+access to the table it write locks the rwlock and reads content of the
+table. The BIF does not read the whole table in one go, but instead reads
+small chunks at a time, only write locking while reading. The actual
+implementation of the BIFs is out of the scope of this document.
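+
+The usage pattern, sketched with hypothetical lock primitives and
+an illustrative table type:
+
+    typedef struct { rwlock_t list_rwlock; /* ... */ } ptab_t;
+
+    /* Modifications take the READ lock, so many writers can modify
+     * different slots in parallel during normal operation. */
+    void ptab_modify(ptab_t *ptab)
+    {
+        read_lock(&ptab->list_rwlock);
+        /* insert or delete a slot */
+        read_unlock(&ptab->list_rwlock);
+    }
+
+    /* The iterating BIF takes the WRITE lock, excluding all
+     * modifications while it reads a small chunk of slots. */
+    void ptab_read_chunk(ptab_t *ptab)
+    {
+        write_lock(&ptab->list_rwlock);
+        /* copy a chunk of slots into a snapshot buffer */
+        write_unlock(&ptab->list_rwlock);
+    }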
+
+An out of the box rwlock will typically suffer from contention on the
+single cache line containing the state of the rwlock even in the case
+we are only read locking. Instead of using such an rwlock, we have our
+own implementation of reader optimized rwlocks which keeps track of
+reader threads in separate thread-specific cache lines. This is in order
+to avoid contention on a single cache line. As long as we only do read
+lock operations, threads only need to read a global cache line and
+modify its own cache line, and by this minimize communication between
+involved processors. The iterating BIFs are normally very infrequently
+used, so in the normal case we will only do read lock operations on
+the table global rwlock.
+
+### Future Improvements ###
+
+The first improvement is to fix the guarantee so that insert
+operations will be guaranteed to terminate. When the operation starts
+we verify that there actually exists a free slot that we can use. The
+problem is that we might not find it since it may move when multiple
+threads modify the table at the same time as we are trying to find the
+slot. The easy fix is to abort the operation if an empty slot could
+not be found in a finite number of operations, and then restart the
+operation under a write lock. This will be implemented in next
+release, but further work should be done trying to find a better
+solution.
+
+Neither this nor the previous implementation works well when the table
+is nearly full. We will both get long search times for free slots, and
+we will reuse identifiers more frequently since we more frequently
+wrap during the search. These tables work best when the table is much
+larger than the amount of simultaneous existing processes. One easy
+improvement is to always have room for more processes than we allow in
+the table. This will also be implemented in the next release, but this
+should probably also be worked more on trying to find an even better
+solution.
+
+It would also be nice to get rid of the rwlock all together. The use
+of a reader optimized rwlock makes sure we do not get any contention on
+the lock, but unnecessary memory barriers will be issued due to the
+lock. The main issue here is to modify iterating BIFs so that they do
+not require exclusive access to the table while reading a sequence of
+slots. In principle this should be rather easy, the code can handle
+sequences of variable sizes, so shrinking the sequence size of slots
+to one would solve the problem. This will, however, need some tweaks
+and modifications of non-trivial code, but is something that should be
+looked at in the future.
+
+By increasing the size of identifiers, at least on 64-bit machines
+(which isn't as easy as it first might seem) we get further room for
+improvement. Besides the obvious improvement of not reusing
+identifiers as fast as we currently do, it makes it possible to
+further avoid contention when inserting elements in the table. At
+least if we drop this ordering property, which isn't that useful
+anyway.
+
+### Some Benchmark Results ###
+
+In order to test modifications of the process table we ran a couple of
+benchmarks where lots of processes are spawned and terminated
+simultaneously, and got a speedup of 150-200%. Running a
+similar benchmark but with ports we got a speedup of about 130%.
+
+The BIF `erlang:is_process_alive/1` is the closest you can get to a
+process table lookup only. The BIF looks up the process corresponding
+to the process identifier passed as argument, and then checks if it is
+alive. By running multiple processes looping over this BIF checking
+the same process, we get a speedup between 20000-23000%. Conceptually
+this operation only involves read operations. In the implementation
+used in R16B, only read operations are performed, while the
+previous implementation needed to lock structures in order to read the
+data, suffering from both lock contention and contention due to
+modifications of cache lines used by lock internal data structures and
+the reference counter on the process being looked up.
+
+The benchmarks were run on a relatively new machine with an Intel i7
+quad core processor with hyper-threading using 8 schedulers. On a
+machine with more communication overhead and/or a larger number of
+logical processors the speedups are expected to be even larger.
diff --git a/erts/emulator/internal_doc/PortSignals.md b/erts/emulator/internal_doc/PortSignals.md
new file mode 100644
index 0000000000..b1afb7c5cb
--- /dev/null
+++ b/erts/emulator/internal_doc/PortSignals.md
@@ -0,0 +1,267 @@
+Port Signals
+============
+
+Problems
+--------
+
+Erlang ports conceptually are very similar to Erlang processes. Erlang
+processes execute Erlang code in the virtual machine, while an Erlang
+port executes native code, typically used for communication with the
+outside world. For example, when an Erlang process wants to
+communicate using TCP over the network, it communicates via an Erlang
+port implementing the TCP socket interface in native code. Both Erlang
+processes and ports communicate using asynchronous signaling. The
+native code executed by an Erlang port is a collection of callback
+functions, called a driver. Each callback more or less implements the
+code of a signal to or from the port.
+
+Even though processes and ports conceptually always have been very
+similar, the implementations have been very different. Originally,
+more or less all port signals were handled synchronously at the time
+they occurred. Very early in the development of the SMP support for
+the runtime system we recognized that this was a huge problem for
+signals between ports and the outside world. That is, I/O events to
+and from the outside world, or I/O signals. This was one of the first
+things that had to be rewritten in order to be able to do I/O in
+parallel at all. The solution was to implement scheduling of these
+signals. I/O signals corresponding to different ports could then be
+executed in parallel on different scheduler threads. Signals from
+processes to ports were not as big a problem as the I/O signals, and
+the implementation of those was left as it was.
+
+Each port is protected by its own lock against simultaneous execution
+on multiple threads. Previously, when a process executing on a
+scheduler thread sent a port a signal, it locked the port lock and
+synchronously executed the code corresponding to the signal. If the
+lock was busy, the scheduler thread blocked, waiting until it could
+lock the lock. If multiple processes executing simultaneously on
+different scheduler threads sent signals to the same port, the
+schedulers suffered from heavy lock contention. Such contention could
+also occur between I/O signals for the port executing on one scheduler
+thread, and a signal from a process to the port executing on another
+scheduler thread. Besides the contention issues, we also lose work
+that could have executed in parallel on different scheduler threads,
+since the process sending the *asynchronous* signal is blocked while
+the code implementing the signal executes synchronously.
+
+Solution
+--------
+
+In order to prevent multiple schedulers from trying to execute signals
+to/from the same port simultaneously, we need to be able to ensure
+that all signals to/from a port are executed in sequence on one
+scheduler. More or less, the only way to do this is to schedule all
+types of signals. Signals corresponding to a port can then be executed
+in sequence by one single scheduler thread. If only one thread tries
+to execute the port, no contention will appear on the port
+lock. Besides getting rid of the contention, processes sending signals
+to the port can also continue executing their own Erlang code on other
+schedulers while the signaling code executes on another scheduler.
+
+When implementing this, there are a couple of important properties
+that we either need or want to preserve:
+
+* Signal ordering guarantee. Signals from process `X` to port `Y`,
+ *must* be delivered to `Y` in the same order as sent from `X`.
+
+* Signal latency. Due to the previous synchronous implementation,
+  the latency of signals sent from processes to ports has usually
+  been very low. During contention the latency has of course
+  increased. Users expect the latency of these signals to be low; a
+  sudden increase would not be appreciated.
+
+* Compatible flow control. Ports have for a very long time had the
+  possibility to use the busy port functionality when implementing
+  flow control. One may argue that this functionality fits very badly
+  with the conceptually completely asynchronous signaling, but the
+  functionality has been there for ages and is expected to remain.
+  When a port sets itself into a busy state, `command` signals should
+  not be delivered, and senders of such signals should suspend until
+  the port sets itself into a not busy state.
+
+### Scheduling of Port Signals ###
+
+A run queue has four queues for processes of different priority and
+one queue for ports. The scheduler thread associated with the run
+queue switches evenly between execution of processes and execution of
+ports while both processes and ports exist in the queue. This is not
+completely true, but not important for this discussion. A port that is
+in a run queue also has a queue of tasks to execute. Each task
+corresponds to an in- or outgoing signal. When the port is selected
+for execution, each task will be executed in sequence. The run queue
+lock protected not only the queues of ports, but also the queues of
+port tasks.
+
+Since we go from a state where I/O signals are the only port related
+signals scheduled, to a state where potentially all port related
+signals may be scheduled, we may drastically increase the load on the
+run queue lock. The amount of scheduled port tasks depends very much
+on the Erlang application executing, which we do not control, and we
+do not want increased contention on the run queue locks. We therefore
+need another approach to protecting the port task queue.
+
+#### Task Queue ####
+
+We chose a "semi locked" approach, with one public locked task queue,
+and a private, lock free, queue like, task data structure. This "semi
+locked" approach is similar to how the message boxes of processes are
+managed. The lock is port specific and only used for protection of
+port tasks, so the run queue lock is now needed in more or less the
+same way for ports as for processes. This ensures that we won't see
+increased lock contention on run queue locks due to this rewrite of
+the port functionality.
+
+When an executing port runs out of work to execute in the private task
+data structure, it moves the public task queue into the private task
+data structure while holding the lock. Once tasks have been moved to
+the private data structure, no lock protects them. This way the port
+can continue working on tasks in the private data structure without
+having to fight for the lock.
+
+I/O signals may, however, be aborted. This could be solved by letting
+the port specific scheduling lock also protect the private task data
+structure, but then the port would very frequently have to fight with
+others enqueueing new tasks. In order to handle this while keeping the
+private task data structure lock free, we use a similar "non
+aggressive" approach as when handling processes that get suspended
+while in the run queue. Instead of removing the aborted port task, we
+just mark it as aborted using an atomic memory operation. When a task
+is selected for execution, we first verify that it has not been
+aborted. If it has been aborted, we just drop the task.
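+
+A rough sketch of the queue fetch and the abort marking, in
+illustrative C11 (names and layout are placeholders; the real code in
+`erl_port_task.c` is considerably more involved):
+
+    #include <stdatomic.h>
+
+    typedef struct port_task {
+        struct port_task *next;
+        atomic_int aborted;      /* set to 1 when the task is aborted */
+        /* ... signal specific data ... */
+    } PortTask;
+
+    typedef struct {
+        Lock task_lock;          /* protects only the public queue */
+        PortTask *pub_first, *pub_last;
+    } PortTaskQ;
+
+    /* Executing port ran out of private work: fetch the public queue. */
+    static PortTask *fetch_public_tasks(PortTaskQ *q)
+    {
+        PortTask *first;
+        lock(&q->task_lock);
+        first = q->pub_first;
+        q->pub_first = q->pub_last = NULL;
+        unlock(&q->task_lock);
+        return first;            /* now only reachable by this thread */
+    }
+
+    /* Before executing a task, check whether it has been aborted. */
+    static void maybe_execute(PortTask *pt)
+    {
+        if (atomic_load(&pt->aborted))
+            return;              /* aborted; just drop the task */
+        /* ... execute the code corresponding to the signal ... */
+    }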
+
+A task that can be aborted is referenced via another data structure
+from other parts of the system, so that a thread that needs to abort
+the task can reach it. In order to be sure that we can safely
+deallocate a task that is no longer used, we first clear this
+reference and then use the thread progress functionality to make sure
+no references to the task can exist. Unfortunately, unmanaged threads
+might also abort tasks. This is very infrequent, but might occur. It
+could be handled locally for each port, but that would require extra
+information in each port structure which would be used very
+infrequently. Instead of implementing this in each port, we
+implemented general functionality that can be used from unmanaged
+threads to delay thread progress.
+
+The private "queue like" task data structure could have been an
+ordinary queue if it wasn't for the busy port functionality. When the
+port has flagged itself as busy, `command` signals are not allowed to
+be delivered and need to be blocked. Other signals sent from the same
+sender following a `command` signal that has been blocked also have to
+be blocked; otherwise, we would violate the ordering guarantee. At the
+same time, other signals that have no dependencies to blocked
+`command` signals are expected to be delivered.
+
+The above requirements make the private task data structure rather
+complex. It has a queue of unprocessed tasks, and a busy queue. The
+busy queue contains blocked tasks corresponding to `command` signals,
+and tasks with dependencies on such tasks. The busy queue is
+accompanied by a table over blocked tasks, based on sender, with a
+reference into the last task in the busy queue from each specific
+sender. We need this table to check for dependencies when new tasks
+are processed in the queue of unprocessed tasks. When a newly
+processed task needs to be blocked, it isn't enqueued at the end of
+the busy queue, but instead directly after the last task from the same
+sender. This makes it easy to detect when tasks no longer have any
+dependencies on tasks corresponding to `command` signals, and should
+be moved out of the busy queue. When the port executes, it switches
+between processing tasks from the busy queue and processing directly
+from the unprocessed queue, based on its busy state. When processing
+directly from the unprocessed queue it might, of course, have to move
+a task into the busy queue instead of executing it.
+
+#### Busy Port Queue ####
+
+Since it is the port itself that decides when it is time to enter a
+busy state, it needs to be executing in order to enter the busy
+state. As a result of `command` signals being scheduled, we may get
+into a situation where the port gets flooded by a huge amount of
+`command` signals before it even gets a chance to set itself into a
+busy state, since it has not yet been scheduled for execution. That
+is, under these circumstances the busy port functionality loses the
+flow control properties it was intended to provide.
+
+In order to solve this, we introduced a new busy feature, namely "busy
+port queue". The port has a limit of `command` data that is allowed to
+be enqueued in the task queue. When this limit is reached, the port
+will automatically enter a busy port queue state. When in this state,
+senders of `command` signals will be suspended, but `command` signals
+will still be delivered to the port unless it is also in a busy port
+state. This limit is known as the high limit.
+
+There is also a low limit. When the amount of queued `command` data
+falls below this limit and the port is in a busy port queue state, the
+busy port queue state is automatically disabled. The low limit should
+typically be significantly lower than the high limit in order to
+prevent frequent oscillation around the busy port queue state.
+
+By introducing this new busy state we can still provide the flow
+control. Old drivers do not even have to be changed. The limits can,
+however, be configured and even disabled by the port. By default the
+high limit is 8 KB and the low limit is 4 KB.
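+
+A driver can tune the limits with `erl_drv_busy_msgq_limits()` (see
+the `erl_driver` documentation). As a hedged example, a `start`
+callback raising the defaults might look as follows (`MyData` is a
+hypothetical driver state type):
+
+    #include "erl_driver.h"
+
+    typedef struct { int dummy; } MyData; /* hypothetical state */
+
+    static ErlDrvData my_start(ErlDrvPort port, char *command)
+    {
+        /* Raise the busy port queue limits to 64/128 KB. Passing
+         * ERL_DRV_BUSY_MSGQ_DISABLED instead disables the feature,
+         * and ERL_DRV_BUSY_MSGQ_READ_ONLY leaves a limit unchanged. */
+        ErlDrvSizeT low = 64*1024, high = 128*1024;
+        erl_drv_busy_msgq_limits(port, &low, &high);
+        /* low/high are updated to the limits actually in effect */
+        return (ErlDrvData) driver_alloc(sizeof(MyData));
+    }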
+
+### Preparation of Signal Send ###
+
+Previously all operations sending signals to ports began by acquiring
+the port lock, then performed preparations for sending the signal, and
+then finally sent the signal. The preparations typically included
+inspecting the state of the port, and preparing the data to pass along
+with the signal. The preparation of data is frequently quite time
+consuming, and does not really depend on the port. That is, we would
+like to do this without having the port lock locked.
+
+In order to improve this, state information was reorganized in the
+port structure, so that we can access it using atomic memory
+operations. This, together with the new port table implementation,
+enabled us to look up the port and inspect the state before acquiring
+the port lock, which in turn made it possible to perform preparations
+of signal data before acquiring the port lock.
+
+### Preserving Low Latency ###
+
+If we disregard the contended cases, we will inevitably get a higher
+latency when scheduling signals for execution at a later time than by
+executing the signal immediately. In order to preserve the low latency
+we now first check if this is a contended case or not. If it is, we
+schedule the signal for later execution; otherwise, we execute the
+signal immediately. It is a contended case if other signals are
+already scheduled on the port, or if we fail to acquire the port
+lock. That is, we will not block waiting for the lock.
+
+Doing it this way we preserve the low latency at the expense of lost
+potential parallel execution of the signal and other code in the
+process sending the signal. This default behaviour can, however, be
+changed on a per-port basis or system wide, forcing scheduling of all
+signals from processes to ports, except those that are part of a
+synchronous communication, that is, an unconditional request/response
+pair of asynchronous signals. In that case there is no potential for
+parallelism, and hence no point in forcing scheduling of the request
+signal.
+
+The immediate execution of signals may also cause a scheduler that is
+about to execute scheduled tasks to block waiting for the port
+lock. This is however more or less the only scenario where a scheduler
+needs to wait for the port lock. The maximum time it has to wait is
+the time it takes to execute one signal, since we always schedule
+signals when contention occurs.
+
+### Signal Operations ###
+
+Besides implementing the functionality enabling the scheduling,
+preparation of signal data without the port lock, etc., each operation
+sending signals to ports had to be quite extensively rewritten. This
+was needed in order to move all sub-operations that can be done
+without the lock to a place before we have acquired the lock, and also
+because signals are now sometimes executed immediately and sometimes
+scheduled for execution at a later time, which puts different
+requirements on the data to pass along with the signal.
+
+### Some Benchmark Results ###
+
+When running some simple benchmarks where contention only occurs due
+to I/O signals contending with signals from one single process, we got
+a speedup of 5-15%. When multiple processes send signals to one single
+port the improvements can be much larger, but the scenario with one
+process contending with I/O is the most common one.
+
+The benchmarks were run on a relatively new machine with an Intel i7
+quad core processor with hyper-threading using 8 schedulers. \ No newline at end of file
diff --git a/erts/emulator/internal_doc/ProcessManagementOptimizations.md b/erts/emulator/internal_doc/ProcessManagementOptimizations.md
new file mode 100644
index 0000000000..9e83633bef
--- /dev/null
+++ b/erts/emulator/internal_doc/ProcessManagementOptimizations.md
@@ -0,0 +1,172 @@
+Process Management Optimizations
+================================
+
+Problems
+--------
+
+Early versions of the SMP support for the runtime system completely
+relied on locking in order to protect data accesses from multiple
+threads. In some cases this isn't that problematic, but in some cases
+it really is. It complicates the code, which has to ensure that all
+needed locks are actually held, and that all locks are acquired in
+such an order that no deadlock can occur. Acquiring locks in the right
+order often also involves releasing locks already held, forcing
+threads to reread data already read. A good recipe for creation of
+bugs. Trying to use more fine-grained locking in order to increase
+possible parallelism in the system makes the complexity situation even
+worse. Having to acquire a bunch of locks when doing operations also
+often causes heavy lock contention, which causes poor scalability.
+
+Management of processes internally in the runtime system suffered from
+these problems. When changing the state of a process, for example from
+`waiting` to `runnable`, a lock on the process had to be locked. When
+inserting a process into a run queue, a lock protecting the run queue
+also had to be locked. When migrating a process from one run queue to
+another run queue, locks on both run queues and on the process had to
+be locked.
+
+This last example is quite a common case during normal operation. For
+example, when a scheduler thread runs out of work it tries to steal
+work from another scheduler thread's run queue. When searching for a
+victim to steal from, there was a lot of juggling of run queue locks
+involved, and the actual theft was finalized by having to lock both
+run queues and the process. When one scheduler runs out of work, often
+others do too, causing lots of lock contention.
+
+Solution
+--------
+
+### Process ###
+
+In order to avoid these situations we wanted to be able to do most of
+the fundamental operations on a process without having to acquire a
+lock on the process. Some examples of such fundamental operations are:
+moving a process between run queues, detecting if we need to insert it
+into a run queue or not, and detecting if it is alive or not.
+
+All of the information in the process structure needed by these
+operations was protected by the process `status` lock, but the
+information was spread across a number of fields. The fields used were
+typically state fields that could contain a small number of different
+states. By reordering this information a bit, we could *easily* fit it
+into a 32-bit wide field of bit flags (only 12 flags were needed). By
+moving this information we could remove five 32-bit wide fields and
+one pointer field from the process structure! The move also enabled us
+to easily read and change the state using atomic memory operations.
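+
+A stripped-down sketch of the idea in C11 (the flag names mimic the
+style of the real `ERTS_PSFLG_*` flags, but the code is illustrative,
+not the ERTS source):
+
+    #include <stdatomic.h>
+
+    #define PSFLG_ACTIVE   (1u << 0)   /* process wants to run */
+    #define PSFLG_IN_RUNQ  (1u << 1)   /* already enqueued somewhere */
+    #define PSFLG_RUNNING  (1u << 2)   /* currently executing */
+    #define PSFLG_EXITING  (1u << 3)   /* terminating */
+    /* ... all 12 flags fit comfortably in one 32-bit word ... */
+
+    typedef struct { atomic_uint state; /* ... */ } Process;
+
+    /* Make a process runnable and tell the caller whether it needs
+     * to be enqueued -- decided without taking any lock. */
+    static int make_runnable(Process *p)
+    {
+        unsigned old = atomic_fetch_or(&p->state, PSFLG_ACTIVE);
+        return !(old & (PSFLG_ACTIVE | PSFLG_IN_RUNQ
+                        | PSFLG_RUNNING | PSFLG_EXITING));
+    }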
+
+### Run Queue ###
+
+As with processes, we wanted to be able to do the most fundamental
+operations on a run queue without having to acquire a lock on it. The
+most important one is being able to determine whether we should
+enqueue a process in a specific run queue or not. This involves being
+able to read actual load and load balancing information.
+
+The load balancing functionality is triggered at repeated fixed
+intervals. The load balancing more or less strives to even out run
+queue lengths over the system. When balancing is triggered,
+information about every run queue is gathered, and migration paths and
+run queue length limits are set up. Migration paths and limits are
+fixed until the next balancing has been done. The most important
+information about each run queue is the maximum run queue length since
+the last balancing. All of this information was previously stored in
+the run queues themselves.
+
+When a process has become runnable, for example due to reception of a
+message, we need to determine which run queue to enqueue it
+in. Previously this at least involved locking the run queue that the
+process was currently assigned to, while holding the status lock on
+the process. Depending on load, we sometimes also had to acquire a
+lock on another run queue in order to determine if the process should
+be migrated to that run queue or not.
+
+In order to be able to decide which run queue to use without having to
+lock any run queues, we moved all fixed balancing information out of
+the run queues into a global memory block, that is, migration paths
+and run queue limits. Information that needs to be frequently updated,
+like for example the maximum run queue length, was kept in the run
+queue, but instead of operating on this information under locks we now
+use atomic memory operations when accessing it. This made it possible
+to first determine which run queue to use without locking any run
+queues, and then, once decided, lock the chosen run queue and insert
+the process.
+
+#### Fixed Balancing Information ####
+
+When determining which run queue to choose, we need to read the fixed
+balancing information that we moved out of the run queues. This
+information is global, read only between load balancing operations,
+but changed during a load balancing. We do not want to introduce a
+global lock that needs to be acquired when accessing this
+information. A reader optimized rwlock could avoid some of the
+overhead, since the data is most frequently read, but it would
+unavoidably cause disruption during load balancing, since this
+information is very frequently read. The likelihood of a large
+disruption due to this also increases as the number of schedulers
+grows.
+
+Instead of using a global lock protecting modifications of this
+information, we write a completely new version of it at each load
+balancing. The new version is written in another memory block than the
+previous one, and published by issuing a write memory barrier and then
+storing a pointer to the new memory block in a global variable using
+an atomic write operation.
+
+When schedulers need to read this information, they read the pointer
+to the currently used information using an atomic read operation, and
+then issue a data dependency read barrier, which on most architectures
+is a no-op. That is, there is very little overhead in getting access
+to this information.
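+
+In C11 terms the pattern looks roughly like this (a sketch; the VM
+uses its own atomic and barrier primitives rather than
+`<stdatomic.h>`):
+
+    #include <stdatomic.h>
+
+    typedef struct {
+        /* migration paths, run queue length limits, ... */
+    } BalanceInfo;
+
+    static _Atomic(BalanceInfo *) balance_info;
+
+    /* Load balancer: build a completely new block and publish it.
+     * The release store implies the write barrier ordering all
+     * stores to *fresh before the pointer becomes visible. */
+    static void publish_balance_info(BalanceInfo *fresh)
+    {
+        atomic_store_explicit(&balance_info, fresh,
+                              memory_order_release);
+    }
+
+    /* Scheduler: read the pointer with a data dependency barrier,
+     * which is a no-op on most architectures. */
+    static const BalanceInfo *read_balance_info(void)
+    {
+        return atomic_load_explicit(&balance_info,
+                                    memory_order_consume);
+    }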
+
+Instead of allocating and deallocating memory blocks for the different
+versions of the balancing information we keep old memory blocks and
+reuse them when it is safe to do so. In order to be able to determine
+when it is safe to reuse a block we use the thread progress
+functionality, ensuring that no threads have any references to the
+memory block when we reuse it.
+
+#### Be Less Aggressive ####
+
+We implemented a test version using lock free run queues. This
+implementation did, however, not perform as well as the version using
+one lock per run queue. The reason for this was not investigated
+thoroughly enough to say why. Since the locked version performed
+better we kept it, at least for now. The lock free version, however,
+forced us to use other solutions, some of which we kept.
+
+Previously, when a process that was in a run queue got suspended, we
+removed it from the queue straight away. This involved locking the
+process, locking the run queue, and then unlinking it from the doubly
+linked list implementing the queue. Removing a process from a lock
+free queue gets really complicated. Instead of removing it from the
+queue, we just leave it in the queue and mark it as suspended. When it
+is later selected for execution, we check if the process is suspended;
+if so, we just drop it. During its time in the queue it might also get
+resumed again; if so, we execute it when it gets selected for
+execution.
+
+By keeping this part when reverting back to a locked implementation,
+we could remove a pointer field from each process structure, and avoid
+unnecessary operations on the process and the queue which might cause
+contention.
+
+### Combined Modifications ###
+
+By combining the modifications of the process state management and the
+run queue management, we can do large parts of the work involved in
+managing processes, with regard to scheduling and migration, without
+having any locks locked at all. In these situations we previously had
+to have multiple locks locked. This of course caused a lot of rewrites
+across large parts of the runtime system, but the rewrite both
+simplified code and eliminated locking in a number of places. The
+major benefit is, of course, reduced contention.
+
+### A Benchmark Result ###
+
+When running the chameneosredux benchmark, schedulers frequently run
+out of work and try to steal work from each other; that is, they
+either succeed in migrating processes or try to migrate them, which
+is a scenario we wanted to optimize. By introducing these
+improvements, we got a speedup of 25-35% when running this benchmark
+on a relatively new machine with an Intel i7 quad core processor with
+hyper-threading using 8 schedulers. \ No newline at end of file
diff --git a/erts/emulator/internal_doc/ThreadProgress.md b/erts/emulator/internal_doc/ThreadProgress.md
new file mode 100644
index 0000000000..6118bcf0f6
--- /dev/null
+++ b/erts/emulator/internal_doc/ThreadProgress.md
@@ -0,0 +1,308 @@
+Thread Progress
+===============
+
+Problems
+--------
+
+### Knowing When Threads Have Completed Accesses to a Data Structure ###
+
+When multiple threads access the same data structure you often need to
+know when all threads have completed their accesses, for example in
+order to know when it is safe to deallocate the data structure. One
+simple way to accomplish this is to reference count all accesses to
+the data structure. The problem with this approach is that the cache
+line where the reference counter is located needs to be communicated
+between all involved processors. Such communication can become
+extremely expensive and will scale poorly if the reference counter is
+frequently accessed. That is, we want some approach other than
+reference counting for keeping track of threads.
+
+### Knowing That Modifications of Memory are Consistently Observed ###
+
+Different hardware architectures have different memory models. Some
+architectures allow very aggressive reordering of memory accesses,
+while other architectures only reorder a few specific cases. Common to
+all modern hardware is, however, that some type of reordering will
+occur. When using locks to protect all memory accesses made from
+multiple threads, such reorderings will not be visible. The locking
+primitives will ensure that the memory accesses are ordered. When
+using lock free algorithms, one does however have to take this
+reordering made by the hardware into account.
+
+Hardware memory barriers or memory fences are instructions that can be
+used to enforce order between memory accesses. Different hardware
+architectures provide different memory barriers. Lock free algorithms
+need to use memory barriers in order to ensure that memory accesses
+are not reordered in such ways that the algorithm breaks down. Memory
+barriers are also expensive instructions, so you typically want to
+minimize the use of these instructions.
+
+Functionality Used to Address These Problems
+-------------------------------------------
+
+The "thread progress" functionality in the Erlang VM is used to
+address these problems. The name "thread progress" was chosen since we
+want to use it to determine when all threads in a set of threads have
+made such progress so that two specific events have taken place for
+all them.
+
+The set of threads that we are interested in we call managed
+threads. The managed threads are the only threads that we get any
+information about. These threads *have* to report progress
+frequently. Not all threads in the system are able to report progress
+frequently. Such threads cannot be allowed in the set of managed
+threads and are called unmanaged threads. An example of unmanaged
+threads is the threads in the async thread pool. Async threads can be
+blocked for very long times and thereby be prevented from frequently
+reporting progress. Currently only scheduler threads and a couple of
+other threads are managed threads.
+
+### Thread Progress Events ###
+
+Any thread in the system may use the thread progress functionality in
+order to determine when the following events have occurred at least
+once in all managed threads:
+
+1. The thread has returned from other code to a known state in the
+ thread progress functionality, which is independent of any other
+ code.
+2. The thread has executed a full memory barrier.
+
+These events, of course, need to be ordered relative to other memory
+operations. The operation of determining this begins by initiating the
+thread progress operation. The thread that initiated the thread
+progress operation then polls for the completion of the
+operation. Both of these events must occur at least once *after* the
+thread progress operation has been initiated, and at least once
+*before* the operation has completed, in each managed thread. This is
+ordered using communication via memory, which makes it possible to
+draw conclusions about the memory state after the thread progress
+operation has completed. Let's call the progress made from initiation
+to completion "thread progress".
+
+Assuming that the thread progress functionality is efficient, a lot of
+algorithms can be both simplified and made more efficient compared to
+the first approach that comes to mind. A couple of examples follow.
+
+By being able to determine when the first event above has occurred, we
+can easily know when all managed threads have completed their accesses
+to a data structure. This can be determined in the following way. We
+have an implementation of some functionality `F` using a data
+structure `D`. The reference to `D` is always looked up before `D` is
+accessed, and the references to `D` are always dropped before we leave
+the code implementing `F`. If we remove the possibility to look up `D`
+and then wait until the first event has occurred in all managed
+threads, no managed threads can have any references to the data
+structure `D`. This could, for example, have been achieved by using
+reference counting, but the cache line containing the reference
+counter would in this case be ping-ponged between all processors
+accessing `D` at every access.
+
+By being able to determine when the second event has occurred, it is
+quite easy to do complex modifications of memory that need to be seen
+consistently by other threads, without having to resort to locking. By
+doing the modifications, then issuing a full memory barrier, then
+waiting until the second event has occurred in all managed threads,
+and then publishing the modifications, we know that all managed
+threads reading this memory will get a consistent view of the
+modifications. Managed threads reading this will not have to issue any
+extra memory barriers at all.
+
+Implementation of the Thread Progress Functionality
+---------------------------------------------------
+
+### Requirements on the Implementation ###
+
+In order to be able to determine when all managed threads have reached
+the states that we are interested in, we need to communicate between
+all involved threads. We of course want to minimize this
+communication.
+
+We also want threads to be able to determine when thread progress has
+been made relatively fast. That is, we need to have some balance
+between communication overhead and the time to complete the operation.
+
+### API ###
+
+I will only present the most important functions in the API here.
+
+* `ErtsThrPrgrVal erts_thr_progress_later(void)` - Initiation of the
+  operation. The thread progress value returned can be used for
+  testing completion of the operation.
+* `int erts_thr_progress_has_reached(ErtsThrPrgrVal val)` - Returns
+  a non-zero value when we have reached the thread progress value
+  passed as argument. That is, when a non-zero value is returned, the
+  operation has completed.
+
+When a thread calls `my_val = erts_thr_progress_later()` and waits for
+`erts_thr_progress_has_reached(my_val)` to return a non-zero value, it
+knows that thread progress has been made.
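+
+As a sketch, deferred deallocation of a data structure `D` then
+follows this pattern (illustrative code; `MyData`, `do_other_work()`
+and `deallocate()` are placeholders):
+
+    static void release_D_when_safe(MyData *D)
+    {
+        /* All possibilities to look up D have already been removed. */
+        ErtsThrPrgrVal val = erts_thr_progress_later();
+
+        while (!erts_thr_progress_has_reached(val))
+            do_other_work();  /* or block, after requesting a wakeup */
+
+        /* No managed thread can hold a reference to D anymore. */
+        deallocate(D);
+    }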
+
+While waiting for `erts_thr_progress_has_reached()` to return a
+non-zero value, we typically do not want to block, but instead want to
+continue working on other things. If we run out of other things to
+work on, we typically do want to block waiting until we have reached
+the thread progress value that we are waiting for. In order to be able
+to do this, we provide functionality for waking up a thread when a
+certain thread progress value has been reached:
+
+* `void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+  ErtsThrPrgrVal val)` - Request wake up. The calling thread will be
+  woken when thread progress has reached `val`.
+
+Managed threads frequently need to update their thread progress by
+calling the following functions:
+
+* `int erts_thr_progress_update(ErtsSchedulerData *esdp)` - Update
+  thread progress. If a non-zero value is returned,
+  `erts_thr_progress_leader_update()` has to be called without any
+  locks held.
+* `int erts_thr_progress_leader_update(ErtsSchedulerData *esdp)` -
+ Leader update thread progress.
+
+Unmanaged threads can delay thread progress being made:
+
+* `ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay(void)` -
+ Delay thread progress.
+* `void erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle
+ handle)` - Let thread progress continue.
+
+Scheduler threads can schedule an operation to be executed by the
+scheduler itself when thread progress has been made:
+
+* `void erts_schedule_thr_prgr_later_op(void (*funcp)(void *), void
+ *argp, ErtsThrPrgrLaterOp *memp)` - Schedule a call to `funcp`. The
+ call `(*funcp)(argp)` will be executed when thread progress has been
+ made since the call to `erts_schedule_thr_prgr_later_op()` was
+ made.
+
+### Implementation ###
+
+In order to determine when the events have happened, we use a global
+counter that is incremented when all managed threads have called
+`erts_thr_progress_update()` (or `erts_thr_progress_leader_update()`).
+This could naively be implemented using a "thread confirmed" counter,
+but that would cause an explosion of communication where all involved
+processors would need to communicate with each other at each
+update.
+
+Instead of confirming at a global location, each thread confirms that
+it accepts an increment of the global counter in its own cache
+line. These confirmation cache lines are located in sequence in an
+array, and each confirmation cache line will only ever be written by
+one single thread. One of the managed threads always has the leader
+responsibility. This responsibility may jump between threads, but as
+long as there is some activity in the system, one of them will always
+have the leader responsibility. The thread with the leader
+responsibility will call `erts_thr_progress_leader_update()`, which
+will check that all other threads have confirmed an increment of the
+global counter before doing the increment of the global counter. The
+leader thread is the only thread reading the confirmation cache
+lines.
+
+Doing it this way, we get a communication pattern of information going
+from the leader thread out to all other managed threads, and then back
+from the other threads to the leader thread. This is because only the
+leader thread writes to the global counter while all other threads
+only read it, and because each confirmation cache line is written by
+one specific thread and read only by the leader thread. When the
+managed threads are distributed over different processors, the
+communication between processors will be a reflection of this
+communication pattern between threads.
+
+The value returned from `erts_thr_progress_later()` equals this
+thread's latest confirmed value plus two. The global value may be the
+latest confirmed value, or the latest confirmed value minus one. In
+order to be certain that all other managed threads actually will call
+`erts_thr_progress_update()` at least once before we reach the value
+returned from `erts_thr_progress_later()`, the global counter plus one
+is not enough. This is because all other threads may already have
+confirmed the current global value plus one at the time when we call
+`erts_thr_progress_later()`. They are, however, guaranteed not to have
+confirmed the global value plus two at this time.
+
+The implementation described above more or less minimizes the
+communication needed before we can increment the global counter. The
+amount of communication in the system due to the thread progress
+functionality, however, also depends on the frequency with which
+managed threads call `erts_thr_progress_update()`. Today each
+scheduler thread calls `erts_thr_progress_update()` more or less each
+time an Erlang process is scheduled out. One way of further reducing
+communication due to the thread progress functionality is to only call
+`erts_thr_progress_update()` every second or third time an Erlang
+process is scheduled out, or even less frequently than that. However,
+by doing updates of thread progress less frequently, all operations
+depending on the thread progress functionality will also take longer.
+
+#### Delay of Thread Progress by Unmanaged Threads ####
+
+In order to implement delay of thread progress from unmanaged threads,
+we use two reference counters, one being `current` and one being
+`waiting`. When an unmanaged thread wants to delay thread progress, it
+increments `current` and gets back a handle to the reference counter
+it incremented. When it later wants to enable continuation of thread
+progress, it uses the handle to decrement the reference counter it
+previously incremented.
+
+When the leader thread is about to increment the global thread
+progress counter, it verifies that the `waiting` counter is zero
+before doing so. If not zero, the leader isn't allowed to increment
+the global counter, and needs to wait before it can do so. When it is
+zero, it swaps the `waiting` and `current` counters before increasing
+the global counter. From now on the new `waiting` counter will only
+decrease, so that it eventually will reach zero, making it possible to
+increment the global counter the next time. If we had used only one
+reference counter, it could potentially be held above zero forever by
+different unmanaged threads.
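+
+A sketch of the two counters and the swap, in illustrative C11 (the
+real implementation must additionally handle the race between reading
+the index and incrementing the counter):
+
+    #include <stdatomic.h>
+
+    static atomic_int refc[2];      /* the two reference counters */
+    static atomic_int current_ix;   /* which of them is `current` */
+
+    /* Unmanaged thread: delay thread progress; the returned index
+     * is the handle used to later enable continuation. */
+    static int unmanaged_delay(void)
+    {
+        int ix = atomic_load(&current_ix);
+        atomic_fetch_add(&refc[ix], 1);
+        return ix;
+    }
+
+    static void unmanaged_continue(int handle)
+    {
+        atomic_fetch_sub(&refc[handle], 1);
+    }
+
+    /* Leader: may the global counter be incremented? */
+    static int may_increment_global(void)
+    {
+        int waiting_ix = !atomic_load(&current_ix);
+        if (atomic_load(&refc[waiting_ix]) != 0)
+            return 0;               /* need to wait */
+        /* swap waiting and current; the new waiting counter can
+         * now only decrease towards zero */
+        atomic_store(&current_ix, waiting_ix);
+        return 1;
+    }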
+
+When an unmanaged thread increments the `current` counter, it will not
+prevent the next increment of the global counter, but instead the
+increment after that. This is sufficient since the global counter
+needs to be incremented twice before thread progress has been made. It
+is also desirable not to prevent the first increment, since this
+increases the likelihood that the delay is withdrawn before any
+increment of the global counter is actually delayed. That is, the
+operation will cause as little disruption as possible.
+
+However, this feature of delaying thread progress from unmanaged
+threads should preferably be used as little as possible, since heavy
+use of it will cause contention on the reference counter cache
+lines. The functionality is, however, very useful in code which
+normally only executes in managed threads, but which under some
+infrequent circumstances may be executed in other threads.
+
+#### Overhead ####
+
+The overhead caused by the thread progress functionality is more or
+less fixed, for a given number of schedulers, regardless of how much
+the functionality is used. Already today quite a lot of functionality
+uses it, and we plan to use it even more. When rewriting old
+implementations of ERTS internal functionality to use the thread
+progress functionality, we remove communication present in the old
+implementation; otherwise there is simply no point in rewriting the
+old implementation to use the thread progress functionality. Since the
+thread progress overhead is more or less fixed, such a rewrite will
+cause a reduction of the total communication in the system.
+
+##### An Example #####
+
+The main structure of an ETS table was originally managed using
+reference counting. Already a long time ago we replaced this strategy,
+since the reference counter caused contention on each access of the
+table. The solution used was to schedule "confirm deletion" jobs on
+each scheduler in order to know when it was safe to deallocate the
+table structure of a removed table. These confirm deletion jobs needed
+to be allocated. That is, we had to allocate and deallocate as many
+blocks as there are schedulers in order to deallocate one block. This
+was of course quite an expensive operation, but we only needed to do
+it once when removing a table. It was more important to get rid of the
+contention on the reference counter, which was present on every
+operation on the table.
+
+When the thread progress functionality had been introduced, we could
+remove the code implementing the "confirm deletion" jobs, and instead
+just schedule a thread progress later operation which deallocates the
+structure. Besides simplifying the code a lot, we got an increase of
+more than 10% in the number of transactions per second handled on a
+mnesia tpcb benchmark executing on a quad core machine.
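+
+With the thread progress functionality, the whole mechanism boils
+down to something like the following sketch (hypothetical names;
+`later_op` is assumed to be an `ErtsThrPrgrLaterOp` field embedded in
+the table structure):
+
+    /* Executed by the scheduler once thread progress has been made,
+     * i.e. when no managed thread can still reference the table. */
+    static void dealloc_table(void *vtb)
+    {
+        MyDbTable *tb = vtb;
+        free_table_structure(tb);   /* hypothetical */
+    }
+
+    /* Called when the table is removed, instead of scheduling one
+     * "confirm deletion" job per scheduler. */
+    static void schedule_dealloc_table(MyDbTable *tb)
+    {
+        erts_schedule_thr_prgr_later_op(dealloc_table, tb,
+                                        &tb->later_op);
+    }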
diff --git a/erts/emulator/internal_doc/Tracing.md b/erts/emulator/internal_doc/Tracing.md
new file mode 100644
index 0000000000..30bc5327a7
--- /dev/null
+++ b/erts/emulator/internal_doc/Tracing.md
@@ -0,0 +1,220 @@
+Non-blocking trace setting
+==========================
+
+Introduction
+------------
+
+Before OTP R16, when trace settings were changed by
+`erlang:trace_pattern`, all other execution in the VM was halted while
+the trace operation was carried out in single threaded mode. Similar
+to code loading, this can impose a severe problem for availability
+that grows with the number of cores.
+
+In OTP R16, trace breakpoints are set in the code without blocking the
+VM. Erlang processes may continue executing undisturbed in parallel
+during the entire operation. The same base technique is used as for
+code loading. A staging area of breakpoints is prepared and then made
+active with a single atomic operation.
+
+
+Redesign of Breakpoint Wheel
+----------------------------
+
+To make it easier to manage breakpoints without single threaded mode, a
+redesign of the breakpoint mechanism has been made. The old
+"breakpoint wheel" data structure was a circular double-linked list of
+breakpoints for each instrumented function. It was invented before the
+SMP emulator. To support it in the SMP emulator, it was essentially
+expanded to one breakpoint wheel per scheduler. As more breakpoint
+types have been added, the implementation has become messy and hard
+to understand and maintain.
+
+In the new design the old wheel was dropped and instead replaced by
+one struct (`GenericBp`) holding the data for all types of breakpoints
+for each instrumented function. A bit-flag field is used to indicate
+which different types of break actions are enabled.
+
+
+Same Same but Different
+-----------------------
+Even though `trace_pattern` uses the same technique as the non-blocking
+code loading, with replicated generations of data structures and an
+atomic switch, the implementations are quite separate from each
+other. One initial idea was to use the existing mechanism of code
+loading to do a dummy load operation that would make a copy of the
+affected modules. That copy could then be instrumented with
+breakpoints before making it reachable with the same atomic switch as
+done for code loading. This approach seems straightforward, but has a
+number of shortcomings, one being the large memory footprint when many
+modules are instrumented. Another problem is how execution would reach
+the new instrumented code. Normally loaded code can only be reached
+through external function calls. Trace settings must be activated
+instantaneously without the need of external function calls.
+
+The chosen solution is instead for tracing to use the technique of
+replication applied to the data structures for breakpoints. Two
+generations of breakpoints are kept, identified by the indexes 0 and
+1. The global atomic variable `erts_active_bp_index` determines
+which generation of breakpoints running code will use.
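+
+In outline, the structure looks something like this sketch (the
+actual definitions live in `beam_bp.h` and differ in detail):
+
+    typedef struct {
+        Uint flags;            /* bit flags: which break actions are on */
+        /* ... data for each breakpoint type (trace, count, ...) ... */
+    } GenericBpData;
+
+    typedef struct {
+        BeamInstr orig_instr;  /* the saved original instruction */
+        GenericBpData data[2]; /* generation 0 and generation 1 */
+    } GenericBp;
+
+    /* Sketch of how op_i_generic_breakpoint picks the active
+     * generation: */
+    static GenericBpData *active_bp_data(GenericBp *bp)
+    {
+        erts_aint32_t ix =
+            erts_smp_atomic32_read_nob(&erts_active_bp_index);
+        return &bp->data[ix];
+    }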
+
+### Atomicity Without Atomic Operations
+
+Not using the code loading generations (or any other code duplication)
+means that `trace_pattern` must at some point write to the active beam
+code in order for running processes to reach the staged breakpoint
+structures. This can be done with one single atomic write operation
+per instrumented function. The beam instruction words are however read
+with normal memory loads and not through the atomic API. The only
+guarantee we need is that the written instruction word is seen as
+atomic: either fully written or not at all. This is true for word
+aligned write operations on all hardware architectures we use.
+
+
+Adding a new Breakpoint
+-----------------------
+This is a simplified sequence describing what `trace_pattern` goes
+through when adding a new breakpoint.
+
+1. Seize exclusive code write permission (suspend process until we get it).
+
+2. Allocate breakpoint structure `GenericBp` including both generations.
+ Set the active part as disabled with a zeroed flagfield. Save the original
+ instruction word in the breakpoint.
+
+3. Write a pointer to the breakpoint at offset -4 from the first
+ instruction "func_info" header.
+
+4. Set the staging part of the breakpoint as enabled with specified
+ breakpoint data.
+
+5. Wait for thread progress.
+
+6. Write a `op_i_generic_breakpoint` as the first instruction for the function.
+ This instruction will execute the breakpoint that it finds at offset -4.
+
+7. Wait for thread progress.
+
+8. Commit the breakpoint by switching `erts_active_bp_index`.
+
+9. Wait for thread progress.
+
+10. Prepare for the next call to `trace_pattern` by updating the new
+    staging part (the old active) of the breakpoint to be identical to
+    the new active part.
+
+11. Release code write permission and return from `trace_pattern`.
+
+
+The code write permission "lock" seized in step 1 is the same as used
+by code loading. It ensures that only one process at a time can stage
+new trace settings, but it also prevents concurrent code loading and
+makes sure we see a consistent view of the beam code during the entire
+sequence.
+
+Between step 6 and 8, running processes might execute the written
+`op_i_generic_breakpoint` instruction. They will get the breakpoint
+structure written in step 3, read `erts_active_bp_index` and execute
+the corresponding part of the breakpoint. Before the switch in step 8
+becomes visible, they will however execute the disabled part of the
+breakpoint structure and do nothing other than executing the saved
+original instruction.
+
+
+Updating and Removing Breakpoints
+---------------------------------
+
+The above sequence only describes adding a new breakpoint. We do
+basically the same sequence to update the settings of an existing
+breakpoint, except that steps 2, 3 and 6 can be skipped as they have
+already been done.
+
+To remove a breakpoint some more steps are needed. The idea is to
+first stage the breakpoint as disabled, do the switch, wait for thread
+progress and then remove the disabled breakpoint by restoring the
+original beam instruction.
+
+Here is a more complete sequence that contains both adding, updating
+and removing breakpoints.
+
+1. Seize exclusive code write permission (suspend process until we get it).
+
+2. Allocate new breakpoint structures with a disabled active part and
+ the original beam instruction. Write a pointer to the breakpoint in
+ "func_info" header at offset -4.
+
+3. Update the staging part of all affected breakpoints. Disable
+ breakpoints that are to be removed.
+
+4. Wait for thread progress.
+
+5. Write a `op_i_generic_breakpoint` as the first instruction for all
+ functions with new breakpoints.
+
+6. Wait for thread progress.
+
+7. Commit all staged breakpoints by switching `erts_active_bp_index`.
+
+8. Wait for thread progress.
+
+9. Restore original beam instruction for disabled breakpoints.
+
+10. Wait for thread progress.
+
+11. Prepare for next call to `trace_pattern` by updating the new
+ staging area (the old active) for all enabled breakpoints.
+
+12. Deallocate disabled breakpoint structures.
+
+13. Release code write permission and return from `trace_pattern`.
+
+
+### All that Waiting for Thread Progress
+
+There are four rounds of waiting for thread progress in the above
+sequence. In the code loading sequence we sacrificed the memory
+overhead of three generations to avoid a second round of thread
+progress. The latency of `trace_pattern` should, however, not be such
+a big problem, as it is normally not called in rapid sequence.
+
+The waiting in step 4 is to make sure all threads will see an updated
+view of the breakpoint structures once they become reachable through
+the `op_i_generic_breakpoint` instruction written in step 5.
+
+The waiting in step 6 is to make the activation of the new trace
+settings "as atomic as possible". Different cores might see the new
+value of `erts_active_bp_index` at different times as it is read
+without any memory barrier. But this is the best we can do without
+more expensive thread synchronization.
+
+The waiting in step 8 is to make sure we don't restore the original
+beam instructions for disabled breakpoints until we know that no
+thread is still accessing the old enabled part of a disabled
+breakpoint.
+
+The waiting in step 10 is to make sure no lingering thread is still
+accessing disabled breakpoint structures to be deallocated in step
+12.
+
+
+Global Tracing
+--------------
+
+Call tracing with the `global` option only affects external function
+calls. This was earlier handled by inserting a special trace
+instruction in export entries without the use of breakpoints. With the
+new non-blocking tracing we want to avoid special handling for global
+tracing and instead make use of the staging and atomic switching
+within the breakpoint mechanism. The solution was to create the same
+type of breakpoint structure for a global call trace. The difference
+from local tracing is that we insert the `op_i_generic_breakpoint`
+instruction (with its pointer at offset -4) in the export entry rather
+than in the code.
+
+
+Future work
+-----------
+
+We still go to single threaded mode when new code is loaded for a
+module that is traced, or when loading code while there is a default
+trace pattern set. That is not impossible to fix, but it would require
+much closer cooperation between the tracing BIFs and the loader BIFs.
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 61f9f6a59a..59e34eb819 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -547,6 +547,25 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
+
+ {
+ /*
+ * Unfortunately we depend on fd 0,1,2 in the old shell code.
+ * So if for some reason we do not have those open when we start
+ * we have to open them here. Not doing this can cause the emulator
+ * to deadlock when reaping the fd_driver ports :(
+ */
+ int fd;
+ /* Make sure fd 0 is open */
+ if ((fd = open("/dev/null", O_RDONLY)) != 0)
+ close(fd);
+ /* Make sure fds 1 and 2 are open */
+ while (fd < 3) {
+ fd = open("/dev/null", O_WRONLY);
+ }
+ close(fd);
+ }
+
}
void
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 81539faa09..6a43e2b0e7 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -1495,7 +1495,7 @@ mcall(Node, Funs) ->
end, Refs).
erl_rel_flag_var() ->
- "ERL_"++erlang:system_info(otp_release)++"_FLAGS".
+ "ERL_OTP"++erlang:system_info(otp_release)++"_FLAGS".
clear_erl_rel_flags() ->
EnvVar = erl_rel_flag_var(),
diff --git a/erts/emulator/utils/make_version b/erts/emulator/utils/make_version
index 7757fa8138..02b68f2b39 100755
--- a/erts/emulator/utils/make_version
+++ b/erts/emulator/utils/make_version
@@ -41,6 +41,9 @@ if ($ARGV[0] eq '-o') {
my $release = shift;
defined $release or die "No release specified";
+my $correction_package = shift;
+defined $correction_package or die "No correction package specified";
+
my $version = shift;
defined $version or die "No version name specified";
@@ -53,6 +56,7 @@ open(FILE, ">$outputfile") or die "Can't create $outputfile: $!";
print FILE <<EOF;
/* This file was created by 'make_version' -- don't modify. */
#define ERLANG_OTP_RELEASE "$release"
+#define ERLANG_OTP_CORRECTION_PACKAGE "$correction_package"
#define ERLANG_VERSION "$version"
#define ERLANG_COMPILE_DATE "$time_str"
#define ERLANG_ARCHITECTURE "$architecture"
diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c
index 8817bde8d7..bd30bc35d9 100644
--- a/erts/epmd/src/epmd_cli.c
+++ b/erts/epmd/src/epmd_cli.c
@@ -118,7 +118,7 @@ void epmd_call(EpmdVars *g,int what)
if (!g->silent) {
rval = erts_snprintf(buf, OUTBUF_SIZE,
"epmd: up and running on port %d with data:\n", j);
- write(1, buf, rval);
+ fwrite(buf, 1, rval, stdout);
}
while(1) {
if ((rval = read(fd,buf,OUTBUF_SIZE)) <= 0) {
@@ -126,7 +126,7 @@ void epmd_call(EpmdVars *g,int what)
epmd_cleanup_exit(g,0);
}
if (!g->silent)
- write(1, buf, rval); /* Potentially UTF-8 encoded */
+ fwrite(buf, 1, rval, stdout); /* Potentially UTF-8 encoded */
}
}
diff --git a/erts/epmd/test/epmd_SUITE.erl b/erts/epmd/test/epmd_SUITE.erl
index cc24a556a3..a752abf33b 100644
--- a/erts/epmd/test/epmd_SUITE.erl
+++ b/erts/epmd/test/epmd_SUITE.erl
@@ -69,6 +69,8 @@
returns_valid_empty_extra/1,
returns_valid_populated_extra_with_nulls/1,
+ names_stdout/1,
+
buffer_overrun_1/1,
buffer_overrun_2/1,
no_nonlocal_register/1,
@@ -118,6 +120,7 @@ all() ->
too_large, alive_req_too_small_1, alive_req_too_small_2,
alive_req_too_large, returns_valid_empty_extra,
returns_valid_populated_extra_with_nulls,
+ names_stdout,
{group, buffer_overrun}, no_nonlocal_register,
no_nonlocal_kill, no_live_killing].
@@ -759,6 +762,24 @@ returns_valid_populated_extra_with_nulls(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+names_stdout(doc) ->
+ ["Test that epmd -names prints registered nodes to stdout"];
+names_stdout(suite) ->
+ [];
+names_stdout(Config) when is_list(Config) ->
+ ?line ok = epmdrun(),
+ ?line {ok,Sock} = register_node("foobar"),
+ ?line ok = epmdrun("-names"),
+ ?line {ok, Data} = receive {_Port, {data, D}} -> {ok, D}
+ after 10000 -> {error, timeout}
+ end,
+ ?line {match,_} = re:run(Data, "^epmd: up and running", [multiline]),
+ ?line {match,_} = re:run(Data, "^name foobar at port", [multiline]),
+ ?line ok = close(Sock),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
buffer_overrun_1(suite) ->
[];
buffer_overrun_1(doc) ->
@@ -968,7 +989,7 @@ epmdrun(Epmd,Args0) ->
O ->
" "++O
end,
- osrun("\"" ++ Epmd ++ "\"" ++ Args ++ " " ?EPMDARGS " -port " ++ integer_to_list(?PORT)).
+ osrun("\"" ++ Epmd ++ "\"" ++ " " ?EPMDARGS " -port " ++ integer_to_list(?PORT) ++ Args).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index 1d7811d570..c30203c632 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -1972,35 +1972,8 @@ get_file_args(char *filename, argv_buf *abp, argv_buf *xabp)
}
static void
-write_erl_otp_flags(char *bufp)
-{
- /* ERL_OTP<MAJOR-VSN>_FLAGS */
- int ix = 0;
- char *otp_p;
- char otp[] = OTP_SYSTEM_VERSION;
-
- bufp[ix++] = 'E';
- bufp[ix++] = 'R';
- bufp[ix++] = 'L';
- bufp[ix++] = '_';
- bufp[ix++] = 'O';
- bufp[ix++] = 'T';
- bufp[ix++] = 'P';
- for (otp_p = &otp[0]; '0' <= *otp_p && *otp_p <= '9'; otp_p++)
- bufp[ix++] = *otp_p;
- bufp[ix++] = '_';
- bufp[ix++] = 'F';
- bufp[ix++] = 'L';
- bufp[ix++] = 'A';
- bufp[ix++] = 'G';
- bufp[ix++] = 'S';
- bufp[ix] = '\0';
-}
-
-static void
initial_argv_massage(int *argc, char ***argv)
{
- char erl_otp_flags_buf[] = "ERL_OTP" OTP_SYSTEM_VERSION "_FLAGS";
argv_buf ab = {0}, xab = {0};
int ix, vix, ac;
char **av;
@@ -2016,8 +1989,7 @@ initial_argv_massage(int *argc, char ***argv)
vix = 0;
- write_erl_otp_flags(erl_otp_flags_buf);
- av = build_args_from_env(erl_otp_flags_buf);
+ av = build_args_from_env("ERL_OTP" OTP_SYSTEM_VERSION "_FLAGS");
if (av)
avv[vix++].argv = av;
diff --git a/erts/etc/win32/erlang.ico b/erts/etc/win32/erlang.ico
index cee8b58af9..7b62d31aa9 100644
--- a/erts/etc/win32/erlang.ico
+++ b/erts/etc/win32/erlang.ico
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 73fac27161..3c77d6ae0f 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 0ed677c3d8..f99d5bfdd0 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -2246,6 +2246,7 @@ tuple_to_list(_Tuple) ->
(modified_timing_level) -> integer() | undefined;
(multi_scheduling) -> disabled | blocked | enabled;
(multi_scheduling_blockers) -> [PID :: pid()];
+ (otp_correction_package) -> string();
(otp_release) -> string();
(port_count) -> non_neg_integer();
(port_limit) -> pos_integer();
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 30aa870144..8e77a9a26e 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,11 @@
#
VSN = 6.0
-SYSTEM_VSN = 17.0-rc0
+
+# OTP major version
+SYSTEM_VSN = 17
+# OTP correction package version
+SYSTEM_CP_VSN = 17.0-rc0
# Port number 4365 in 4.2
# Port number 4366 in 4.3
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index 6b0ae87172..e2002c8e48 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -1452,14 +1452,14 @@ let_subst_list([], [], _) -> {[],[],[]}.
%%pattern(Pat, Sub) -> pattern(Pat, Sub, Sub).
-pattern(#c_var{name=V0}=Pat, Isub, Osub) ->
+pattern(#c_var{}=Pat, Isub, Osub) ->
case sub_is_val(Pat, Isub) of
true ->
V1 = make_var_name(),
Pat1 = #c_var{name=V1},
{Pat1,sub_set_var(Pat, Pat1, scope_add([V1], Osub))};
false ->
- {Pat,sub_del_var(Pat, scope_add([V0], Osub))}
+ {Pat,sub_del_var(Pat, Osub)}
end;
pattern(#c_literal{}=Pat, _, Osub) -> {Pat,Osub};
pattern(#c_cons{anno=Anno,hd=H0,tl=T0}, Isub, Osub0) ->
@@ -1522,6 +1522,9 @@ is_subst(_) -> false.
%% chains so we never have to search more than once. Use orddict so
%% we know the format.
%%
+%% In addition to the list of substitutions, we also keep track of
+%% all variables currently live (the scope).
+%%
%% sub_subst_scope/1 adds dummy substitutions for all variables
%% in the scope in order to force renaming if variables in the
%% scope occurs as pattern variables.
@@ -1548,8 +1551,17 @@ sub_set_name(V, Val, #sub{v=S,s=Scope,t=Tdb0}=Sub) ->
Tdb = copy_type(V, Val, Tdb1),
Sub#sub{v=orddict:store(V, Val, S),s=gb_sets:add(V, Scope),t=Tdb}.
-sub_del_var(#c_var{name=V}, #sub{v=S,t=Tdb}=Sub) ->
- Sub#sub{v=orddict:erase(V, S),t=kill_types(V, Tdb)}.
+sub_del_var(#c_var{name=V}, #sub{v=S,s=Scope,t=Tdb}=Sub) ->
+ %% Profiling shows that for programs with many record operations,
+ %% sub_del_var/2 is a bottleneck. Since the scope contains all
+ %% variables that are live, we know that V cannot be present in S
+ %% if it is not in the scope.
+ case gb_sets:is_member(V, Scope) of
+ false ->
+ Sub#sub{s=gb_sets:insert(V, Scope)};
+ true ->
+ Sub#sub{v=orddict:erase(V, S),t=kill_types(V, Tdb)}
+ end.
sub_subst_var(#c_var{name=V}, Val, #sub{v=S0}) ->
%% Fold chained substitutions.
@@ -1559,13 +1571,16 @@ sub_subst_scope(#sub{v=S0,s=Scope}=Sub) ->
S = [{-1,#c_var{name=Sv}} || Sv <- gb_sets:to_list(Scope)]++S0,
Sub#sub{v=S}.
-sub_is_val(#c_var{name=V}, #sub{v=S}) ->
- v_is_value(V, S).
+sub_is_val(#c_var{name=V}, #sub{v=S,s=Scope}) ->
+ %% When the bottleneck in sub_del_var/2 was eliminated, this
+ %% became the new bottleneck. Since the scope contains all
+ %% live variables, a variable V can only be the target for
+ %% a substitution if it is in the scope.
+ gb_sets:is_member(V, Scope) andalso v_is_value(V, S).
-v_is_value(Var, Sub) ->
- any(fun ({_,#c_var{name=Val}}) when Val =:= Var -> true;
- (_) -> false
- end, Sub).
+v_is_value(Var, [{_,#c_var{name=Var}}|_]) -> true;
+v_is_value(Var, [_|T]) -> v_is_value(Var, T);
+v_is_value(_, []) -> false.
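The rewritten v_is_value/2 simply unrolls the any/2 call it replaces (lists:any/2 is imported unqualified in this module, as the removed code shows). A hypothetical equivalence property, for illustration only, reusing the exact fun from the removed code:

    %% Both forms answer the same question: does any substitution in
    %% Sub have Var as its target value?
    v_is_value_equiv(Var, Sub) ->
        v_is_value(Var, Sub) =:=
            any(fun ({_,#c_var{name=Val}}) when Val =:= Var -> true;
                    (_) -> false
                end, Sub).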
%% clauses(E, [Clause], TopLevel, Context, Sub) -> [Clause].
%% Trim the clauses by removing all clauses AFTER the first one which
diff --git a/lib/compiler/test/andor_SUITE.erl b/lib/compiler/test/andor_SUITE.erl
index 4ffbe07e32..7bef0aa27c 100644
--- a/lib/compiler/test/andor_SUITE.erl
+++ b/lib/compiler/test/andor_SUITE.erl
@@ -194,6 +194,9 @@ t_andalso(Config) when is_list(Config) ->
?line false = id(false) andalso not id(glurf),
?line false = false andalso not id(glurf),
+ true = begin (X1 = true) andalso X1, X1 end,
+ false = false = begin (X2 = false) andalso X2, X2 end,
+
ok.
t_orelse(Config) when is_list(Config) ->
@@ -224,6 +227,9 @@ t_orelse(Config) when is_list(Config) ->
?line true = id(true) orelse not id(glurf),
?line true = true orelse not id(glurf),
+ true = begin (X1 = true) orelse X1, X1 end,
+ false = begin (X2 = false) orelse X2, X2 end,
+
ok.
t_andalso_1({X,Y}) ->
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index 310a741b0b..925ad0c091 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -384,100 +384,6 @@ static ErlNifFunc nif_funcs[] = {
{"ecdh_compute_key_nif", 3, ecdh_compute_key_nif}
};
-#if defined(HAVE_EC)
-struct nid_map {
- char *name;
- int nid;
- ERL_NIF_TERM atom;
-};
-
-static struct nid_map ec_curves[] = {
- /* prime field curves */
- /* secg curves */
- { "secp112r1", NID_secp112r1 },
- { "secp112r2", NID_secp112r2 },
- { "secp128r1", NID_secp128r1 },
- { "secp128r2", NID_secp128r2 },
- { "secp160k1", NID_secp160k1 },
- { "secp160r1", NID_secp160r1 },
- { "secp160r2", NID_secp160r2 },
- /* SECG secp192r1 is the same as X9.62 prime192v1 */
- { "secp192r1", NID_X9_62_prime192v1 },
- { "secp192k1", NID_secp192k1 },
- { "secp224k1", NID_secp224k1 },
- { "secp224r1", NID_secp224r1 },
- { "secp256k1", NID_secp256k1 },
- /* SECG secp256r1 is the same as X9.62 prime256v1 */
- { "secp256r1", NID_X9_62_prime256v1 },
- { "secp384r1", NID_secp384r1 },
- { "secp521r1", NID_secp521r1 },
- /* X9.62 curves */
- { "prime192v1", NID_X9_62_prime192v1 },
- { "prime192v2", NID_X9_62_prime192v2 },
- { "prime192v3", NID_X9_62_prime192v3 },
- { "prime239v1", NID_X9_62_prime239v1 },
- { "prime239v2", NID_X9_62_prime239v2 },
- { "prime239v3", NID_X9_62_prime239v3 },
- { "prime256v1", NID_X9_62_prime256v1 },
- /* characteristic two field curves */
- /* NIST/SECG curves */
- { "sect113r1", NID_sect113r1 },
- { "sect113r2", NID_sect113r2 },
- { "sect131r1", NID_sect131r1 },
- { "sect131r2", NID_sect131r2 },
- { "sect163k1", NID_sect163k1 },
- { "sect163r1", NID_sect163r1 },
- { "sect163r2", NID_sect163r2 },
- { "sect193r1", NID_sect193r1 },
- { "sect193r2", NID_sect193r2 },
- { "sect233k1", NID_sect233k1 },
- { "sect233r1", NID_sect233r1 },
- { "sect239k1", NID_sect239k1 },
- { "sect283k1", NID_sect283k1 },
- { "sect283r1", NID_sect283r1 },
- { "sect409k1", NID_sect409k1 },
- { "sect409r1", NID_sect409r1 },
- { "sect571k1", NID_sect571k1 },
- { "sect571r1", NID_sect571r1 },
- /* X9.62 curves */
- { "c2pnb163v1", NID_X9_62_c2pnb163v1 },
- { "c2pnb163v2", NID_X9_62_c2pnb163v2 },
- { "c2pnb163v3", NID_X9_62_c2pnb163v3 },
- { "c2pnb176v1", NID_X9_62_c2pnb176v1 },
- { "c2tnb191v1", NID_X9_62_c2tnb191v1 },
- { "c2tnb191v2", NID_X9_62_c2tnb191v2 },
- { "c2tnb191v3", NID_X9_62_c2tnb191v3 },
- { "c2pnb208w1", NID_X9_62_c2pnb208w1 },
- { "c2tnb239v1", NID_X9_62_c2tnb239v1 },
- { "c2tnb239v2", NID_X9_62_c2tnb239v2 },
- { "c2tnb239v3", NID_X9_62_c2tnb239v3 },
- { "c2pnb272w1", NID_X9_62_c2pnb272w1 },
- { "c2pnb304w1", NID_X9_62_c2pnb304w1 },
- { "c2tnb359v1", NID_X9_62_c2tnb359v1 },
- { "c2pnb368w1", NID_X9_62_c2pnb368w1 },
- { "c2tnb431r1", NID_X9_62_c2tnb431r1 },
- /* the WAP/WTLS curves
- * [unlike SECG, spec has its own OIDs for curves from X9.62] */
- { "wtls1", NID_wap_wsg_idm_ecid_wtls1 },
- { "wtls3", NID_wap_wsg_idm_ecid_wtls3 },
- { "wtls4", NID_wap_wsg_idm_ecid_wtls4 },
- { "wtls5", NID_wap_wsg_idm_ecid_wtls5 },
- { "wtls6", NID_wap_wsg_idm_ecid_wtls6 },
- { "wtls7", NID_wap_wsg_idm_ecid_wtls7 },
- { "wtls8", NID_wap_wsg_idm_ecid_wtls8 },
- { "wtls9", NID_wap_wsg_idm_ecid_wtls9 },
- { "wtls10", NID_wap_wsg_idm_ecid_wtls10 },
- { "wtls11", NID_wap_wsg_idm_ecid_wtls11 },
- { "wtls12", NID_wap_wsg_idm_ecid_wtls12 },
- /* IPSec curves */
- { "ipsec3", NID_ipsec3 },
- { "ipsec4", NID_ipsec4 }
-};
-
-#define EC_CURVES_CNT (sizeof(ec_curves)/sizeof(struct nid_map))
-
-#endif /* HAVE_EC */
-
ERL_NIF_INIT(crypto,nif_funcs,load,NULL,upgrade,unload)
@@ -632,12 +538,6 @@ static int init(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_tpbasis = enif_make_atom(env,"tpbasis");
atom_ppbasis = enif_make_atom(env,"ppbasis");
atom_onbasis = enif_make_atom(env,"onbasis");
-
- {
- int i;
- for (i = 0; i < EC_CURVES_CNT; i++)
- ec_curves[i].atom = enif_make_atom(env,ec_curves[i].name);
- }
#endif
init_digest_types(env);
@@ -725,7 +625,7 @@ static void unload(ErlNifEnv* env, void* priv_data)
static int algo_hash_cnt;
static ERL_NIF_TERM algo_hash[8]; /* increase when extending the list */
static int algo_pubkey_cnt;
-static ERL_NIF_TERM algo_pubkey[2]; /* increase when extending the list */
+static ERL_NIF_TERM algo_pubkey[3]; /* increase when extending the list */
static int algo_cipher_cnt;
static ERL_NIF_TERM algo_cipher[2]; /* increase when extending the list */
@@ -751,6 +651,9 @@ static void init_algorithms_types(ErlNifEnv* env)
algo_pubkey_cnt = 0;
#if defined(HAVE_EC)
+#if !defined(OPENSSL_NO_EC2M)
+ algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env,"ec_gf2m");
+#endif
algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env,"ecdsa");
algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env,"ecdh");
#endif
@@ -2962,21 +2865,9 @@ static ERL_NIF_TERM blowfish_ofb64_encrypt(ErlNifEnv* env, int argc, const ERL_N
}
#if defined(HAVE_EC)
-static int term2curve_id(ERL_NIF_TERM nid)
-{
- int i;
-
- for (i = 0; i < EC_CURVES_CNT; i++)
- if (ec_curves[i].atom == nid)
- return ec_curves[i].nid;
-
- return 0;
-}
-
static EC_KEY* ec_key_new(ErlNifEnv* env, ERL_NIF_TERM curve_arg)
{
EC_KEY *key = NULL;
- int nid = 0;
int c_arity = -1;
const ERL_NIF_TERM* curve;
ErlNifBinary seed;
@@ -2988,18 +2879,12 @@ static EC_KEY* ec_key_new(ErlNifEnv* env, ERL_NIF_TERM curve_arg)
EC_GROUP *group = NULL;
EC_POINT *point = NULL;
- if (enif_is_atom(env, curve_arg)) {
- nid = term2curve_id(curve_arg);
- if (nid == 0)
- return NULL;
- key = EC_KEY_new_by_curve_name(nid);
- }
- else if (enif_is_tuple(env, curve_arg)
- && enif_get_tuple(env,curve_arg,&c_arity,&curve)
- && c_arity == 5
- && get_bn_from_bin(env, curve[3], &bn_order)
- && (curve[4] != atom_none && get_bn_from_bin(env, curve[4], &cofactor))) {
- /* {Field, Prime, Point, Order, CoFactor} = Curve */
+ /* {Field, Prime, Point, Order, CoFactor} = Curve */
+ if (enif_is_tuple(env, curve_arg)
+ && enif_get_tuple(env,curve_arg,&c_arity,&curve)
+ && c_arity == 5
+ && get_bn_from_bin(env, curve[3], &bn_order)
+ && (curve[4] != atom_none && get_bn_from_bin(env, curve[4], &cofactor))) {
int f_arity = -1;
const ERL_NIF_TERM* field;
@@ -3033,6 +2918,8 @@ static EC_KEY* ec_key_new(ErlNifEnv* env, ERL_NIF_TERM curve_arg)
/* create the EC_GROUP structure */
group = EC_GROUP_new_curve_GFp(p, a, b, NULL);
+#if !defined(OPENSSL_NO_EC2M)
+
} else if (f_arity == 3 && field[0] == atom_characteristic_two_field) {
/* {characteristic_two_field, M, Basis} */
@@ -3091,6 +2978,7 @@ static EC_KEY* ec_key_new(ErlNifEnv* env, ERL_NIF_TERM curve_arg)
goto out_err;
group = EC_GROUP_new_curve_GF2m(p, a, b, NULL);
+#endif
} else
goto out_err;
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index 406fd5e59a..aa60bba96a 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -99,7 +99,9 @@
<p><code>ecdh_private() = key_value() </code></p>
- <p><code>ecdh_params() = ec_named_curve() |
+ <p><code>ecdh_params() = ec_named_curve() | ec_explicit_curve()</code></p>
+
+ <p><code>ec_explicit_curve() =
{ec_field(), Prime :: key_value(), Point :: key_value(), Order :: integer(), CoFactor :: none | integer()} </code></p>
<p><code>ec_field() = {prime_field, Prime :: integer()} |
@@ -114,7 +116,15 @@
secp192k1| secp160r2| secp128r2| secp128r1| sect233r1| sect233k1| sect193r2| sect193r1|
sect131r2| sect131r1| sect283r1| sect283k1| sect163r2| secp256k1| secp160k1| secp160r1|
secp112r2| secp112r1| sect113r2| sect113r1| sect239k1| sect163r1| sect163k1| secp256r1|
- secp192r1 </code></p>
+ secp192r1|
+ brainpoolP160r1| brainpoolP160t1| brainpoolP192r1| brainpoolP192t1| brainpoolP224r1|
+ brainpoolP224t1| brainpoolP256r1| brainpoolP256t1| brainpoolP320r1| brainpoolP320t1|
+ brainpoolP384r1| brainpoolP384t1| brainpoolP512r1| brainpoolP512t1
+ </code>
+ Note that the <em>sect</em> curves are GF2m (characteristic two) curves and are only supported if the
+ underlying OpenSSL has support for them.
+ See also <seealso marker="#supports-0">crypto:supports/0</seealso>.
+ </p>
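A minimal sketch of such a support check (an editorial illustration, not part of the patch; the helper name has_gf2m/0 is hypothetical), mirroring what crypto_ec_curves:curves/0 does further down in this diff:

    %% True if the underlying OpenSSL build supports GF2m
    %% (characteristic two) curves; relies only on crypto:supports/0
    %% and the ec_gf2m atom introduced by this patch.
    has_gf2m() ->
        PubKeys = proplists:get_value(public_keys, crypto:supports()),
        proplists:get_bool(ec_gf2m, PubKeys).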
<p><code>stream_cipher() = rc4 | aes_ctr </code></p>
@@ -143,8 +153,11 @@
</p>
<p><code> cipher_algorithms() = des_cbc | des_cfb | des3_cbc | des3_cbf | des_ede3 |
blowfish_cbc | blowfish_cfb64 | aes_cbc128 | aes_cfb128| aes_cbc256 | aes_ige256 | rc2_cbc | aes_ctr| rc4 </code> </p>
- <p><code> public_key_algorithms() = rsa |dss | ecdsa | dh | ecdh </code> </p>
-
+ <p><code> public_key_algorithms() = rsa | dss | ecdsa | dh | ecdh | ec_gf2m</code>
+ Note that ec_gf2m is not strictly a public key algorithm, but rather a restriction on which curves
+ are supported with ecdsa and ecdh.
+ </p>
+
</section>
<funcs>
@@ -680,7 +693,29 @@
</desc>
</func>
-
+ <func>
+ <name>ec_curves() -> EllipticCurveList </name>
+ <fsummary>Provides a list of available named elliptic curves.</fsummary>
+ <type>
+ <v>EllipticCurveList = [ec_named_curve()]</v>
+ </type>
+ <desc>
+ <p>Can be used to determine which named elliptic curves are supported.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>ec_curve(NamedCurve) -> EllipticCurve </name>
+ <fsummary>Gets the defining parameters of an elliptic curve.</fsummary>
+ <type>
+ <v>NamedCurve = ec_named_curve()</v>
+ <v>EllipticCurve = ec_explicit_curve()</v>
+ </type>
+ <desc>
+ <p>Returns the defining parameters of an elliptic curve.</p>
+ </desc>
+ </func>
+
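A hedged usage sketch combining the two new functions with key generation (the ecdh_example/0 wrapper is hypothetical; generate_key(ecdh, Curve, undefined) is the clause shown later in this diff):

    %% Pick the first supported named curve, inspect its explicit
    %% {Field, CurveCoeffs, BasePoint, Order, CoFactor} parameters,
    %% then generate an ECDH key pair on that curve.
    ecdh_example() ->
        [Curve|_] = crypto:ec_curves(),
        {_Field, _Coeffs, _BasePoint, _Order, _CoFactor} =
            crypto:ec_curve(Curve),
        {_Public, _Private} = crypto:generate_key(ecdh, Curve, undefined).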
<func>
<name>verify(Algorithm, DigestType, Msg, Signature, Key) -> boolean()</name>
<fsummary>Verifies a digital signature.</fsummary>
diff --git a/lib/crypto/src/Makefile b/lib/crypto/src/Makefile
index 574c2076f2..eabfd676c5 100644
--- a/lib/crypto/src/Makefile
+++ b/lib/crypto/src/Makefile
@@ -37,6 +37,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/crypto-$(VSN)
MODULES= \
crypto_app \
crypto \
+ crypto_ec_curves \
crypto_server \
crypto_sup
diff --git a/lib/crypto/src/crypto.app.src b/lib/crypto/src/crypto.app.src
index 5548b6a1b5..161ea7c9fe 100644
--- a/lib/crypto/src/crypto.app.src
+++ b/lib/crypto/src/crypto.app.src
@@ -20,6 +20,7 @@
[{description, "CRYPTO version 2"},
{vsn, "%VSN%"},
{modules, [crypto,
+ crypto_ec_curves,
crypto_app,
crypto_sup,
crypto_server]},
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index 12ff060bf9..d953bd3bca 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -34,6 +34,7 @@
-export([public_encrypt/4, private_decrypt/4]).
-export([private_encrypt/4, public_decrypt/4]).
-export([dh_generate_parameters/2, dh_check/1]). %% Testing see
+-export([ec_curve/1, ec_curves/0]).
%% DEPRECATED
%% Replaced by hash_*
@@ -557,7 +558,7 @@ generate_key(srp, {user, [Generator, Prime, Version]}, PrivateArg)
user_srp_gen_key(Private, Generator, Prime);
generate_key(ecdh, Curve, undefined) ->
- ec_key_generate(Curve).
+ ec_key_generate(nif_curve_params(Curve)).
compute_key(dh, OthersPublicKey, MyPrivateKey, DHParameters) ->
@@ -1502,21 +1503,27 @@ ec_key_generate(_Key) -> ?nif_stub.
ecdh_compute_key_nif(_Others, _Curve, _My) -> ?nif_stub.
+ec_curves() ->
+ crypto_ec_curves:curves().
+
+ec_curve(X) ->
+ crypto_ec_curves:curve(X).
+
%%
%% EC
%%
term_to_nif_prime({prime_field, Prime}) ->
- {prime_field, int_to_bin(Prime)};
+ {prime_field, ensure_int_as_bin(Prime)};
term_to_nif_prime(PrimeField) ->
PrimeField.
term_to_nif_curve({A, B, Seed}) ->
{ensure_int_as_bin(A), ensure_int_as_bin(B), Seed}.
nif_curve_params({PrimeField, Curve, BasePoint, Order, CoFactor}) ->
- {term_to_nif_prime(PrimeField), term_to_nif_curve(Curve), ensure_int_as_bin(BasePoint), int_to_bin(Order), int_to_bin(CoFactor)};
+ {term_to_nif_prime(PrimeField), term_to_nif_curve(Curve), ensure_int_as_bin(BasePoint), ensure_int_as_bin(Order), ensure_int_as_bin(CoFactor)};
nif_curve_params(Curve) when is_atom(Curve) ->
%% named curve
- Curve.
+ crypto_ec_curves:curve(Curve).
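The net effect is that named-curve lookup now happens in Erlang instead of via OpenSSL NIDs in the NIF, so the NIF only ever sees explicit parameter tuples. A hypothetical invariant check (it would have to live inside crypto.erl, since nif_curve_params/1 is not exported):

    %% For a named curve the term handed to the NIF is exactly the
    %% stored explicit tuple; no ensure_int_as_bin/1 pass is needed
    %% because crypto_ec_curves:curve/1 already returns binaries.
    named_curve_resolves_directly(Name) when is_atom(Name) ->
        nif_curve_params(Name) =:= crypto_ec_curves:curve(Name).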
%% MISC --------------------------------------------------------------------
diff --git a/lib/crypto/src/crypto_ec_curves.erl b/lib/crypto/src/crypto_ec_curves.erl
new file mode 100644
index 0000000000..fe17643d96
--- /dev/null
+++ b/lib/crypto/src/crypto_ec_curves.erl
@@ -0,0 +1,1215 @@
+-module(crypto_ec_curves).
+
+-export([curve/1, curves/0]).
+
+curves() ->
+ CryptoSupport = crypto:supports(),
+ HasGF2m = proplists:get_bool(ec_gf2m, proplists:get_value(public_keys, CryptoSupport)),
+ prime_curves() ++ characteristic_two_curves(HasGF2m).
+
+
+prime_curves() ->
+ [secp112r1,secp112r2,secp128r1,secp128r2,secp160k1,secp160r1,secp160r2,
+ secp192r1,secp192k1,secp224k1,secp224r1,secp256k1,secp256r1,secp384r1,
+ secp521r1,prime192v1,prime192v2,prime192v3,prime239v1,prime239v2,prime239v3,
+ prime256v1,wtls6,wtls7,wtls8,wtls9,wtls12,
+ brainpoolP160r1,brainpoolP160t1,brainpoolP192r1,brainpoolP192t1,
+ brainpoolP224r1,brainpoolP224t1,brainpoolP256r1,brainpoolP256t1,
+ brainpoolP320r1,brainpoolP320t1,brainpoolP384r1,brainpoolP384t1,
+ brainpoolP512r1,brainpoolP512t1].
+
+characteristic_two_curves(true) ->
+ [sect113r1,sect113r2,sect131r1,sect131r2,sect163k1,sect163r1,
+ sect163r2,sect193r1,sect193r2,sect233k1,sect233r1,sect239k1,sect283k1,
+ sect283r1,sect409k1,sect409r1,sect571k1,sect571r1,c2pnb163v1,c2pnb163v2,
+ c2pnb163v3,c2pnb176v1,c2tnb191v1,c2tnb191v2,c2tnb191v3,c2pnb208w1,c2tnb239v1,
+ c2tnb239v2,c2tnb239v3,c2pnb272w1,c2pnb304w1,c2tnb359v1,c2pnb368w1,c2tnb431r1,
+ wtls1,wtls3,wtls4,wtls5,wtls10,wtls11,ipsec3,ipsec4];
+characteristic_two_curves(_) ->
+ [].
+
+curve(secp112r1) ->
+ {
+ {prime_field, <<16#DB7C2ABF62E35E668076BEAD208B:112>>}, %% Prime
+ {<<16#DB7C2ABF62E35E668076BEAD2088:112>>, %% A
+ <<16#659EF8BA043916EEDE8911702B22:112>>, %% B
+ <<16#00F50B028E4D696E676875615175290472783FB1:160>>}, %% Seed
+ <<16#04:8,
+ 16#09487239995A5EE76B55F9C2F098:112, %% X(p0)
+ 16#A89CE5AF8724C0A23E0E0FF77500:112>>, %% Y(p0)
+ <<16#DB7C2ABF62E35E7628DFAC6561C5:112>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp112r2) ->
+ {
+ {prime_field, <<16#DB7C2ABF62E35E668076BEAD208B:112>>}, %% Prime
+ {<<16#6127C24C05F38A0AAAF65C0EF02C:112>>, %% A
+ <<16#51DEF1815DB5ED74FCC34C85D709:112>>, %% B
+ <<16#002757A1114D696E6768756151755316C05E0BD4:160>>}, %% Seed
+ <<16#04:8,
+ 16#4BA30AB5E892B4E1649DD0928643:112, %% X(p0)
+ 16#ADCD46F5882E3747DEF36E956E97:112>>, %% Y(p0)
+ <<16#36DF0AAFD8B8D7597CA10520D04B:112>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(secp128r1) ->
+ {
+ {prime_field, <<16#FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF:128>>}, %% Prime
+ {<<16#FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC:128>>, %% A
+ <<16#E87579C11079F43DD824993C2CEE5ED3:128>>, %% B
+ <<16#000E0D4D696E6768756151750CC03A4473D03679:160>>}, %% Seed
+ <<16#04:8,
+ 16#161FF7528B899B2D0C28607CA52C5B86:128, %% X(p0)
+ 16#CF5AC8395BAFEB13C02DA292DDED7A83:128>>, %% Y(p0)
+ <<16#FFFFFFFE0000000075A30D1B9038A115:128>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp128r2) ->
+ {
+ {prime_field, <<16#FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF:128>>}, %% Prime
+ {<<16#D6031998D1B3BBFEBF59CC9BBFF9AEE1:128>>, %% A
+ <<16#5EEEFCA380D02919DC2C6558BB6D8A5D:128>>, %% B
+ <<16#004D696E67687561517512D8F03431FCE63B88F4:160>>}, %% Seed
+ <<16#04:8,
+ 16#7B6AA5D85E572983E6FB32A7CDEBC140:128, %% X(p0)
+ 16#27B6916A894D3AEE7106FE805FC34B44:128>>, %% Y(p0)
+ <<16#3FFFFFFF7FFFFFFFBE0024720613B5A3:128>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(secp160k1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73:160>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#07:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#3B4C382CE37AA192A4019E763036F4F5DD4D7EBB:160, %% X(p0)
+ 16#938CF935318FDCED6BC28286531733C3F03C4FEE:160>>, %% Y(p0)
+ <<16#0100000000000000000001B8FA16DFAB9ACA16B6B3:168>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp160r1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF:160>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC:160>>, %% A
+ <<16#1C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45:160>>, %% B
+ <<16#1053CDE42C14D696E67687561517533BF3F83345:160>>}, %% Seed
+ <<16#04:8,
+ 16#4A96B5688EF573284664698968C38BB913CBFC82:160, %% X(p0)
+ 16#23A628553168947D59DCC912042351377AC5FB32:160>>, %% Y(p0)
+ <<16#0100000000000000000001F4C8F927AED3CA752257:168>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp160r2) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73:160>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70:160>>, %% A
+ <<16#B4E134D3FB59EB8BAB57274904664D5AF50388BA:160>>, %% B
+ <<16#B99B99B099B323E02709A4D696E6768756151751:160>>}, %% Seed
+ <<16#04:8,
+ 16#52DCB034293A117E1F4FF11B30F7199D3144CE6D:160, %% X(p0)
+ 16#FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E:160>>, %% Y(p0)
+ <<16#0100000000000000000000351EE786A818F3A1A16B:168>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp192r1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF:192>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC:192>>, %% A
+ <<16#64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1:192>>, %% B
+ <<16#3045AE6FC8422F64ED579528D38120EAE12196D5:160>>}, %% Seed
+ <<16#04:8,
+ 16#188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012:192, %% X(p0)
+ 16#07192B95FFC8DA78631011ED6B24CDD573F977A11E794811:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp192k1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37:192>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#03:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D:192, %% X(p0)
+ 16#9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp224k1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D:224>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#05:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C:224, %% X(p0)
+ 16#7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5:224>>, %% Y(p0)
+ <<16#010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7:232>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp224r1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001:224>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE:224>>, %% A
+ <<16#B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4:224>>, %% B
+ <<16#BD71344799D5C7FCDC45B59FA3B9AB8F6A948BC5:160>>}, %% Seed
+ <<16#04:8,
+ 16#B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21:224, %% X(p0)
+ 16#BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34:224>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D:224>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp256k1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F:256>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#07:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798:256, %% X(p0)
+ 16#483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8:256>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141:256>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp256r1) ->
+ {
+ {prime_field, <<16#FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF:256>>}, %% Prime
+ {<<16#FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC:256>>, %% A
+ <<16#5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B:256>>, %% B
+ <<16#C49D360886E704936A6678E1139D26B7819F7E90:160>>}, %% Seed
+ <<16#04:8,
+ 16#6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296:256, %% X(p0)
+ 16#4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5:256>>, %% Y(p0)
+ <<16#FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551:256>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp384r1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE:256, %% Prime
+ 16#FFFFFFFF0000000000000000FFFFFFFF:128>>},
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE:256, %% A
+ 16#FFFFFFFF0000000000000000FFFFFFFC:128>>,
+ <<16#B3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875A:256, %% B
+ 16#C656398D8A2ED19D2A85C8EDD3EC2AEF:128>>,
+ <<16#A335926AA319A27A1D00896A6773A4827ACDAC73:160>>}, %% Seed
+ <<16#04:8,
+ 16#AA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A38:256, %% X(p0)
+ 16#5502F25DBF55296C3A545E3872760AB7:128,
+ 16#3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C0:256, %% Y(p0)
+ 16#0A60B1CE1D7E819D7A431D7C90EA0E5F:128>>,
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF:256, %% Order
+ 16#581A0DB248B0A77AECEC196ACCC52973:128>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(secp521r1) ->
+ {
+ {prime_field, <<16#01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256, %% Prime
+ 16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256,
+ 16#FFFF:16>>},
+ {<<16#01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256, %% A
+ 16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256,
+ 16#FFFC:16>>,
+ <<16#51953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109:256, %% B
+ 16#E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F:256,
+ 16#00:8>>,
+ <<16#D09E8800291CB85396CC6717393284AAA0DA64BA:160>>}, %% Seed
+ <<16#04:8,
+ 16#00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D:256, %% X(p0)
+ 16#3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5:256,
+ 16#BD66:16,
+ 16#011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E:256, %% Y(p0)
+ 16#662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD1:256,
+ 16#6650:16>>,
+ <<16#01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256, %% Order
+ 16#FFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E9138:256,
+ 16#6409:16>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime192v1) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF:192>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC:192>>, %% A
+ <<16#64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1:192>>, %% B
+ <<16#3045AE6FC8422F64ED579528D38120EAE12196D5:160>>}, %% Seed
+ <<16#04:8,
+ 16#188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012:192, %% X(p0)
+ 16#07192B95FFC8DA78631011ED6B24CDD573F977A11E794811:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime192v2) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF:192>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC:192>>, %% A
+ <<16#CC22D6DFB95C6B25E49C0D6364A4E5980C393AA21668D953:192>>, %% B
+ <<16#31A92EE2029FD10D901B113E990710F0D21AC6B6:160>>}, %% Seed
+ <<16#04:8,
+ 16#EEA2BAE7E1497842F2DE7769CFE9C989C072AD696F48034A:192, %% X(p0)
+ 16#6574D11D69B6EC7A672BB82A083DF2F2B0847DE970B2DE15:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFE5FB1A724DC80418648D8DD31:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime192v3) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF:192>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC:192>>, %% A
+ <<16#22123DC2395A05CAA7423DAECCC94760A7D462256BD56916:192>>, %% B
+ <<16#C469684435DEB378C4B65CA9591E2A5763059A2E:160>>}, %% Seed
+ <<16#04:8,
+ 16#7D29778100C65A1DA1783716588DCE2B8B4AEE8E228F1896:192, %% X(p0)
+ 16#38A90F22637337334B49DCB66A6DC8F9978ACA7648A943B0:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFF7A62D031C83F4294F640EC13:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime239v1) ->
+ {
+ {prime_field, <<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF:240>>}, %% Prime
+ {<<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC:240>>, %% A
+ <<16#6B016C3BDCF18941D0D654921475CA71A9DB2FB27D1D37796185C2942C0A:240>>, %% B
+ <<16#E43BB460F0B80CC0C0B075798E948060F8321B7D:160>>}, %% Seed
+ <<16#04:8,
+ 16#0FFA963CDCA8816CCC33B8642BEDF905C3D358573D3F27FBBD3B3CB9AAAF:240, %% X(p0)
+ 16#7DEBE8E4E90A5DAE6E4054CA530BA04654B36818CE226B39FCCB7B02F1AE:240>>, %% Y(p0)
+ <<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF9E5E9A9F5D9071FBD1522688909D0B:240>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime239v2) ->
+ {
+ {prime_field, <<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF:240>>}, %% Prime
+ {<<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC:240>>, %% A
+ <<16#617FAB6832576CBBFED50D99F0249C3FEE58B94BA0038C7AE84C8C832F2C:240>>, %% B
+ <<16#E8B4011604095303CA3B8099982BE09FCB9AE616:160>>}, %% Seed
+ <<16#04:8,
+ 16#38AF09D98727705120C921BB5E9E26296A3CDCF2F35757A0EAFD87B830E7:240, %% X(p0)
+ 16#5B0125E4DBEA0EC7206DA0FC01D9B081329FB555DE6EF460237DFF8BE4BA:240>>, %% Y(p0)
+ <<16#7FFFFFFFFFFFFFFFFFFFFFFF800000CFA7E8594377D414C03821BC582063:240>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime239v3) ->
+ {
+ {prime_field, <<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF:240>>}, %% Prime
+ {<<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC:240>>, %% A
+ <<16#255705FA2A306654B1F4CB03D6A750A30C250102D4988717D9BA15AB6D3E:240>>, %% B
+ <<16#7D7374168FFE3471B60A857686A19475D3BFA2FF:160>>}, %% Seed
+ <<16#04:8,
+ 16#6768AE8E18BB92CFCF005C949AA2C6D94853D0E660BBF854B1C9505FE95A:240, %% X(p0)
+ 16#1607E6898F390C06BC1D552BAD226F3B6FCFE48B6E818499AF18E3ED6CF3:240>>, %% Y(p0)
+ <<16#7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF975DEB41B3A6057C3C432146526551:240>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(prime256v1) ->
+ {
+ {prime_field, <<16#FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF:256>>}, %% Prime
+ {<<16#FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC:256>>, %% A
+ <<16#5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B:256>>, %% B
+ <<16#C49D360886E704936A6678E1139D26B7819F7E90:160>>}, %% Seed
+ <<16#04:8,
+ 16#6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296:256, %% X(p0)
+ 16#4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5:256>>, %% Y(p0)
+ <<16#FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551:256>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(sect113r1) ->
+ {
+ {characteristic_two_field, 113, {tpbasis,9}},
+ {<<16#3088250CA6E7C7FE649CE85820F7:112>>, %% A
+ <<16#E8BEE4D3E2260744188BE0E9C723:112>>, %% B
+ <<16#10E723AB14D696E6768756151756FEBF8FCB49A9:160>>}, %% Seed
+ <<16#04:8,
+ 16#009D73616F35F4AB1407D73562C10F:120, %% X(p0)
+ 16#00A52830277958EE84D1315ED31886:120>>, %% Y(p0)
+ <<16#0100000000000000D9CCEC8A39E56F:120>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect113r2) ->
+ {
+ {characteristic_two_field, 113, {tpbasis,9}},
+ {<<16#689918DBEC7E5A0DD6DFC0AA55C7:112>>, %% A
+ <<16#95E9A9EC9B297BD4BF36E059184F:112>>, %% B
+ <<16#10C0FB15760860DEF1EEF4D696E676875615175D:160>>}, %% Seed
+ <<16#04:8,
+ 16#01A57A6A7B26CA5EF52FCDB8164797:120, %% X(p0)
+ 16#00B3ADC94ED1FE674C06E695BABA1D:120>>, %% Y(p0)
+ <<16#010000000000000108789B2496AF93:120>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect131r1) ->
+ {
+ {characteristic_two_field, 131, {ppbasis,2,3,8}},
+ {<<16#07A11B09A76B562144418FF3FF8C2570B8:136>>, %% A
+ <<16#0217C05610884B63B9C6C7291678F9D341:136>>, %% B
+ <<16#4D696E676875615175985BD3ADBADA21B43A97E2:160>>}, %% Seed
+ <<16#04:8,
+ 16#0081BAF91FDF9833C40F9C181343638399:136, %% X(p0)
+ 16#078C6E7EA38C001F73C8134B1B4EF9E150:136>>, %% Y(p0)
+ <<16#0400000000000000023123953A9464B54D:136>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect131r2) ->
+ {
+ {characteristic_two_field, 131, {ppbasis,2,3,8}},
+ {<<16#03E5A88919D7CAFCBF415F07C2176573B2:136>>, %% A
+ <<16#04B8266A46C55657AC734CE38F018F2192:136>>, %% B
+ <<16#985BD3ADBAD4D696E676875615175A21B43A97E3:160>>}, %% Seed
+ <<16#04:8,
+ 16#0356DCD8F2F95031AD652D23951BB366A8:136, %% X(p0)
+ 16#0648F06D867940A5366D9E265DE9EB240F:136>>, %% Y(p0)
+ <<16#0400000000000000016954A233049BA98F:136>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect163k1) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,3,6,7}},
+ {<<16#01:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#02FE13C0537BBC11ACAA07D793DE4E6D5E5C94EEE8:168, %% X(p0)
+ 16#0289070FB05D38FF58321F2E800536D538CCDAA3D9:168>>, %% Y(p0)
+ <<16#04000000000000000000020108A2E0CC0D99F8A5EF:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect163r1) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,3,6,7}},
+ {<<16#07B6882CAAEFA84F9554FF8428BD88E246D2782AE2:168>>, %% A
+ <<16#0713612DCDDCB40AAB946BDA29CA91F73AF958AFD9:168>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0369979697AB43897789566789567F787A7876A654:168, %% X(p0)
+ 16#00435EDB42EFAFB2989D51FEFCE3C80988F41FF883:168>>, %% Y(p0)
+ <<16#03FFFFFFFFFFFFFFFFFFFF48AAB689C29CA710279B:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect163r2) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,3,6,7}},
+ {<<16#01:8>>, %% A
+ <<16#020A601907B8C953CA1481EB10512F78744A3205FD:168>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#03F0EBA16286A2D57EA0991168D4994637E8343E36:168, %% X(p0)
+ 16#00D51FBC6C71A0094FA2CDD545B11C5C0C797324F1:168>>, %% Y(p0)
+ <<16#040000000000000000000292FE77E70C12A4234C33:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect193r1) ->
+ {
+ {characteristic_two_field, 193, {tpbasis,15}},
+ {<<16#17858FEB7A98975169E171F77B4087DE098AC8A911DF7B01:192>>, %% A
+ <<16#FDFB49BFE6C3A89FACADAA7A1E5BBC7CC1C2E5D831478814:192>>, %% B
+ <<16#103FAEC74D696E676875615175777FC5B191EF30:160>>}, %% Seed
+ <<16#04:8,
+ 16#01F481BC5F0FF84A74AD6CDF6FDEF4BF6179625372D8C0C5E1:200, %% X(p0)
+ 16#0025E399F2903712CCF3EA9E3A1AD17FB0B3201B6AF7CE1B05:200>>, %% Y(p0)
+ <<16#01000000000000000000000000C7F34A778F443ACC920EBA49:200>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect193r2) ->
+ {
+ {characteristic_two_field, 193, {tpbasis,15}},
+ {<<16#0163F35A5137C2CE3EA6ED8667190B0BC43ECD69977702709B:200>>, %% A
+ <<16#C9BB9E8927D4D64C377E2AB2856A5B16E3EFB7F61D4316AE:192>>, %% B
+ <<16#10B7B4D696E676875615175137C8A16FD0DA2211:160>>}, %% Seed
+ <<16#04:8,
+ 16#00D9B67D192E0367C803F39E1A7E82CA14A651350AAE617E8F:200, %% X(p0)
+ 16#01CE94335607C304AC29E7DEFBD9CA01F596F927224CDECF6C:200>>, %% Y(p0)
+ <<16#010000000000000000000000015AAB561B005413CCD4EE99D5:200>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect233k1) ->
+ {
+ {characteristic_two_field, 233, {tpbasis,74}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#017232BA853A7E731AF129F22FF4149563A419C26BF50A4C9D6EEFAD6126:240, %% X(p0)
+ 16#01DB537DECE819B7F70F555A67C427A8CD9BF18AEB9B56E0C11056FAE6A3:240>>, %% Y(p0)
+ <<16#8000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF:232>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(sect233r1) ->
+ {
+ {characteristic_two_field, 233, {tpbasis,74}},
+ {<<16#01:8>>, %% A
+ <<16#66647EDE6C332C7F8C0923BB58213B333B20E9CE4281FE115F7D8F90AD:232>>, %% B
+ <<16#74D59FF07F6B413D0EA14B344B20A2DB049B50C3:160>>}, %% Seed
+ <<16#04:8,
+ 16#00FAC9DFCBAC8313BB2139F1BB755FEF65BC391F8B36F8F8EB7371FD558B:240, %% X(p0)
+ 16#01006A08A41903350678E58528BEBF8A0BEFF867A7CA36716F7E01F81052:240>>, %% Y(p0)
+ <<16#01000000000000000000000000000013E974E72F8A6922031D2603CFE0D7:240>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect239k1) ->
+ {
+ {characteristic_two_field, 239, {tpbasis,158}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#29A0B6A887A983E9730988A68727A8B2D126C44CC2CC7B2A6555193035DC:240, %% X(p0)
+ 16#76310804F12E549BDB011C103089E73510ACB275FC312A5DC6B76553F0CA:240>>, %% Y(p0)
+ <<16#2000000000000000000000000000005A79FEC67CB6E91F1C1DA800E478A5:240>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(sect283k1) ->
+ {
+ {characteristic_two_field, 283, {ppbasis,5,7,12}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC24:256, %% X(p0)
+ 16#58492836:32,
+ 16#01CCDA380F1C9E318D90F95D07E5426FE87E45C0E8184698E45962364E341161:256, %% Y(p0)
+ 16#77DD2259:32>>,
+ <<16#01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE9AE2ED07577265DFF7F94451E06:256, %% Order
+ 16#1E163C61:32>>,
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(sect283r1) ->
+ {
+ {characteristic_two_field, 283, {ppbasis,5,7,12}},
+ {<<16#01:8>>, %% A
+ <<16#027B680AC8B8596DA5A4AF8A19A0303FCA97FD7645309FA2A581485AF6263E31:256, %% B
+ 16#3B79A2F5:32>>,
+ <<16#77E2B07370EB0F832A6DD5B62DFC88CD06BB84BE:160>>}, %% Seed
+ <<16#04:8,
+ 16#05F939258DB7DD90E1934F8C70B0DFEC2EED25B8557EAC9C80E2E198F8CDBECD:256, %% X(p0)
+ 16#86B12053:32,
+ 16#03676854FE24141CB98FE6D4B20D02B4516FF702350EDDB0826779C813F0DF45:256, %% Y(p0)
+ 16#BE8112F4:32>>,
+ <<16#03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEF90399660FC938A90165B042A7C:256, %% Order
+ 16#EFADB307:32>>,
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect409k1) ->
+ {
+ {characteristic_two_field, 409, {tpbasis,87}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0060F05F658F49C1AD3AB1890F7184210EFD0987E307C84C27ACCFB8F9F67CC2:256, %% X(p0)
+ 16#C460189EB5AAAA62EE222EB1B35540CFE9023746:160,
+ 16#01E369050B7C4E42ACBA1DACBF04299C3460782F918EA427E6325165E9EA10E3:256, %% Y(p0)
+ 16#DA5F6C42E9C55215AA9CA27A5863EC48D8E0286B:160>>,
+ <<16#7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE5F83B2D4EA20:256, %% Order
+ 16#400EC4557D5ED3E3E7CA5B4B5C83B8E01E5FCF:152>>,
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(sect409r1) ->
+ {
+ {characteristic_two_field, 409, {tpbasis,87}},
+ {<<16#01:8>>, %% A
+ <<16#21A5C2C8EE9FEB5C4B9A753B7B476B7FD6422EF1F3DD674761FA99D6AC27C8A9:256, %% B
+ 16#A197B272822F6CD57A55AA4F50AE317B13545F:152>>,
+ <<16#4099B5A457F9D69F79213D094C4BCD4D4262210B:160>>}, %% Seed
+ <<16#04:8,
+ 16#015D4860D088DDB3496B0C6064756260441CDE4AF1771D4DB01FFE5B34E59703:256, %% X(p0)
+ 16#DC255A868A1180515603AEAB60794E54BB7996A7:160,
+ 16#0061B1CFAB6BE5F32BBFA78324ED106A7636B9C5A7BD198D0158AA4F5488D08F:256, %% Y(p0)
+ 16#38514F1FDF4B4F40D2181B3681C364BA0273C706:160>>,
+ <<16#010000000000000000000000000000000000000000000000000001E2AAD6A612:256, %% Order
+ 16#F33307BE5FA47C3C9E052F838164CD37D9A21173:160>>,
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(sect571k1) ->
+ {
+ {characteristic_two_field, 571, {ppbasis,2,5,10}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#026EB7A859923FBC82189631F8103FE4AC9CA2970012D5D46024804801841CA4:256, %% X(p0)
+ 16#4370958493B205E647DA304DB4CEB08CBBD1BA39494776FB988B47174DCA88C7:256,
+ 16#E2945283A01C8972:64,
+ 16#0349DC807F4FBF374F4AEADE3BCA95314DD58CEC9F307A54FFC61EFC006D8A2C:256, %% Y(p0)
+ 16#9D4979C0AC44AEA74FBEBBB9F772AEDCB620B01A7BA7AF1B320430C8591984F6:256,
+ 16#01CD4C143EF1C7A3:64>>,
+ <<16#0200000000000000000000000000000000000000000000000000000000000000:256, %% Order
+ 16#00000000131850E1F19A63E4B391A8DB917F4138B630D84BE5D639381E91DEB4:256,
+ 16#5CFE778F637C1001:64>>,
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(sect571r1) ->
+ {
+ {characteristic_two_field, 571, {ppbasis,2,5,10}},
+ {<<16#01:8>>, %% A
+ <<16#02F40E7E2221F295DE297117B7F3D62F5C6A97FFCB8CEFF1CD6BA8CE4A9A18AD:256, %% B
+ 16#84FFABBD8EFA59332BE7AD6756A66E294AFD185A78FF12AA520E4DE739BACA0C:256,
+ 16#7FFEFF7F2955727A:64>>,
+ <<16#2AA058F73A0E33AB486B0F610410C53A7F132310:160>>}, %% Seed
+ <<16#04:8,
+ 16#0303001D34B856296C16C0D40D3CD7750A93D1D2955FA80AA5F40FC8DB7B2ABD:256, %% X(p0)
+ 16#BDE53950F4C0D293CDD711A35B67FB1499AE60038614F1394ABFA3B4C850D927:256,
+ 16#E1E7769C8EEC2D19:64,
+ 16#037BF27342DA639B6DCCFFFEB73D69D78C6C27A6009CBBCA1980F8533921E8A6:256, %% Y(p0)
+ 16#84423E43BAB08A576291AF8F461BB2A8B3531D2F0485C19B16E2F1516E23DD3C:256,
+ 16#1A4827AF1B8AC15B:64>>,
+ <<16#03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:256, %% Order
+ 16#FFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E:256,
+ 16#8382E9BB2FE84E47:64>>,
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(c2pnb163v1) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,1,2,8}},
+ {<<16#072546B5435234A422E0789675F432C89435DE5242:168>>, %% A
+ <<16#C9517D06D5240D3CFF38C74B20B6CD4D6F9DD4D9:160>>, %% B
+ <<16#D2C0FB15760860DEF1EEF4D696E6768756151754:160>>}, %% Seed
+ <<16#04:8,
+ 16#07AF69989546103D79329FCC3D74880F33BBE803CB:168, %% X(p0)
+ 16#01EC23211B5966ADEA1D3F87F7EA5848AEF0B7CA9F:168>>, %% Y(p0)
+ <<16#0400000000000000000001E60FC8821CC74DAEAFC1:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(c2pnb163v2) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,1,2,8}},
+ {<<16#0108B39E77C4B108BED981ED0E890E117C511CF072:168>>, %% A
+ <<16#0667ACEB38AF4E488C407433FFAE4F1C811638DF20:168>>, %% B
+ <<16#53814C050D44D696E67687561517580CA4E29FFD:160>>}, %% Seed
+ <<16#04:8,
+ 16#0024266E4EB5106D0A964D92C4860E2671DB9B6CC5:168, %% X(p0)
+ 16#079F684DDF6684C5CD258B3890021B2386DFD19FC5:168>>, %% Y(p0)
+ <<16#03FFFFFFFFFFFFFFFFFFFDF64DE1151ADBB78F10A7:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(c2pnb163v3) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,1,2,8}},
+ {<<16#07A526C63D3E25A256A007699F5447E32AE456B50E:168>>, %% A
+ <<16#03F7061798EB99E238FD6F1BF95B48FEEB4854252B:168>>, %% B
+ <<16#50CBF1D95CA94D696E676875615175F16A36A3B8:160>>}, %% Seed
+ <<16#04:8,
+ 16#02F9F87B7C574D0BDECF8A22E6524775F98CDEBDCB:168, %% X(p0)
+ 16#05B935590C155E17EA48EB3FF3718B893DF59A05D0:168>>, %% Y(p0)
+ <<16#03FFFFFFFFFFFFFFFFFFFE1AEE140F110AFF961309:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(c2pnb176v1) ->
+ {
+ {characteristic_two_field, 176, {ppbasis,1,2,43}},
+ {<<16#E4E6DB2995065C407D9D39B8D0967B96704BA8E9C90B:176>>, %% A
+ <<16#5DDA470ABE6414DE8EC133AE28E9BBD7FCEC0AE0FFF2:176>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#8D16C2866798B600F9F08BB4A8E860F3298CE04A5798:176, %% X(p0)
+ 16#6FA4539C2DADDDD6BAB5167D61B436E1D92BB16A562C:176>>, %% Y(p0)
+ <<16#010092537397ECA4F6145799D62B0A19CE06FE26AD:168>>, %% Order
+ <<16#FF6E:16>> %% CoFactor
+ };
+
+curve(c2tnb191v1) ->
+ {
+ {characteristic_two_field, 191, {tpbasis,9}},
+ {<<16#2866537B676752636A68F56554E12640276B649EF7526267:192>>, %% A
+ <<16#2E45EF571F00786F67B0081B9495A3D95462F5DE0AA185EC:192>>, %% B
+ <<16#4E13CA542744D696E67687561517552F279A8C84:160>>}, %% Seed
+ <<16#04:8,
+ 16#36B3DAF8A23206F9C4F299D7B21A9C369137F2C84AE1AA0D:192, %% X(p0)
+ 16#765BE73433B3F95E332932E70EA245CA2418EA0EF98018FB:192>>, %% Y(p0)
+ <<16#40000000000000000000000004A20E90C39067C893BBB9A5:192>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(c2tnb191v2) ->
+ {
+ {characteristic_two_field, 191, {tpbasis,9}},
+ {<<16#401028774D7777C7B7666D1366EA432071274F89FF01E718:192>>, %% A
+ <<16#0620048D28BCBD03B6249C99182B7C8CD19700C362C46A01:192>>, %% B
+ <<16#0871EF2FEF24D696E6768756151758BEE0D95C15:160>>}, %% Seed
+ <<16#04:8,
+ 16#3809B2B7CC1B28CC5A87926AAD83FD28789E81E2C9E3BF10:192, %% X(p0)
+ 16#17434386626D14F3DBF01760D9213A3E1CF37AEC437D668A:192>>, %% Y(p0)
+ <<16#20000000000000000000000050508CB89F652824E06B8173:192>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(c2tnb191v3) ->
+ {
+ {characteristic_two_field, 191, {tpbasis,9}},
+ {<<16#6C01074756099122221056911C77D77E77A777E7E7E77FCB:192>>, %% A
+ <<16#71FE1AF926CF847989EFEF8DB459F66394D90F32AD3F15E8:192>>, %% B
+ <<16#E053512DC684D696E676875615175067AE786D1F:160>>}, %% Seed
+ <<16#04:8,
+ 16#375D4CE24FDE434489DE8746E71786015009E66E38A926DD:192, %% X(p0)
+ 16#545A39176196575D985999366E6AD34CE0A77CD7127B06BE:192>>, %% Y(p0)
+ <<16#155555555555555555555555610C0B196812BFB6288A3EA3:192>>, %% Order
+ <<16#06:8>> %% CoFactor
+ };
+
+curve(c2pnb208w1) ->
+ {
+ {characteristic_two_field, 208, {ppbasis,1,2,83}},
+ {<<16#00:8>>, %% A
+ <<16#C8619ED45A62E6212E1160349E2BFA844439FAFC2A3FD1638F9E:208>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#89FDFBE4ABE193DF9559ECF07AC0CE78554E2784EB8C1ED1A57A:208, %% X(p0)
+ 16#0F55B51A06E78E9AC38A035FF520D8B01781BEB1A6BB08617DE3:208>>, %% Y(p0)
+ <<16#0101BAF95C9723C57B6C21DA2EFF2D5ED588BDD5717E212F9D:200>>, %% Order
+ <<16#FE48:16>> %% CoFactor
+ };
+
+curve(c2tnb239v1) ->
+ {
+ {characteristic_two_field, 239, {tpbasis,36}},
+ {<<16#32010857077C5431123A46B808906756F543423E8D27877578125778AC76:240>>, %% A
+ <<16#790408F2EEDAF392B012EDEFB3392F30F4327C0CA3F31FC383C422AA8C16:240>>, %% B
+ <<16#D34B9A4D696E676875615175CA71B920BFEFB05D:160>>}, %% Seed
+ <<16#04:8,
+ 16#57927098FA932E7C0A96D3FD5B706EF7E5F5C156E16B7E7C86038552E91D:240, %% X(p0)
+ 16#61D8EE5077C33FECF6F1A16B268DE469C3C7744EA9A971649FC7A9616305:240>>, %% Y(p0)
+ <<16#2000000000000000000000000000000F4D42FFE1492A4993F1CAD666E447:240>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(c2tnb239v2) ->
+ {
+ {characteristic_two_field, 239, {tpbasis,36}},
+ {<<16#4230017757A767FAE42398569B746325D45313AF0766266479B75654E65F:240>>, %% A
+ <<16#5037EA654196CFF0CD82B2C14A2FCF2E3FF8775285B545722F03EACDB74B:240>>, %% B
+ <<16#2AA6982FDFA4D696E676875615175D266727277D:160>>}, %% Seed
+ <<16#04:8,
+ 16#28F9D04E900069C8DC47A08534FE76D2B900B7D7EF31F5709F200C4CA205:240, %% X(p0)
+ 16#5667334C45AFF3B5A03BAD9DD75E2C71A99362567D5453F7FA6E227EC833:240>>, %% Y(p0)
+ <<16#1555555555555555555555555555553C6F2885259C31E3FCDF154624522D:240>>, %% Order
+ <<16#06:8>> %% CoFactor
+ };
+
+curve(c2tnb239v3) ->
+ {
+ {characteristic_two_field, 239, {tpbasis,36}},
+ {<<16#01238774666A67766D6676F778E676B66999176666E687666D8766C66A9F:240>>, %% A
+ <<16#6A941977BA9F6A435199ACFC51067ED587F519C5ECB541B8E44111DE1D40:240>>, %% B
+ <<16#9E076F4D696E676875615175E11E9FDD77F92041:160>>}, %% Seed
+ <<16#04:8,
+ 16#70F6E9D04D289C4E89913CE3530BFDE903977D42B146D539BF1BDE4E9C92:240, %% X(p0)
+ 16#2E5A0EAF6E5E1305B9004DCE5C0ED7FE59A35608F33837C816D80B79F461:240>>, %% Y(p0)
+ <<16#0CCCCCCCCCCCCCCCCCCCCCCCCCCCCCAC4912D2D9DF903EF9888B8A0E4CFF:240>>, %% Order
+ <<16#0A:8>> %% CoFactor
+ };
+
+curve(c2pnb272w1) ->
+ {
+ {characteristic_two_field, 272, {ppbasis,1,3,56}},
+ {<<16#91A091F03B5FBA4AB2CCF49C4EDD220FB028712D42BE752B2C40094DBACDB586:256, %% A
+ 16#FB20:16>>,
+ <<16#7167EFC92BB2E3CE7C8AAAFF34E12A9C557003D7C73A6FAF003F99F6CC8482E5:256, %% B
+ 16#40F7:16>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#6108BABB2CEEBCF787058A056CBE0CFE622D7723A289E08A07AE13EF0D10D171:256, %% X(p0)
+ 16#DD8D:16,
+ 16#10C7695716851EEF6BA7F6872E6142FBD241B830FF5EFCACECCAB05E02005DDE:256, %% Y(p0)
+ 16#9D23:16>>,
+ <<16#0100FAF51354E0E39E4892DF6E319C72C8161603FA45AA7B998A167B8F1E6295:256, %% Order
+ 16#21:8>>,
+ <<16#FF06:16>> %% CoFactor
+ };
+
+curve(c2pnb304w1) ->
+ {
+ {characteristic_two_field, 304, {ppbasis,1,2,11}},
+ {<<16#FD0D693149A118F651E6DCE6802085377E5F882D1B510B44160074C128807836:256, %% A
+ 16#5A0396C8E681:48>>,
+ <<16#BDDB97E555A50A908E43B01C798EA5DAA6788F1EA2794EFCF57166B8C1403960:256, %% B
+ 16#1E55827340BE:48>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#197B07845E9BE2D96ADB0F5F3C7F2CFFBD7A3EB8B6FEC35C7FD67F26DDF6285A:256, %% X(p0)
+ 16#644F740A2614:48,
+ 16#E19FBEB76E0DA171517ECF401B50289BF014103288527A9B416A105E80260B54:256, %% Y(p0)
+ 16#9FDC1B92C03B:48>>,
+ <<16#0101D556572AABAC800101D556572AABAC8001022D5C91DD173F8FB561DA6899:256, %% Order
+ 16#164443051D:40>>,
+ <<16#FE2E:16>> %% CoFactor
+ };
+
+curve(c2tnb359v1) ->
+ {
+ {characteristic_two_field, 359, {tpbasis,68}},
+ {<<16#5667676A654B20754F356EA92017D946567C46675556F19556A04616B567D223:256, %% A
+ 16#A5E05656FB549016A96656A557:104>>,
+ <<16#2472E2D0197C49363F1FE7F5B6DB075D52B6947D135D8CA445805D39BC345626:256, %% B
+ 16#089687742B6329E70680231988:104>>,
+ <<16#2B354920B724D696E67687561517585BA1332DC6:160>>}, %% Seed
+ <<16#04:8,
+ 16#3C258EF3047767E7EDE0F1FDAA79DAEE3841366A132E163ACED4ED2401DF9C6B:256, %% X(p0)
+ 16#DCDE98E8E707C07A2239B1B097:104,
+ 16#53D7E08529547048121E9C95F3791DD804963948F34FAE7BF44EA82365DC7868:256, %% Y(p0)
+ 16#FE57E4AE2DE211305A407104BD:104>>,
+ <<16#01AF286BCA1AF286BCA1AF286BCA1AF286BCA1AF286BC9FB8F6B85C556892C20:256, %% Order
+ 16#A7EB964FE7719E74F490758D3B:104>>,
+ <<16#4C:8>> %% CoFactor
+ };
+
+curve(c2pnb368w1) ->
+ {
+ {characteristic_two_field, 368, {ppbasis,1,2,85}},
+ {<<16#E0D2EE25095206F5E2A4F9ED229F1F256E79A0E2B455970D8D0D865BD94778C5:256, %% A
+ 16#76D62F0AB7519CCD2A1A906AE30D:112>>,
+ <<16#FC1217D4320A90452C760A58EDCD30C8DD069B3C34453837A34ED50CB54917E1:256, %% B
+ 16#C2112D84D164F444F8F74786046A:112>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#1085E2755381DCCCE3C1557AFA10C2F0C0C2825646C5B34A394CBCFA8BC16B22:256, %% X(p0)
+ 16#E7E789E927BE216F02E1FB136A5F:112,
+ 16#7B3EB1BDDCBA62D5D8B2059B525797FC73822C59059C623A45FF3843CEE8F87C:256, %% Y(p0)
+ 16#D1855ADAA81E2A0750B80FDA2310:112>>,
+ <<16#010090512DA9AF72B08349D98A5DD4C7B0532ECA51CE03E2D10F3B7AC579BD87:256, %% Order
+ 16#E909AE40A6F131E9CFCE5BD967:104>>,
+ <<16#FF70:16>> %% CoFactor
+ };
+
+curve(c2tnb431r1) ->
+ {
+ {characteristic_two_field, 431, {tpbasis,120}},
+ {<<16#1A827EF00DD6FC0E234CAF046C6A5D8A85395B236CC4AD2CF32A0CADBDC9DDF6:256, %% A
+ 16#20B0EB9906D0957F6C6FEACD615468DF104DE296CD8F:176>>,
+ <<16#10D9B4A3D9047D8B154359ABFB1B7F5485B04CEB868237DDC9DEDA982A679A5A:256, %% B
+ 16#919B626D4E50A8DD731B107A9962381FB5D807BF2618:176>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#120FC05D3C67A99DE161D2F4092622FECA701BE4F50F4758714E8A87BBF2A658:256, %% X(p0)
+ 16#EF8C21E7C5EFE965361F6C2999C0C247B0DBD70CE6B7:176,
+ 16#20D0AF8903A96F8D5FA2C255745D3C451B302C9346D9B7E485E7BCE41F6B591F:256, %% Y(p0)
+ 16#3E8F6ADDCBB0BC4C2F947A7DE1A89B625D6A598B3760:176>>,
+ <<16#0340340340340340340340340340340340340340340340340340340323C313FA:256, %% Order
+ 16#B50589703B5EC68D3587FEC60D161CC149C1AD4A91:168>>,
+ <<16#2760:16>> %% CoFactor
+ };
+
+curve(wtls1) ->
+ {
+ {characteristic_two_field, 113, {tpbasis,9}},
+ {<<16#01:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#01667979A40BA497E5D5C270780617:120, %% X(p0)
+ 16#00F44B4AF1ECC2630E08785CEBCC15:120>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFDBF91AF6DEA73:112>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(wtls3) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,3,6,7}},
+ {<<16#01:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#02FE13C0537BBC11ACAA07D793DE4E6D5E5C94EEE8:168, %% X(p0)
+ 16#0289070FB05D38FF58321F2E800536D538CCDAA3D9:168>>, %% Y(p0)
+ <<16#04000000000000000000020108A2E0CC0D99F8A5EF:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(wtls4) ->
+ {
+ {characteristic_two_field, 113, {tpbasis,9}},
+ {<<16#3088250CA6E7C7FE649CE85820F7:112>>, %% A
+ <<16#E8BEE4D3E2260744188BE0E9C723:112>>, %% B
+ <<16#10E723AB14D696E6768756151756FEBF8FCB49A9:160>>}, %% Seed
+ <<16#04:8,
+ 16#009D73616F35F4AB1407D73562C10F:120, %% X(p0)
+ 16#00A52830277958EE84D1315ED31886:120>>, %% Y(p0)
+ <<16#0100000000000000D9CCEC8A39E56F:120>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(wtls5) ->
+ {
+ {characteristic_two_field, 163, {ppbasis,1,2,8}},
+ {<<16#072546B5435234A422E0789675F432C89435DE5242:168>>, %% A
+ <<16#C9517D06D5240D3CFF38C74B20B6CD4D6F9DD4D9:160>>, %% B
+ <<16#D2C0FB15760860DEF1EEF4D696E6768756151754:160>>}, %% Seed
+ <<16#04:8,
+ 16#07AF69989546103D79329FCC3D74880F33BBE803CB:168, %% X(p0)
+ 16#01EC23211B5966ADEA1D3F87F7EA5848AEF0B7CA9F:168>>, %% Y(p0)
+ <<16#0400000000000000000001E60FC8821CC74DAEAFC1:168>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(wtls6) ->
+ {
+ {prime_field, <<16#DB7C2ABF62E35E668076BEAD208B:112>>}, %% Prime
+ {<<16#DB7C2ABF62E35E668076BEAD2088:112>>, %% A
+ <<16#659EF8BA043916EEDE8911702B22:112>>, %% B
+ <<16#00F50B028E4D696E676875615175290472783FB1:160>>}, %% Seed
+ <<16#04:8,
+ 16#09487239995A5EE76B55F9C2F098:112, %% X(p0)
+ 16#A89CE5AF8724C0A23E0E0FF77500:112>>, %% Y(p0)
+ <<16#DB7C2ABF62E35E7628DFAC6561C5:112>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(wtls7) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73:160>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70:160>>, %% A
+ <<16#B4E134D3FB59EB8BAB57274904664D5AF50388BA:160>>, %% B
+ <<16#B99B99B099B323E02709A4D696E6768756151751:160>>}, %% Seed
+ <<16#04:8,
+ 16#52DCB034293A117E1F4FF11B30F7199D3144CE6D:160, %% X(p0)
+ 16#FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E:160>>, %% Y(p0)
+ <<16#0100000000000000000000351EE786A818F3A1A16B:168>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(wtls8) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFDE7:112>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#03:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0000000000000000000000000001:112, %% X(p0)
+ 16#0000000000000000000000000002:112>>, %% Y(p0)
+ <<16#0100000000000001ECEA551AD837E9:120>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(wtls9) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC808F:160>>}, %% Prime
+ {<<16#00:8>>, %% A
+ <<16#03:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0000000000000000000000000000000000000001:160, %% X(p0)
+ 16#0000000000000000000000000000000000000002:160>>, %% Y(p0)
+ <<16#0100000000000000000001CDC98AE0E2DE574ABF33:168>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(wtls10) ->
+ {
+ {characteristic_two_field, 233, {tpbasis,74}},
+ {<<16#00:8>>, %% A
+ <<16#01:8>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#017232BA853A7E731AF129F22FF4149563A419C26BF50A4C9D6EEFAD6126:240, %% X(p0)
+ 16#01DB537DECE819B7F70F555A67C427A8CD9BF18AEB9B56E0C11056FAE6A3:240>>, %% Y(p0)
+ <<16#8000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF:232>>, %% Order
+ <<16#04:8>> %% CoFactor
+ };
+
+curve(wtls11) ->
+ {
+ {characteristic_two_field, 233, {tpbasis,74}},
+ {<<16#01:8>>, %% A
+ <<16#66647EDE6C332C7F8C0923BB58213B333B20E9CE4281FE115F7D8F90AD:232>>, %% B
+ <<16#74D59FF07F6B413D0EA14B344B20A2DB049B50C3:160>>}, %% Seed
+ <<16#04:8,
+ 16#00FAC9DFCBAC8313BB2139F1BB755FEF65BC391F8B36F8F8EB7371FD558B:240, %% X(p0)
+ 16#01006A08A41903350678E58528BEBF8A0BEFF867A7CA36716F7E01F81052:240>>, %% Y(p0)
+ <<16#01000000000000000000000000000013E974E72F8A6922031D2603CFE0D7:240>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(wtls12) ->
+ {
+ {prime_field, <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001:224>>}, %% Prime
+ {<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE:224>>, %% A
+ <<16#B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4:224>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21:224, %% X(p0)
+ 16#BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34:224>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D:224>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(ipsec3) ->
+ {
+ {characteristic_two_field, 155, {tpbasis,62}},
+ {<<16#00:8>>, %% A
+ <<16#07338F:24>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#000000000000000000000000000000000000007B:160, %% X(p0)
+ 16#00000000000000000000000000000000000001C8:160>>, %% Y(p0)
+ <<16#02AAAAAAAAAAAAAAAAAAC7F3C7881BD0868FA86C:160>>, %% Order
+ <<16#03:8>> %% CoFactor
+ };
+
+curve(ipsec4) ->
+ {
+ {characteristic_two_field, 185, {tpbasis,69}},
+ {<<16#00:8>>, %% A
+ <<16#1EE9:16>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#000000000000000000000000000000000000000000000018:192, %% X(p0)
+ 16#00000000000000000000000000000000000000000000000D:192>>, %% Y(p0)
+ <<16#FFFFFFFFFFFFFFFFFFFFFFEDF97C44DB9F2420BAFCA75E:184>>, %% Order
+ <<16#02:8>> %% CoFactor
+ };
+
+curve(brainpoolP160r1) ->
+ {
+ {prime_field, <<16#E95E4A5F737059DC60DFC7AD95B3D8139515620F:160>>}, %% Prime
+ {<<16#340E7BE2A280EB74E2BE61BADA745D97E8F7C300:160>>, %% A
+ <<16#1E589A8595423412134FAA2DBDEC95C8D8675E58:160>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#BED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3:160, %% X(p0)
+ 16#1667CB477A1A8EC338F94741669C976316DA6321:160>>, %% Y(p0)
+ <<16#E95E4A5F737059DC60DF5991D45029409E60FC09:160>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP160t1) ->
+ {
+ {prime_field, <<16#E95E4A5F737059DC60DFC7AD95B3D8139515620F:160>>}, %% Prime
+ {<<16#E95E4A5F737059DC60DFC7AD95B3D8139515620C:160>>, %% A
+ <<16#7A556B6DAE535B7B51ED2C4D7DAA7A0B5C55F380:160>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#B199B13B9B34EFC1397E64BAEB05ACC265FF2378:160, %% X(p0)
+ 16#ADD6718B7C7C1961F0991B842443772152C9E0AD:160>>, %% Y(p0)
+ <<16#E95E4A5F737059DC60DF5991D45029409E60FC09:160>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP192r1) ->
+ {
+ {prime_field, <<16#C302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297:192>>}, %% Prime
+ {<<16#6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF:192>>, %% A
+ <<16#469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9:192>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#C0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6:192, %% X(p0)
+ 16#14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F:192>>, %% Y(p0)
+ <<16#C302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP192t1) ->
+ {
+ {prime_field, <<16#C302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297:192>>}, %% Prime
+ {<<16#C302F41D932A36CDA7A3463093D18DB78FCE476DE1A86294:192>>, %% A
+ <<16#13D56FFAEC78681E68F9DEB43B35BEC2FB68542E27897B79:192>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#3AE9E58C82F63C30282E1FE7BBF43FA72C446AF6F4618129:192, %% X(p0)
+ 16#097E2C5667C2223A902AB5CA449D0084B7E5B3DE7CCC01C9:192>>, %% Y(p0)
+ <<16#C302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1:192>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP224r1) ->
+ {
+ {prime_field, <<16#D7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF:224>>}, %% Prime
+ {<<16#68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43:224>>, %% A
+ <<16#2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B:224>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D:224, %% X(p0)
+ 16#58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD:224>>, %% Y(p0)
+ <<16#D7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F:224>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP224t1) ->
+ {
+ {prime_field, <<16#D7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF:224>>}, %% Prime
+ {<<16#D7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FC:224>>, %% A
+ <<16#4B337D934104CD7BEF271BF60CED1ED20DA14C08B3BB64F18A60888D:224>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#6AB1E344CE25FF3896424E7FFE14762ECB49F8928AC0C76029B4D580:224, %% X(p0)
+ 16#0374E9F5143E568CD23F3F4D7C0D4B1E41C8CC0D1C6ABD5F1A46DB4C:224>>, %% Y(p0)
+ <<16#D7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F:224>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP256r1) ->
+ {
+ {prime_field, <<16#A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377:256>>}, %% Prime
+ {<<16#7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9:256>>, %% A
+ <<16#26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6:256>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262:256, %% X(p0)
+ 16#547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997:256>>, %% Y(p0)
+ <<16#A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7:256>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP256t1) ->
+ {
+ {prime_field, <<16#A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377:256>>}, %% Prime
+ {<<16#A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5374:256>>, %% A
+ <<16#662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04:256>>, %% B
+ none}, %% Seed
+ <<16#04:8,
+ 16#A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4:256, %% X(p0)
+ 16#2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE:256>>, %% Y(p0)
+ <<16#A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7:256>>, %% Order
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP320r1) ->
+ {
+ {prime_field, <<16#D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28:256, %% Prime
+ 16#FCD412B1F1B32E27:64>>},
+ {<<16#3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F4:256, %% A
+ 16#92F375A97D860EB4:64>>,
+ <<16#520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD88453981:256, %% B
+ 16#6F5EB4AC8FB1F1A6:64>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C7:256, %% X(p0)
+ 16#10AF8D0D39E20611:64,
+ 16#14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7:256, %% Y(p0)
+ 16#D35245D1692E8EE1:64>>,
+ <<16#D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E9:256, %% Order
+ 16#8691555B44C59311:64>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP320t1) ->
+ {
+ {prime_field, <<16#D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28:256, %% Prime
+ 16#FCD412B1F1B32E27:64>>},
+ {<<16#D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28:256, %% A
+ 16#FCD412B1F1B32E24:64>>,
+ <<16#A7F561E038EB1ED560B3D147DB782013064C19F27ED27C6780AAF77FB8A547CE:256, %% B
+ 16#B5B4FEF422340353:64>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#925BE9FB01AFC6FB4D3E7D4990010F813408AB106C4F09CB7EE07868CC136FFF:256, %% X(p0)
+ 16#3357F624A21BED52:64,
+ 16#63BA3A7A27483EBF6671DBEF7ABB30EBEE084E58A0B077AD42A5A0989D1EE71B:256, %% Y(p0)
+ 16#1B9BC0455FB0D2C3:64>>,
+ <<16#D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E9:256, %% Order
+ 16#8691555B44C59311:64>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP384r1) ->
+ {
+ {prime_field, <<16#8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123:256, %% Prime
+ 16#ACD3A729901D1A71874700133107EC53:128>>},
+ {<<16#7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F:256, %% A
+ 16#8AA5814A503AD4EB04A8C7DD22CE2826:128>>,
+ <<16#04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D5:256, %% B
+ 16#7CB4390295DBC9943AB78696FA504C11:128>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8:256, %% X(p0)
+ 16#E826E03436D646AAEF87B2E247D4AF1E:128,
+ 16#8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF9912928:256, %% Y(p0)
+ 16#0E4646217791811142820341263C5315:128>>,
+ <<16#8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7:256, %% Order
+ 16#CF3AB6AF6B7FC3103B883202E9046565:128>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP384t1) ->
+ {
+ {prime_field, <<16#8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123:256, %% Prime
+ 16#ACD3A729901D1A71874700133107EC53:128>>},
+ {<<16#8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123:256, %% A
+ 16#ACD3A729901D1A71874700133107EC50:128>>,
+ <<16#7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D:256, %% B
+ 16#2074AA263B88805CED70355A33B471EE:128>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFF:256, %% X(p0)
+ 16#C4FF191B946A5F54D8D0AA2F418808CC:128,
+ 16#25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE:256, %% Y(p0)
+ 16#469408584DC2B2912675BF5B9E582928:128>>,
+ <<16#8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7:256, %% Order
+ 16#CF3AB6AF6B7FC3103B883202E9046565:128>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP512r1) ->
+ {
+ {prime_field, <<16#AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330871:256, %% Prime
+ 16#7D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3:256>>},
+ {<<16#7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC:256, %% A
+ 16#2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA:256>>,
+ <<16#3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A7:256, %% B
+ 16#2BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723:256>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098E:256, %% X(p0)
+ 16#FF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822:256,
+ 16#7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111:256, %% Y(p0)
+ 16#B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892:256>>,
+ <<16#AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870:256, %% Order
+ 16#553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069:256>>,
+ <<16#01:8>> %% CoFactor
+ };
+
+curve(brainpoolP512t1) ->
+ {
+ {prime_field, <<16#AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330871:256, %% Prime
+ 16#7D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3:256>>},
+ {<<16#AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330871:256, %% A
+ 16#7D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F0:256>>,
+ <<16#7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A6:256, %% B
+ 16#2BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E:256>>,
+ none}, %% Seed
+ <<16#04:8,
+ 16#640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D:256, %% X(p0)
+ 16#82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA:256,
+ 16#5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9:256, %% Y(p0)
+ 16#D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332:256>>,
+ <<16#AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870:256, %% Order
+ 16#553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069:256>>,
+ <<16#01:8>> %% CoFactor
+ }.
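
Each curve/1 clause above returns an explicit EC parameter tuple {Field, {A, B, Seed}, BasePoint, Order, CoFactor}: Field is {prime_field, Prime} or {characteristic_two_field, M, Basis}, and BasePoint is the generator in uncompressed SEC 1 form (the leading 16#04 octet). A minimal sketch of feeding such a tuple straight into an ECDH exchange, assuming a crypto build that accepts explicit curve parameters as well as curve name atoms:

    Params = crypto_ec_curves:curve(brainpoolP256r1),
    {Pub1, Priv1} = crypto:generate_key(ecdh, Params),
    {Pub2, Priv2} = crypto:generate_key(ecdh, Params),
    %% Both peers derive the same shared secret.
    Secret = crypto:compute_key(ecdh, Pub2, Priv1, Params),
    Secret = crypto:compute_key(ecdh, Pub1, Priv2, Params).
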
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 42e200fcf0..d1be7cea68 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -437,7 +437,7 @@ do_generate_compute({dh, P, G}) ->
SharedSecret = crypto:compute_key(dh, UserPub, HostPriv, [P, G]).
do_compute({ecdh = Type, Pub, Priv, Curve, SharedSecret}) ->
- Secret = crypto:bytes_to_integer(crypto:compute_key(Type, Pub, Priv, Curve)),
+ Secret = crypto:compute_key(Type, Pub, Priv, Curve),
case Secret of
SharedSecret ->
ok;
@@ -445,6 +445,9 @@ do_compute({ecdh = Type, Pub, Priv, Curve, SharedSecret}) ->
ct:fail({{crypto, compute_key, [Type, Pub, Priv, Curve]}, {expected, SharedSecret}, {got, Other}})
end.
+hexstr2point(X, Y) ->
+ <<4:8, (hexstr2bin(X))/binary, (hexstr2bin(Y))/binary>>.
+
hexstr2bin(S) ->
list_to_binary(hexstr2list(S)).
@@ -668,7 +671,7 @@ group_config(srp, Config) ->
GenerateCompute = [srp3(), srp6(), srp6a()],
[{generate_compute, GenerateCompute} | Config];
group_config(ecdh, Config) ->
- Compute = [ecdh()],
+ Compute = ecdh(),
[{compute, Compute} | Config];
group_config(dh, Config) ->
GenerateCompute = [dh()],
@@ -1498,9 +1501,89 @@ srp(ClientPrivate, Generator, Prime, Version, Verifier, ServerPublic, ServerPriv
{host, [Verifier, Prime, Version, Scrambler]},
SessionKey}.
ecdh() ->
- {ecdh, 10053111454769593468622878414300213417816614162107065345116848162553478019161427871683337786549966,
- 1373339791687564785573162818422814591820885704654,
- secp160r1, 990333295438215762119481641129490894973766052278}.
+ %% http://csrc.nist.gov/groups/STM/cavp/
+ [{ecdh, hexstr2point("42ea6dd9969dd2a61fea1aac7f8e98edcc896c6e55857cc0", "dfbe5d7c61fac88b11811bde328e8a0d12bf01a9d204b523"),
+ hexstr2bin("f17d3fea367b74d340851ca4270dcb24c271f445bed9d527"),
+ secp192r1,
+ hexstr2bin("803d8ab2e5b6e6fca715737c3a82f7ce3c783124f6d51cd0")},
+ {ecdh, hexstr2point("deb5712fa027ac8d2f22c455ccb73a91e17b6512b5e030e7", "7e2690a02cc9b28708431a29fb54b87b1f0c14e011ac2125"),
+ hexstr2bin("56e853349d96fe4c442448dacb7cf92bb7a95dcf574a9bd5"),
+ secp192r1,
+ hexstr2bin("c208847568b98835d7312cef1f97f7aa298283152313c29d")},
+ {ecdh, hexstr2point("af33cd0629bc7e996320a3f40368f74de8704fa37b8fab69abaae280", "882092ccbba7930f419a8a4f9bb16978bbc3838729992559a6f2e2d7"),
+ hexstr2bin("8346a60fc6f293ca5a0d2af68ba71d1dd389e5e40837942df3e43cbd"),
+ secp224r1,
+ hexstr2bin("7d96f9a3bd3c05cf5cc37feb8b9d5209d5c2597464dec3e9983743e8")},
+ {ecdh, hexstr2point("13bfcd4f8e9442393cab8fb46b9f0566c226b22b37076976f0617a46", "eeb2427529b288c63c2f8963c1e473df2fca6caa90d52e2f8db56dd4"),
+ hexstr2bin("043cb216f4b72cdf7629d63720a54aee0c99eb32d74477dac0c2f73d"),
+ secp224r1,
+ hexstr2bin("ee93ce06b89ff72009e858c68eb708e7bc79ee0300f73bed69bbca09")},
+ {ecdh, hexstr2point("700c48f77f56584c5cc632ca65640db91b6bacce3a4df6b42ce7cc838833d287", "db71e509e3fd9b060ddb20ba5c51dcc5948d46fbf640dfe0441782cab85fa4ac"),
+ hexstr2bin("7d7dc5f71eb29ddaf80d6214632eeae03d9058af1fb6d22ed80badb62bc1a534"),
+ secp256r1,
+ hexstr2bin("46fc62106420ff012e54a434fbdd2d25ccc5852060561e68040dd7778997bd7b")},
+ {ecdh, hexstr2point("809f04289c64348c01515eb03d5ce7ac1a8cb9498f5caa50197e58d43a86a7ae", "b29d84e811197f25eba8f5194092cb6ff440e26d4421011372461f579271cda3"),
+ hexstr2bin("38f65d6dce47676044d58ce5139582d568f64bb16098d179dbab07741dd5caf5"),
+ secp256r1,
+ hexstr2bin("057d636096cb80b67a8c038c890e887d1adfa4195e9b3ce241c8a778c59cda67")},
+ {ecdh, hexstr2point("a7c76b970c3b5fe8b05d2838ae04ab47697b9eaf52e764592efda27fe7513272734466b400091adbf2d68c58e0c50066", "ac68f19f2e1cb879aed43a9969b91a0839c4c38a49749b661efedf243451915ed0905a32b060992b468c64766fc8437a"),
+ hexstr2bin("3cc3122a68f0d95027ad38c067916ba0eb8c38894d22e1b15618b6818a661774ad463b205da88cf699ab4d43c9cf98a1"),
+ secp384r1,
+ hexstr2bin("5f9d29dc5e31a163060356213669c8ce132e22f57c9a04f40ba7fcead493b457e5621e766c40a2e3d4d6a04b25e533f1")},
+ {ecdh, hexstr2point("30f43fcf2b6b00de53f624f1543090681839717d53c7c955d1d69efaf0349b7363acb447240101cbb3af6641ce4b88e0", "25e46c0c54f0162a77efcc27b6ea792002ae2ba82714299c860857a68153ab62e525ec0530d81b5aa15897981e858757"),
+ hexstr2bin("92860c21bde06165f8e900c687f8ef0a05d14f290b3f07d8b3a8cc6404366e5d5119cd6d03fb12dc58e89f13df9cd783"),
+ secp384r1,
+ hexstr2bin("a23742a2c267d7425fda94b93f93bbcc24791ac51cd8fd501a238d40812f4cbfc59aac9520d758cf789c76300c69d2ff")},
+ {ecdh, hexstr2point("00685a48e86c79f0f0875f7bc18d25eb5fc8c0b07e5da4f4370f3a9490340854334b1e1b87fa395464c60626124a4e70d0f785601d37c09870ebf176666877a2046d", "01ba52c56fc8776d9e8f5db4f0cc27636d0b741bbe05400697942e80b739884a83bde99e0f6716939e632bc8986fa18dccd443a348b6c3e522497955a4f3c302f676"),
+ hexstr2bin("017eecc07ab4b329068fba65e56a1f8890aa935e57134ae0ffcce802735151f4eac6564f6ee9974c5e6887a1fefee5743ae2241bfeb95d5ce31ddcb6f9edb4d6fc47"),
+ secp521r1,
+ hexstr2bin("005fc70477c3e63bc3954bd0df3ea0d1f41ee21746ed95fc5e1fdf90930d5e136672d72cc770742d1711c3c3a4c334a0ad9759436a4d3c5bf6e74b9578fac148c831")},
+ {ecdh, hexstr2point("01df277c152108349bc34d539ee0cf06b24f5d3500677b4445453ccc21409453aafb8a72a0be9ebe54d12270aa51b3ab7f316aa5e74a951c5e53f74cd95fc29aee7a", "013d52f33a9f3c14384d1587fa8abe7aed74bc33749ad9c570b471776422c7d4505d9b0a96b3bfac041e4c6a6990ae7f700e5b4a6640229112deafa0cd8bb0d089b0"),
+ hexstr2bin("00816f19c1fb10ef94d4a1d81c156ec3d1de08b66761f03f06ee4bb9dcebbbfe1eaa1ed49a6a990838d8ed318c14d74cc872f95d05d07ad50f621ceb620cd905cfb8"),
+ secp521r1,
+ hexstr2bin("000b3920ac830ade812c8f96805da2236e002acbbf13596a9ab254d44d0e91b6255ebf1229f366fb5a05c5884ef46032c26d42189273ca4efa4c3db6bd12a6853759")},
+
+ %% RFC-6954, Appendix A
+ {ecdh, hexstr2point("A9C21A569759DA95E0387041184261440327AFE33141CA04B82DC92E",
+ "98A0F75FBBF61D8E58AE5511B2BCDBE8E549B31E37069A2825F590C1"),
+ hexstr2bin("6060552303899E2140715816C45B57D9B42204FB6A5BF5BEAC10DB00"),
+ brainpoolP224r1,
+ hexstr2bin("1A4BFE705445120C8E3E026699054104510D119757B74D5FE2462C66")},
+ {ecdh, hexstr2point("034A56C550FF88056144E6DD56070F54B0135976B5BF77827313F36B",
+ "75165AD99347DC86CAAB1CBB579E198EAF88DC35F927B358AA683681"),
+ hexstr2bin("39F155483CEE191FBECFE9C81D8AB1A03CDA6790E7184ACE44BCA161"),
+ brainpoolP224r1,
+ hexstr2bin("1A4BFE705445120C8E3E026699054104510D119757B74D5FE2462C66")},
+ {ecdh, hexstr2point("44106E913F92BC02A1705D9953A8414DB95E1AAA49E81D9E85F929A8E3100BE5",
+ "8AB4846F11CACCB73CE49CBDD120F5A900A69FD32C272223F789EF10EB089BDC"),
+ hexstr2bin("55E40BC41E37E3E2AD25C3C6654511FFA8474A91A0032087593852D3E7D76BD3"),
+ brainpoolP256r1,
+ hexstr2bin("89AFC39D41D3B327814B80940B042590F96556EC91E6AE7939BCE31F3A18BF2B")},
+ {ecdh, hexstr2point("8D2D688C6CF93E1160AD04CC4429117DC2C41825E1E9FCA0ADDD34E6F1B39F7B",
+ "990C57520812BE512641E47034832106BC7D3E8DD0E4C7F1136D7006547CEC6A"),
+ hexstr2bin("81DB1EE100150FF2EA338D708271BE38300CB54241D79950F77B063039804F1D"),
+ brainpoolP256r1,
+ hexstr2bin("89AFC39D41D3B327814B80940B042590F96556EC91E6AE7939BCE31F3A18BF2B")},
+ {ecdh, hexstr2point("68B665DD91C195800650CDD363C625F4E742E8134667B767B1B476793588F885AB698C852D4A6E77A252D6380FCAF068",
+ "55BC91A39C9EC01DEE36017B7D673A931236D2F1F5C83942D049E3FA20607493E0D038FF2FD30C2AB67D15C85F7FAA59"),
+ hexstr2bin("032640BC6003C59260F7250C3DB58CE647F98E1260ACCE4ACDA3DD869F74E01F8BA5E0324309DB6A9831497ABAC96670"),
+ brainpoolP384r1,
+ hexstr2bin("0BD9D3A7EA0B3D519D09D8E48D0785FB744A6B355E6304BC51C229FBBCE239BBADF6403715C35D4FB2A5444F575D4F42")},
+ {ecdh, hexstr2point("4D44326F269A597A5B58BBA565DA5556ED7FD9A8A9EB76C25F46DB69D19DC8CE6AD18E404B15738B2086DF37E71D1EB4",
+ "62D692136DE56CBE93BF5FA3188EF58BC8A3A0EC6C1E151A21038A42E9185329B5B275903D192F8D4E1F32FE9CC78C48"),
+ hexstr2bin("1E20F5E048A5886F1F157C74E91BDE2B98C8B52D58E5003D57053FC4B0BD65D6F15EB5D1EE1610DF870795143627D042"),
+ brainpoolP384r1,
+ hexstr2bin("0BD9D3A7EA0B3D519D09D8E48D0785FB744A6B355E6304BC51C229FBBCE239BBADF6403715C35D4FB2A5444F575D4F42")},
+ {ecdh, hexstr2point("0A420517E406AAC0ACDCE90FCD71487718D3B953EFD7FBEC5F7F27E28C6149999397E91E029E06457DB2D3E640668B392C2A7E737A7F0BF04436D11640FD09FD",
+ "72E6882E8DB28AAD36237CD25D580DB23783961C8DC52DFA2EC138AD472A0FCEF3887CF62B623B2A87DE5C588301EA3E5FC269B373B60724F5E82A6AD147FDE7"),
+ hexstr2bin("230E18E1BCC88A362FA54E4EA3902009292F7F8033624FD471B5D8ACE49D12CFABBC19963DAB8E2F1EBA00BFFB29E4D72D13F2224562F405CB80503666B25429"),
+ brainpoolP512r1,
+ hexstr2bin("A7927098655F1F9976FA50A9D566865DC530331846381C87256BAF3226244B76D36403C024D7BBF0AA0803EAFF405D3D24F11A9B5C0BEF679FE1454B21C4CD1F")},
+ {ecdh, hexstr2point("9D45F66DE5D67E2E6DB6E93A59CE0BB48106097FF78A081DE781CDB31FCE8CCBAAEA8DD4320C4119F1E9CD437A2EAB3731FA9668AB268D871DEDA55A5473199F",
+ "2FDC313095BCDD5FB3A91636F07A959C8E86B5636A1E930E8396049CB481961D365CC11453A06C719835475B12CB52FC3C383BCE35E27EF194512B71876285FA"),
+ hexstr2bin("16302FF0DBBB5A8D733DAB7141C1B45ACBC8715939677F6A56850A38BD87BD59B09E80279609FF333EB9D4C061231FB26F92EEB04982A5F1D1764CAD57665422"),
+ brainpoolP512r1,
+ hexstr2bin("A7927098655F1F9976FA50A9D566865DC530331846381C87256BAF3226244B76D36403C024D7BBF0AA0803EAFF405D3D24F11A9B5C0BEF679FE1454B21C4CD1F")}].
dh() ->
{dh, 0087761979513264537414556992123116644042638206717762626089877284926656954974893442000747478454809111207351620687968672207938731607963470779396984752680274820156266685080223616226905101126463253150237669547023934604953898814222890239130021414026118792251620881355456432549881723310342870016961804255746630219, 2}.
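
The test changes above track crypto:compute_key/4 returning the shared secret as a binary rather than an integer, so the NIST CAVP and RFC 6954 vectors can be compared byte for byte. The new hexstr2point/2 helper builds the uncompressed SEC 1 point encoding: a 16#04 tag octet followed by X and Y as big-endian octet strings. A tiny worked example with hypothetical coordinates:

    %% hexstr2point("0011", "FFEE") yields <<4,16#00,16#11,16#FF,16#EE>>:
    <<4:8, 16#0011:16, 16#FFEE:16>> = hexstr2point("0011", "FFEE").
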
diff --git a/lib/debugger/src/dbg_ieval.erl b/lib/debugger/src/dbg_ieval.erl
index 6ce3262ed2..14a17fe304 100644
--- a/lib/debugger/src/dbg_ieval.erl
+++ b/lib/debugger/src/dbg_ieval.erl
@@ -712,23 +712,25 @@ expr({'if',Line,Cs}, Bs, Ieval) ->
if_clauses(Cs, Bs, Ieval#ieval{line=Line});
%% Andalso/orelse
-expr({'andalso',Line,E1,E2}, Bs, Ieval) ->
- case expr(E1, Bs, Ieval#ieval{line=Line, top=false}) of
- {value,false,_}=Res ->
- Res;
- {value,true,_} ->
- expr(E2, Bs, Ieval#ieval{line=Line, top=false});
- {value,Val,Bs} ->
- exception(error, {badarg,Val}, Bs, Ieval)
+expr({'andalso',Line,E1,E2}, Bs0, Ieval) ->
+ case expr(E1, Bs0, Ieval#ieval{line=Line, top=false}) of
+ {value,false,_}=Res ->
+ Res;
+ {value,true,Bs} ->
+ {value,Val,_} = expr(E2, Bs, Ieval#ieval{line=Line, top=false}),
+ {value,Val,Bs};
+ {value,Val,Bs} ->
+ exception(error, {badarg,Val}, Bs, Ieval)
end;
-expr({'orelse',Line,E1,E2}, Bs, Ieval) ->
- case expr(E1, Bs, Ieval#ieval{line=Line, top=false}) of
- {value,true,_}=Res ->
- Res;
- {value,false,_} ->
- expr(E2, Bs, Ieval#ieval{line=Line, top=false});
- {value,Val,_} ->
- exception(error, {badarg,Val}, Bs, Ieval)
+expr({'orelse',Line,E1,E2}, Bs0, Ieval) ->
+ case expr(E1, Bs0, Ieval#ieval{line=Line, top=false}) of
+ {value,true,_}=Res ->
+ Res;
+ {value,false,Bs} ->
+ {value,Val,_} = expr(E2, Bs, Ieval#ieval{line=Line, top=false}),
+ {value,Val,Bs};
+ {value,Val,Bs} ->
+ exception(error, {badarg,Val}, Bs, Ieval)
end;
%% Matching expression
diff --git a/lib/debugger/test/int_eval_SUITE_data/my_int_eval_module.erl b/lib/debugger/test/int_eval_SUITE_data/my_int_eval_module.erl
index c5c6a56363..ab485fd350 100644
--- a/lib/debugger/test/int_eval_SUITE_data/my_int_eval_module.erl
+++ b/lib/debugger/test/int_eval_SUITE_data/my_int_eval_module.erl
@@ -236,4 +236,8 @@ otp_8310() ->
(catch {a, [X || X <- a]}),
{'EXIT',{{bad_generator,b},_}} =
(catch {a, << <<X>> || << X >> <= b >>}),
+ true = begin (X1 = true) andalso X1, X1 end,
+ false = begin (X2 = false) andalso X2, X2 end,
+ true = begin (X3 = true) orelse X3, X3 end,
+ false = begin (X4 = false) orelse X4, X4 end,
ok.
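
The interpreter fix above makes a binding created in the first operand of andalso/orelse visible after the whole expression, as it already is in compiled code; the added test cases pin that down. In other words, with the fix the debugger agrees with the compiler on code such as:

    f() ->
        (X = true) andalso X,
        X.  %% X is still bound to true here
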
diff --git a/lib/hipe/icode/hipe_icode.erl b/lib/hipe/icode/hipe_icode.erl
index 6d4758bbf1..0e651a351c 100644
--- a/lib/hipe/icode/hipe_icode.erl
+++ b/lib/hipe/icode/hipe_icode.erl
@@ -1464,6 +1464,7 @@ successors(I) ->
case fail_label(I) of [] -> []; L when is_integer(L) -> [L] end;
#icode_enter{} -> [];
#icode_return{} -> [];
+ #icode_comment{} -> [];
%% the following are included here for handling linear code
#icode_move{} -> [];
#icode_begin_handler{} -> []
diff --git a/lib/odbc/c_src/odbcserver.c b/lib/odbc/c_src/odbcserver.c
index 8de81a30ae..b4655ce373 100644
--- a/lib/odbc/c_src/odbcserver.c
+++ b/lib/odbc/c_src/odbcserver.c
@@ -98,6 +98,7 @@
/* ----------------------------- INCLUDES ------------------------------*/
+#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
diff --git a/lib/public_key/asn1/Makefile b/lib/public_key/asn1/Makefile
index a4e36c7293..c1b3bc866d 100644
--- a/lib/public_key/asn1/Makefile
+++ b/lib/public_key/asn1/Makefile
@@ -41,7 +41,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/public_key-$(VSN)
ASN_TOP = OTP-PUB-KEY PKCS-FRAME
ASN_MODULES = PKIX1Explicit88 PKIX1Implicit88 PKIX1Algorithms88 \
PKIXAttributeCertificate PKCS-1 PKCS-3 PKCS-7 PKCS-8 PKCS-10 PKCS5v2-0 OTP-PKIX \
- InformationFramework
+ InformationFramework RFC5639
ASN_ASNS = $(ASN_MODULES:%=%.asn1)
ASN_ERLS = $(ASN_TOP:%=%.erl)
ASN_HRLS = $(ASN_TOP:%=%.hrl)
@@ -116,7 +116,8 @@ OTP-PUB-KEY.asn1db: PKIX1Algorithms88.asn1 \
PKCS-7.asn1\
PKCS-10.asn1\
InformationFramework.asn1\
- OTP-PKIX.asn1
+ OTP-PKIX.asn1 \
+ RFC5639.asn1
$(EBIN)/PKCS-FRAME.beam: PKCS-FRAME.erl PKCS-FRAME.hrl
PKCS-FRAME.erl PKCS-FRAME.hrl: PKCS-FRAME.asn1db
diff --git a/lib/public_key/asn1/OTP-PUB-KEY.set.asn b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
index e94f428e4b..b3f3ccdb77 100644
--- a/lib/public_key/asn1/OTP-PUB-KEY.set.asn
+++ b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
@@ -9,3 +9,4 @@ DSS.asn1
ECPrivateKey.asn1
PKCS-7.asn1
PKCS-10.asn1
+RFC5639.asn1
diff --git a/lib/public_key/asn1/RFC5639.asn1 b/lib/public_key/asn1/RFC5639.asn1
new file mode 100644
index 0000000000..85b8533132
--- /dev/null
+++ b/lib/public_key/asn1/RFC5639.asn1
@@ -0,0 +1,27 @@
+RFC5639 {iso(1) identified-organization(3) teletrust(36) algorithm(3) signature-algorithm(3) ecSign(2) 8} DEFINITIONS EXPLICIT TAGS ::=
+
+BEGIN
+
+ecStdCurvesAndGeneration OBJECT IDENTIFIER::= {iso(1)
+ identified-organization(3) teletrust(36) algorithm(3) signature-algorithm(3) ecSign(2) 8}
+
+ellipticCurveRFC5639 OBJECT IDENTIFIER ::= {ecStdCurvesAndGeneration 1}
+
+versionOne OBJECT IDENTIFIER ::= {ellipticCurveRFC5639 1}
+
+brainpoolP160r1 OBJECT IDENTIFIER ::= {versionOne 1}
+brainpoolP160t1 OBJECT IDENTIFIER ::= {versionOne 2}
+brainpoolP192r1 OBJECT IDENTIFIER ::= {versionOne 3}
+brainpoolP192t1 OBJECT IDENTIFIER ::= {versionOne 4}
+brainpoolP224r1 OBJECT IDENTIFIER ::= {versionOne 5}
+brainpoolP224t1 OBJECT IDENTIFIER ::= {versionOne 6}
+brainpoolP256r1 OBJECT IDENTIFIER ::= {versionOne 7}
+brainpoolP256t1 OBJECT IDENTIFIER ::= {versionOne 8}
+brainpoolP320r1 OBJECT IDENTIFIER ::= {versionOne 9}
+brainpoolP320t1 OBJECT IDENTIFIER ::= {versionOne 10}
+brainpoolP384r1 OBJECT IDENTIFIER ::= {versionOne 11}
+brainpoolP384t1 OBJECT IDENTIFIER ::= {versionOne 12}
+brainpoolP512r1 OBJECT IDENTIFIER ::= {versionOne 13}
+brainpoolP512t1 OBJECT IDENTIFIER ::= {versionOne 14}
+
+END
diff --git a/lib/public_key/src/pubkey_cert_records.erl b/lib/public_key/src/pubkey_cert_records.erl
index fdd89aa70d..f7a361d5a8 100644
--- a/lib/public_key/src/pubkey_cert_records.erl
+++ b/lib/public_key/src/pubkey_cert_records.erl
@@ -147,6 +147,20 @@ namedCurves(?'sect163r1') -> sect163r1;
namedCurves(?'sect163k1') -> sect163k1;
namedCurves(?'secp256r1') -> secp256r1;
namedCurves(?'secp192r1') -> secp192r1;
+namedCurves(?'brainpoolP160r1') -> brainpoolP160r1;
+namedCurves(?'brainpoolP160t1') -> brainpoolP160t1;
+namedCurves(?'brainpoolP192r1') -> brainpoolP192r1;
+namedCurves(?'brainpoolP192t1') -> brainpoolP192t1;
+namedCurves(?'brainpoolP224r1') -> brainpoolP224r1;
+namedCurves(?'brainpoolP224t1') -> brainpoolP224t1;
+namedCurves(?'brainpoolP256r1') -> brainpoolP256r1;
+namedCurves(?'brainpoolP256t1') -> brainpoolP256t1;
+namedCurves(?'brainpoolP320r1') -> brainpoolP320r1;
+namedCurves(?'brainpoolP320t1') -> brainpoolP320t1;
+namedCurves(?'brainpoolP384r1') -> brainpoolP384r1;
+namedCurves(?'brainpoolP384t1') -> brainpoolP384t1;
+namedCurves(?'brainpoolP512r1') -> brainpoolP512r1;
+namedCurves(?'brainpoolP512t1') -> brainpoolP512t1;
namedCurves(sect571r1) -> ?'sect571r1';
namedCurves(sect571k1) -> ?'sect571k1';
@@ -180,7 +194,21 @@ namedCurves(sect239k1) -> ?'sect239k1';
namedCurves(sect163r1) -> ?'sect163r1';
namedCurves(sect163k1) -> ?'sect163k1';
namedCurves(secp256r1) -> ?'secp256r1';
-namedCurves(secp192r1) -> ?'secp192r1'.
+namedCurves(secp192r1) -> ?'secp192r1';
+namedCurves(brainpoolP160r1) -> ?'brainpoolP160r1';
+namedCurves(brainpoolP160t1) -> ?'brainpoolP160t1';
+namedCurves(brainpoolP192r1) -> ?'brainpoolP192r1';
+namedCurves(brainpoolP192t1) -> ?'brainpoolP192t1';
+namedCurves(brainpoolP224r1) -> ?'brainpoolP224r1';
+namedCurves(brainpoolP224t1) -> ?'brainpoolP224t1';
+namedCurves(brainpoolP256r1) -> ?'brainpoolP256r1';
+namedCurves(brainpoolP256t1) -> ?'brainpoolP256t1';
+namedCurves(brainpoolP320r1) -> ?'brainpoolP320r1';
+namedCurves(brainpoolP320t1) -> ?'brainpoolP320t1';
+namedCurves(brainpoolP384r1) -> ?'brainpoolP384r1';
+namedCurves(brainpoolP384t1) -> ?'brainpoolP384t1';
+namedCurves(brainpoolP512r1) -> ?'brainpoolP512r1';
+namedCurves(brainpoolP512t1) -> ?'brainpoolP512t1'.
%%--------------------------------------------------------------------
%%% Internal functions
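
With the clauses added above, pubkey_cert_records:namedCurves/1 translates in both directions between the Brainpool OIDs defined in RFC5639.asn1 and their curve name atoms. For example, writing the OID out as a tuple (the ?'brainpoolP256r1' macro from the generated OTP-PUB-KEY header expands to the same value):

    brainpoolP256r1 = pubkey_cert_records:namedCurves({1,3,36,3,3,2,8,1,1,7}),
    {1,3,36,3,3,2,8,1,1,7} = pubkey_cert_records:namedCurves(brainpoolP256r1).
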
diff --git a/lib/ssh/src/ssh_cli.erl b/lib/ssh/src/ssh_cli.erl
index 2c8e515a14..41febf9707 100644
--- a/lib/ssh/src/ssh_cli.erl
+++ b/lib/ssh/src/ssh_cli.erl
@@ -349,7 +349,7 @@ delete_chars(N, {Buf, BufTail, Col}, Tty) when N > 0 ->
{Buf, NewBufTail, Col}};
delete_chars(N, {Buf, BufTail, Col}, Tty) -> % N < 0
NewBuf = nthtail(-N, Buf),
- NewCol = Col + N,
+ NewCol = case Col + N of V when V >= 0 -> V; _ -> 0 end,
M1 = move_cursor(Col, NewCol, Tty),
M2 = move_cursor(NewCol + length(BufTail) - N, NewCol, Tty),
{[M1, BufTail, lists:duplicate(-N, $ ) | M2],
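
The case expression above clamps the cursor column at zero when more characters are deleted than the column holds; it is equivalent to the shorter

    NewCol = max(Col + N, 0),

and guards the subsequent move_cursor/3 calls against a negative column.
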
diff --git a/lib/ssl/src/tls_v1.erl b/lib/ssl/src/tls_v1.erl
index 2395e98642..7c7fdd64c3 100644
--- a/lib/ssl/src/tls_v1.erl
+++ b/lib/ssl/src/tls_v1.erl
@@ -368,11 +368,19 @@ finished_label(server) ->
 %% list ECC curves in preferred order
ecc_curves(_Minor) ->
- [?sect571r1,?sect571k1,?secp521r1,?sect409k1,?sect409r1,
- ?secp384r1,?sect283k1,?sect283r1,?secp256k1,?secp256r1,
- ?sect239k1,?sect233k1,?sect233r1,?secp224k1,?secp224r1,
- ?sect193r1,?sect193r2,?secp192k1,?secp192r1,?sect163k1,
- ?sect163r1,?sect163r2,?secp160k1,?secp160r1,?secp160r2].
+ TLSCurves = [sect571r1,sect571k1,secp521r1,brainpoolP512r1,
+ sect409k1,sect409r1,brainpoolP384r1,secp384r1,
+ sect283k1,sect283r1,brainpoolP256r1,secp256k1,secp256r1,
+ sect239k1,sect233k1,sect233r1,secp224k1,secp224r1,
+ sect193r1,sect193r2,secp192k1,secp192r1,sect163k1,
+ sect163r1,sect163r2,secp160k1,secp160r1,secp160r2],
+ CryptoCurves = crypto:ec_curves(),
+ lists:foldr(fun(Curve, Curves) ->
+ case proplists:get_bool(Curve, CryptoCurves) of
+ true -> [pubkey_cert_records:namedCurves(Curve)|Curves];
+ false -> Curves
+ end
+ end, [], TLSCurves).
%% ECC curves from draft-ietf-tls-ecc-12.txt (Oct. 17, 2005)
oid_to_enum(?sect163k1) -> 1;
@@ -399,7 +407,10 @@ oid_to_enum(?secp224r1) -> 21;
oid_to_enum(?secp256k1) -> 22;
oid_to_enum(?secp256r1) -> 23;
oid_to_enum(?secp384r1) -> 24;
-oid_to_enum(?secp521r1) -> 25.
+oid_to_enum(?secp521r1) -> 25;
+oid_to_enum(?brainpoolP256r1) -> 26;
+oid_to_enum(?brainpoolP384r1) -> 27;
+oid_to_enum(?brainpoolP512r1) -> 28.
enum_to_oid(1) -> ?sect163k1;
enum_to_oid(2) -> ?sect163r1;
@@ -425,7 +436,10 @@ enum_to_oid(21) -> ?secp224r1;
enum_to_oid(22) -> ?secp256k1;
enum_to_oid(23) -> ?secp256r1;
enum_to_oid(24) -> ?secp384r1;
-enum_to_oid(25) -> ?secp521r1.
+enum_to_oid(25) -> ?secp521r1;
+enum_to_oid(26) -> ?brainpoolP256r1;
+enum_to_oid(27) -> ?brainpoolP384r1;
+enum_to_oid(28) -> ?brainpoolP512r1.
sufficent_ec_support() ->
CryptoSupport = crypto:supports(),
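
ecc_curves/1 now offers only the curves that the underlying crypto library actually supports: each name in the preference list is checked against crypto:ec_curves(), and the survivors are mapped to OIDs with pubkey_cert_records:namedCurves/1, keeping the original order. A shell sketch of the same filtering (the result depends on the linked OpenSSL):

    1> [C || C <- [sect571r1, brainpoolP512r1, secp256r1],
             lists:member(C, crypto:ec_curves())].
    [sect571r1,brainpoolP512r1,secp256r1]   %% when all three are available
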
diff --git a/lib/stdlib/src/orddict.erl b/lib/stdlib/src/orddict.erl
index da60fc1bb6..c98d78b34d 100644
--- a/lib/stdlib/src/orddict.erl
+++ b/lib/stdlib/src/orddict.erl
@@ -56,8 +56,10 @@ to_list(Dict) -> Dict.
List :: [{Key :: term(), Value :: term()}],
Orddict :: orddict().
+from_list([]) -> [];
+from_list([{_,_}]=Pair) -> Pair;
from_list(Pairs) ->
- lists:foldl(fun ({K,V}, D) -> store(K, V, D) end, [], Pairs).
+ lists:ukeysort(1, reverse_pairs(Pairs, [])).
-spec size(Orddict) -> non_neg_integer() when
Orddict :: orddict().
@@ -235,3 +237,7 @@ merge(F, [{K1,V1}|D1], [{_K2,V2}|D2]) -> %K1 == K2
[{K1,F(K1, V1, V2)}|merge(F, D1, D2)];
merge(F, [], D2) when is_function(F, 3) -> D2;
merge(F, D1, []) when is_function(F, 3) -> D1.
+
+reverse_pairs([{_,_}=H|T], Acc) ->
+ reverse_pairs(T, [H|Acc]);
+reverse_pairs([], Acc) -> Acc.
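
The from_list/1 rewrite above leans on lists:ukeysort/2 keeping the first tuple among equal keys: reversing the input first therefore makes the last occurrence of a key win, which preserves the old store/3-based semantics while sorting once instead of doing one insertion per pair.

    1> orddict:from_list([{a,1},{b,2},{a,3}]).
    [{a,3},{b,2}]
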
diff --git a/lib/test_server/src/configure.in b/lib/test_server/src/configure.in
index 3815027721..067663feb4 100644
--- a/lib/test_server/src/configure.in
+++ b/lib/test_server/src/configure.in
@@ -38,6 +38,35 @@ AC_ARG_ENABLE(debug-mode,
*) CFLAGS=$DEBUG_FLAGS ;;
esac ], )
+AC_ARG_ENABLE(m64-build,
+AS_HELP_STRING([--enable-m64-build],
+ [build 64-bit binaries using the -m64 flag to (g)cc]),
+[ case "$enableval" in
+ no) enable_m64_build=no ;;
+ *) enable_m64_build=yes ;;
+ esac
+],enable_m64_build=no)
+
+AC_ARG_ENABLE(m32-build,
+AS_HELP_STRING([--enable-m32-build],
+ [build 32-bit binaries using the -m32 flag to (g)cc]),
+[ case "$enableval" in
+ no) enable_m32_build=no ;;
+ *) enable_m32_build=yes ;;
+ esac
+],enable_m32_build=no)
+
+no_mXX_LDFLAGS="$LDFLAGS"
+
+if test X${enable_m64_build} = Xyes; then
+ CFLAGS="-m64 $CFLAGS"
+ LDFLAGS="-m64 $LDFLAGS"
+fi
+if test X${enable_m32_build} = Xyes; then
+ CFLAGS="-m32 $CFLAGS"
+ LDFLAGS="-m32 $LDFLAGS"
+fi
+
AC_CHECK_LIB(m, sin)
#--------------------------------------------------------------------
@@ -132,6 +161,12 @@ case $system in
AC_CHECK_HEADER(dld.h, [
SHLIB_LD="ld"
SHLIB_LDFLAGS="-shared"])
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
fi
SHLIB_EXTRACT_ALL=""
;;
@@ -142,11 +177,17 @@ case $system in
SHLIB_LD="ld"
SHLIB_LDFLAGS="$LDFLAGS -Bshareable -x"
SHLIB_SUFFIX=".so"
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
], [
# No dynamic loading.
SHLIB_CFLAGS=""
SHLIB_LD="ld"
- SHLIB_LDFLAGS="$LDFLAGS"
+ SHLIB_LDFLAGS=""
SHLIB_SUFFIX=""
AC_MSG_ERROR(don't know how to compile and link dynamic drivers)
])
@@ -155,7 +196,13 @@ case $system in
*-solaris2*|*-sysv4*)
SHLIB_CFLAGS="-KPIC"
SHLIB_LD="/usr/ccs/bin/ld"
- SHLIB_LDFLAGS="$LDFLAGS -G -z text"
+ SHLIB_LDFLAGS="$no_mXX_LDFLAGS -G -z text"
+ if test X${enable_m64_build} = Xyes; then
+ SHLIB_LDFLAGS="-64 $SHLIB_LDFLAGS"
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
SHLIB_SUFFIX=".so"
SHLIB_EXTRACT_ALL="-z allextract"
;;
@@ -170,6 +217,12 @@ case $system in
SHLIB_CFLAGS="-fPIC"
SHLIB_LD="ld"
SHLIB_LDFLAGS="$LDFLAGS -shared"
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
SHLIB_SUFFIX=".so"
SHLIB_EXTRACT_ALL=""
;;
diff --git a/lib/test_server/src/ts_install.erl b/lib/test_server/src/ts_install.erl
index e9e559df5d..bc62015ac3 100644
--- a/lib/test_server/src/ts_install.erl
+++ b/lib/test_server/src/ts_install.erl
@@ -112,6 +112,12 @@ get_vars([], name, [], Result) ->
get_vars(_, _, _, _) ->
{error, fatal_bad_conf_vars}.
+config_flags() ->
+ case os:getenv("CONFIG_FLAGS") of
+ false -> [];
+ CF -> string:tokens(CF, " \t\n")
+ end.
+
unix_autoconf(XConf) ->
Configure = filename:absname("configure"),
Flags = proplists:get_value(crossflags,XConf,[]),
@@ -122,11 +128,14 @@ unix_autoconf(XConf) ->
erlang:system_info(threads) /= false],
Debug = [" --enable-debug-mode" ||
string:str(erlang:system_info(system_version),"debug") > 0],
- Args = Host ++ Build ++ Threads ++ Debug,
+ MXX_Build = [Y || Y <- config_flags(),
+ Y == "--enable-m64-build"
+ orelse Y == "--enable-m32-build"],
+ Args = Host ++ Build ++ Threads ++ Debug ++ " " ++ MXX_Build,
case filelib:is_file(Configure) of
true ->
OSXEnv = macosx_cflags(),
- io:format("Running ~sEnv: ~p~n",
+ io:format("Running ~s~nEnv: ~p~n",
[lists:flatten(Configure ++ Args),Env++OSXEnv]),
Port = open_port({spawn, lists:flatten(["\"",Configure,"\"",Args])},
[stream, eof, {env,Env++OSXEnv}]),
@@ -135,7 +144,6 @@ unix_autoconf(XConf) ->
{error, no_configure_script}
end.
-
get_xcomp_flag(Flag, Flags) ->
get_xcomp_flag(Flag, Flag, Flags).
get_xcomp_flag(Flag, Tag, Flags) ->
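
config_flags/0 above splits the CONFIG_FLAGS environment variable on whitespace so that an --enable-m64-build or --enable-m32-build given to the top-level build is forwarded to test_server's own configure. A sketch, calling the (module-internal) helper with a hypothetical value:

    os:putenv("CONFIG_FLAGS", "--enable-m64-build --without-termcap"),
    ["--enable-m64-build", "--without-termcap"] = config_flags(),
    %% Only "--enable-m64-build" survives the MXX_Build filter.
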
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index c1e9bec6ae..fd90e3a870 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -620,7 +620,6 @@ resulting regexp is surrounded by \\_< and \\_>."
"if"
"let"
"of"
- "query"
"receive"
"try"
"when")
@@ -2608,7 +2607,7 @@ Value is list (stack token-start token-type in-what)."
(erlang-skip-blank to)))
(eq (following-char) ?\())
(erlang-push (list 'fun token (current-column)) stack)))
- ((looking-at "\\(begin\\|query\\)[^_a-zA-Z0-9]")
+ ((looking-at "\\(begin\\)[^_a-zA-Z0-9]")
(erlang-push (list 'begin token (current-column)) stack))
;; Normal when case
;;((looking-at "when\\s ")
@@ -3118,7 +3117,7 @@ This assumes that the preceding expression is either simple
(defun erlang-at-keyword ()
"Are we looking at an Erlang keyword which will increase indentation?"
- (looking-at (concat "\\(when\\|if\\|fun\\|case\\|begin\\|query\\|"
+ (looking-at (concat "\\(when\\|if\\|fun\\|case\\|begin\\|"
"of\\|receive\\|after\\|catch\\|try\\)[^_a-zA-Z0-9]")))
(defun erlang-at-operator ()
diff --git a/otp_build b/otp_build
index ca3ffa21a8..945027f0ed 100755
--- a/otp_build
+++ b/otp_build
@@ -1310,7 +1310,7 @@ determine_version_controller
# Unset ERL_FLAGS and ERL_OTP<Major-VSN>_FLAGS during bootstrap to
# prevent potential problems
-otp_major_vsn=`cat erts/vsn.mk | grep SYSTEM_VSN | sed "s|SYSTEM_VSN[^=]*=[^0-9]*\([0-9]*\).*|\1|"`
+otp_major_vsn=`cat erts/vsn.mk | grep SYSTEM_VSN | sed "s|SYSTEM_VSN[^=]*=[^0-9]*\([0-9]*\).*|\1|g"`
erl_otp_flags="ERL_OTP${otp_major_vsn}_FLAGS"
unset ERL_FLAGS
unset ${erl_otp_flags}