From ead6adc15fca3c314351523080d2ccb1956d5956 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Hoguin?= Date: Tue, 2 May 2017 12:23:46 +0200 Subject: Initial commit --- .gitignore | 1 + Makefile | 1 + README.asciidoc | 9 + compat/sys/queue.h | 574 +++++++++++++++++++++++++++++++++++++++++++++++++++++ nif_helpers.c | 195 ++++++++++++++++++ nif_helpers.h | 139 +++++++++++++ plugins.mk | 10 + 7 files changed, 929 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 README.asciidoc create mode 100644 compat/sys/queue.h create mode 100644 nif_helpers.c create mode 100644 nif_helpers.h create mode 100644 plugins.mk diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5761abc --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*.o diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..1263948 --- /dev/null +++ b/Makefile @@ -0,0 +1 @@ +all: diff --git a/README.asciidoc b/README.asciidoc new file mode 100644 index 0000000..fdea59e --- /dev/null +++ b/README.asciidoc @@ -0,0 +1,9 @@ += NIF helpers + +An Erlang.mk plugin and C library for writing NIFs. + +== Getting help + +* Official IRC Channel: #ninenines on irc.freenode.net +* https://github.com/ninenines/nif_helpers/issues[Issues tracker] +* http://ninenines.eu/services[Commercial Support] diff --git a/compat/sys/queue.h b/compat/sys/queue.h new file mode 100644 index 0000000..daf4553 --- /dev/null +++ b/compat/sys/queue.h @@ -0,0 +1,574 @@ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ + +/* + * This file defines five types of data structures: singly-linked lists, + * lists, simple queues, tail queues, and circular queues. + * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. 
New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. + * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. 
+ */ +#define LIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var); \ + (var) = ((var)->field.le_next)) + +/* + * List access methods. + */ +#define LIST_EMPTY(head) ((head)->lh_first == NULL) +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + + +/* + * Singly-linked List definitions. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define SLIST_INIT(head) do { \ + (head)->slh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = (head)->slh_first; \ + while(curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +/* + * Singly-linked List access methods. + */ +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) +#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + + +/* + * Singly-linked Tail queue declarations. + */ +#define STAILQ_HEAD(name, type) \ +struct name { \ + struct type *stqh_first; /* first element */ \ + struct type **stqh_last; /* addr of last next element */ \ +} + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ +struct { \ + struct type *stqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue functions. 
+ */ +#define STAILQ_INIT(head) do { \ + (head)->stqh_first = NULL; \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (head)->stqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.stqe_next = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &(elm)->field.stqe_next; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (listelm)->field.stqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if ((head)->stqh_first == (elm)) { \ + STAILQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->stqh_first; \ + while (curelm->field.stqe_next != (elm)) \ + curelm = curelm->field.stqe_next; \ + if ((curelm->field.stqe_next = \ + curelm->field.stqe_next->field.stqe_next) == NULL) \ + (head)->stqh_last = &(curelm)->field.stqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define STAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->stqh_first); \ + (var); \ + (var) = ((var)->field.stqe_next)) + +#define STAILQ_CONCAT(head1, head2) do { \ + if (!STAILQ_EMPTY((head2))) { \ + *(head1)->stqh_last = (head2)->stqh_first; \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * Singly-linked Tail queue access methods. + */ +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) +#define STAILQ_FIRST(head) ((head)->stqh_first) +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + + +/* + * Simple queue definitions. + */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue functions. 
+ */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + SIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var); \ + (var) = ((var)->field.sqe_next)) + +/* + * Simple queue access methods. + */ +#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + + +/* + * Tail queue definitions. + */ +#define _TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define _TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) + +/* + * Tail queue functions. 
+ */ +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var); \ + (var) = ((var)->field.tqe_next)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * Tail queue access methods. + */ +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + + +/* + * Circular queue definitions. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { (void *)&head, (void *)&head } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = (void *)(head); \ + (head)->cqh_last = (void *)(head); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = (void *)(head); \ + if ((head)->cqh_last == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = (void *)(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if ((elm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->cqh_first); \ + (var) != (const void *)(head); \ + (var) = ((var)->field.cqe_next)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for ((var) = ((head)->cqh_last); \ + (var) != (const void *)(head); \ + (var) = ((var)->field.cqe_prev)) + +/* + * Circular queue access methods. + */ +#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) +#define CIRCLEQ_LAST(head) ((head)->cqh_last) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) + +#define CIRCLEQ_LOOP_NEXT(head, elm, field) \ + (((elm)->field.cqe_next == (void *)(head)) \ + ? ((head)->cqh_first) \ + : (elm->field.cqe_next)) +#define CIRCLEQ_LOOP_PREV(head, elm, field) \ + (((elm)->field.cqe_prev == (void *)(head)) \ + ? ((head)->cqh_last) \ + : (elm->field.cqe_prev)) + +#endif /* sys/queue.h */ diff --git a/nif_helpers.c b/nif_helpers.c new file mode 100644 index 0000000..f29d76d --- /dev/null +++ b/nif_helpers.c @@ -0,0 +1,195 @@ +// Copyright (c) 2014-2015, Loïc Hoguin +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +#include "nif_helpers.h" +#include <stdarg.h> +#include <sys/queue.h> + +extern ERL_NIF_TERM atom_ok; +extern ERL_NIF_TERM atom__nif_thread_ret_; + +typedef struct nif_thread_message { + TAILQ_ENTRY(nif_thread_message) next_entry; + + ErlNifPid* from_pid; + void* function; + nif_thread_arg* args; +} nif_thread_message; + +typedef TAILQ_HEAD(nif_thread_mailbox, nif_thread_message) nif_thread_mailbox; + +typedef struct { + ErlNifTid tid; + ErlNifMutex* lock; + ErlNifCond* cond; + nif_thread_mailbox* mailbox; +} nif_thread_state; + +// Message. + +static nif_thread_message* nif_thread_message_alloc(void* f, nif_thread_arg* args, ErlNifPid* pid) +{ + nif_thread_message* msg = (nif_thread_message*)enif_alloc(sizeof(nif_thread_message)); + + msg->from_pid = pid; + msg->function = f; + msg->args = args; + + return msg; +} + +static void nif_thread_message_free(nif_thread_message* msg) +{ + enif_free(msg->from_pid); + enif_free(msg->args); + enif_free(msg); +} + +// Calls and casts. + +static ERL_NIF_TERM nif_thread_send(nif_thread_state* st, nif_thread_message* msg) +{ + enif_mutex_lock(st->lock); + + TAILQ_INSERT_TAIL(st->mailbox, msg, next_entry); + + enif_cond_signal(st->cond); + enif_mutex_unlock(st->lock); + + return atom_ok; +} + +ERL_NIF_TERM nif_thread_cast(ErlNifEnv* env, void (*f)(nif_thread_arg*), int a, ...) +{ + va_list ap; + int i; + + nif_thread_arg* args = (nif_thread_arg*)enif_alloc(a * sizeof(nif_thread_arg)); + + va_start(ap, a); + for (i = 0; i < a; i++) + args[i] = va_arg(ap, void*); + va_end(ap); + + nif_thread_message* msg = nif_thread_message_alloc(f, args, NULL); + + return nif_thread_send((nif_thread_state*)enif_priv_data(env), msg); +} + +ERL_NIF_TERM nif_thread_call(ErlNifEnv* env, ERL_NIF_TERM (*f)(ErlNifEnv*, nif_thread_arg*), int a, ...) +{ + va_list ap; + int i; + + nif_thread_arg* args = (nif_thread_arg*)enif_alloc(a * sizeof(nif_thread_arg)); + + va_start(ap, a); + for (i = 0; i < a; i++) + args[i] = va_arg(ap, void*); + va_end(ap); + + ErlNifPid* pid = (ErlNifPid*)enif_alloc(sizeof(ErlNifPid)); + nif_thread_message* msg = nif_thread_message_alloc((void*)f, args, enif_self(env, pid)); + + return nif_thread_send((nif_thread_state*)enif_priv_data(env), msg); +} + +// Main thread loop.
+ +static int nif_thread_receive(nif_thread_state* st, nif_thread_message** msg) +{ + enif_mutex_lock(st->lock); + + while (TAILQ_EMPTY(st->mailbox)) + enif_cond_wait(st->cond, st->lock); + + *msg = TAILQ_FIRST(st->mailbox); + TAILQ_REMOVE(st->mailbox, TAILQ_FIRST(st->mailbox), next_entry); + + enif_mutex_unlock(st->lock); + + if ((*msg)->function == NULL) + return 0; + + return 1; +} + +static void nif_thread_handle(ErlNifEnv* env, nif_thread_state* st, nif_thread_message* msg) +{ + if (msg->from_pid == NULL) { + void (*cast)(nif_thread_arg*) = msg->function; + cast(msg->args); + } else { + ERL_NIF_TERM (*call)(ErlNifEnv*, nif_thread_arg*) = msg->function; + ERL_NIF_TERM ret = call(env, msg->args); + + enif_send(NULL, msg->from_pid, env, + enif_make_tuple2(env, atom__nif_thread_ret_, ret)); + + enif_clear_env(env); + } + + nif_thread_message_free(msg); +} + +static void* nif_main_thread(void* obj) +{ + ErlNifEnv* env = enif_alloc_env(); + nif_thread_state* st = (nif_thread_state*)obj; + nif_thread_message* msg; + + while (nif_thread_receive(st, &msg)) + nif_thread_handle(env, st, msg); + + enif_free_env(env); + + return NULL; +} + +// Main thread creation/destruction. + +void* nif_create_main_thread(char* name) +{ + nif_thread_state* st = (nif_thread_state*)enif_alloc(sizeof(nif_thread_state)); + + st->lock = enif_mutex_create("nif_thread_mailbox_lock"); + st->cond = enif_cond_create("nif_thread_mailbox_cond"); + st->mailbox = (nif_thread_mailbox*)enif_alloc(sizeof(nif_thread_mailbox)); + TAILQ_INIT(st->mailbox); + +#if defined(__APPLE__) && defined(__MACH__) + // On OSX we identify ourselves as the main thread to ensure that + // we are compatible with libraries that require it. For example + // this is necessary with SDL2 in order to receive input events. + erl_drv_steal_main_thread(name, &(st->tid), nif_main_thread, st, NULL); +#else + enif_thread_create(name, &(st->tid), nif_main_thread, st, NULL); +#endif + + return (void*)st; +} + +void nif_destroy_main_thread(void* void_st) +{ + nif_thread_state* st = (nif_thread_state*)void_st; + nif_thread_message* msg = nif_thread_message_alloc(NULL, NULL, NULL); + + nif_thread_send(st, msg); + enif_thread_join(st->tid, NULL); + + enif_cond_destroy(st->cond); + enif_mutex_destroy(st->lock); + enif_free(st->mailbox); + enif_free(st); +} diff --git a/nif_helpers.h b/nif_helpers.h new file mode 100644 index 0000000..2eefec6 --- /dev/null +++ b/nif_helpers.h @@ -0,0 +1,139 @@ +// Copyright (c) 2014-2015, Loïc Hoguin +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +#ifndef __NIF_HELPERS_H__ +#define __NIF_HELPERS_H__ + +#include "erl_nif.h" + +#define TO_STRING(i) #i + +// Atoms. 
+ +#define MAX_ATOM_LENGTH 255 + +#define NIF_ATOM_DECL(a) ERL_NIF_TERM atom_ ## a; +#define NIF_ATOM_H_DECL(a) extern ERL_NIF_TERM atom_ ## a; +#define NIF_ATOM_INIT(a) atom_ ## a = enif_make_atom(env, #a); + +// Functions. + +#ifndef NIF_FUNCTION_NAME +#define NIF_FUNCTION_NAME(n) n +#endif + +#define NIF_FUNCTION(f) \ + ERL_NIF_TERM NIF_FUNCTION_NAME(f)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +#define NIF_FUNCTION_ARRAY(f, a) {#f, a, NIF_FUNCTION_NAME(f)}, +#define NIF_FUNCTION_H_DECL(f, a) \ + ERL_NIF_TERM NIF_FUNCTION_NAME(f)(ErlNifEnv*, int, const ERL_NIF_TERM []); + +#define BADARG_IF(cond) if (cond) return enif_make_badarg(env) + +// Resources. + +#ifndef NIF_RES_TYPE +#define NIF_RES_TYPE(t) t +#endif + +#define NIF_RES_DECL(r) ErlNifResourceType* res_ ## r = NULL; +#define NIF_RES_H_DECL(r) \ + extern ErlNifResourceType* res_ ## r; \ + void dtor_ ## r(ErlNifEnv*, void*); \ + typedef struct { \ + NIF_RES_TYPE(r)* v; \ + void* dep; \ + } obj_ ## r; +#define NIF_RES_INIT(r) \ + res_ ## r = enif_open_resource_type(env, NULL, TO_STRING(NIF_RES_TYPE(r)), dtor_ ## r, ERL_NIF_RT_CREATE, NULL); \ + if (!res_ ## r) return -1; + +#define NIF_RES_GET(r, obj) (((obj_ ## r*)obj)->v) +#define NIF_RES_DEP(r, obj) (((obj_ ## r*)obj)->dep) +#define NIF_RES_TO_TERM(r, val, term) NIF_RES_TO_TERM_WITH_DEP(r, val, term, NULL) +#define NIF_RES_TO_TERM_WITH_DEP(r, val, term, dep_res) { \ + obj_ ## r* res = enif_alloc_resource(res_ ## r, sizeof(obj_ ## r)); \ + res->v = val; \ + res->dep = dep_res; \ + term = enif_make_resource(env, res); \ + enif_release_resource(res); \ +} + +// Function generators. + +#define NIF_ATOM_TO_FLAG(a, f) if (enif_is_identical(atom_ ## a, head)) *flags |= f; else +#define NIF_LIST_TO_FLAGS_FUNCTION(f, type, flags_list) \ + int f(ErlNifEnv* env, ERL_NIF_TERM list, type* flags) \ + { \ + ERL_NIF_TERM head; \ + \ + if (!enif_is_list(env, list)) \ + return 0; \ + \ + while (enif_get_list_cell(env, list, &head, &list)) { \ + flags_list(NIF_ATOM_TO_FLAG) /* else */ return 0; \ + } \ + \ + return 1; \ + } + +#define NIF_FLAG_CONS_LIST(a, f) if (flags & f) list = enif_make_list_cell(env, atom_ ## a, list); +#define NIF_FLAGS_TO_LIST_FUNCTION(f, type, flags_list) \ + ERL_NIF_TERM f(ErlNifEnv* env, type flags) \ + { \ + ERL_NIF_TERM list = enif_make_list(env, 0); \ + flags_list(NIF_FLAG_CONS_LIST); \ + return list; \ + } + +#define NIF_ATOM_TO_ENUM(a, e) if (enif_is_identical(atom_ ## a, atom)) { *val = e; return 1; } +#define NIF_ATOM_TO_ENUM_FUNCTION(f, type, enum_list) \ + int f(ErlNifEnv* env, ERL_NIF_TERM atom, type* val) \ + { \ + enum_list(NIF_ATOM_TO_ENUM) \ + \ + return 0; \ + } +#define NIF_ATOM_TO_ENUM_FUNCTION_DECL(f, type) int f(ErlNifEnv*, ERL_NIF_TERM, type*); + +#define NIF_ENUM_TO_ATOM(a, e) if (id == e) return atom_ ## a; +#define NIF_ENUM_TO_ATOM_FUNCTION(f, type, enum_list) \ + ERL_NIF_TERM f(type id) \ + { \ + enum_list(NIF_ENUM_TO_ATOM) \ + return atom_undefined; \ + } +#define NIF_ENUM_TO_ATOM_FUNCTION_DECL(f, type) ERL_NIF_TERM f(type); + +// Threaded NIFs. 
+ +typedef void* nif_thread_arg; + +#ifdef __cplusplus +extern "C" { +#endif + +void* nif_create_main_thread(char*); +void nif_destroy_main_thread(void*); +ERL_NIF_TERM nif_thread_cast(ErlNifEnv*, void (*f)(nif_thread_arg*), int a, ...); +ERL_NIF_TERM nif_thread_call(ErlNifEnv*, ERL_NIF_TERM (*f)(ErlNifEnv*, nif_thread_arg*), int a, ...); + +#ifdef __cplusplus +} +#endif + +#define NIF_CAST_HANDLER(f) static void f(nif_thread_arg* args) +#define NIF_CALL_HANDLER(f) static ERL_NIF_TERM f(ErlNifEnv* env, nif_thread_arg* args) + +#endif diff --git a/plugins.mk b/plugins.mk new file mode 100644 index 0000000..56903d7 --- /dev/null +++ b/plugins.mk @@ -0,0 +1,10 @@ +THIS := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) + +SOURCES += $(THIS)nif_helpers.c +$(C_SRC_OUTPUT_FILE): $(THIS)nif_helpers.o + +ifeq ($(PLATFORM),msys2) + CFLAGS += -I"$(THIS)/compat/" +endif + +CFLAGS += -I"$(THIS)" -- cgit v1.2.3
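
For context, here is a minimal sketch of how an application might wire these helpers together, based only on the macros and functions defined in the patch above. The module name my_nif, the functions hello/thread_hello, and the thread name "my_nif_main_thread" are hypothetical and not part of this commit; the two atom declarations are needed because nif_helpers.c references atom_ok and atom__nif_thread_ret_ as extern.

// my_nif.c -- hypothetical usage sketch, not part of this commit.
#include "nif_helpers.h"

// nif_helpers.c expects the embedding application to define and
// initialize these two atoms.
NIF_ATOM_DECL(ok)
NIF_ATOM_DECL(_nif_thread_ret_)

// Runs on the NIF main thread, with the args array built by nif_thread_call().
NIF_CALL_HANDLER(thread_hello)
{
	return atom_ok;
}

// Regular NIF entry point: queue the work on the main thread and return
// immediately. Per nif_thread_handle() above, the calling process later
// receives the handler's result as a {'_nif_thread_ret_', Result} message.
NIF_FUNCTION(hello)
{
	return nif_thread_call(env, thread_hello, 0);
}

static ErlNifFunc nif_funcs[] = {
	NIF_FUNCTION_ARRAY(hello, 0)
};

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
	NIF_ATOM_INIT(ok)
	NIF_ATOM_INIT(_nif_thread_ret_)

	// nif_thread_call/cast look up the thread state via enif_priv_data().
	*priv_data = nif_create_main_thread("my_nif_main_thread");

	return 0;
}

static void unload(ErlNifEnv* env, void* priv_data)
{
	nif_destroy_main_thread(priv_data);
}

ERL_NIF_INIT(my_nif, nif_funcs, load, NULL, NULL, unload)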
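On the build side, plugins.mk above only adds nif_helpers.c and the compat include path to an existing Erlang.mk C build; how a project is expected to pull the plugin in is not spelled out in this commit. Presumably the usual Erlang.mk pattern applies, listing nif_helpers in BUILD_DEPS (with a dep_nif_helpers fetch line) and in DEP_PLUGINS so that plugins.mk is loaded, but treat that as an assumption rather than documented behavior.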