Diffstat (limited to 'erts')

 erts/aclocal.m4                                   | 162
 erts/configure.in                                 |   2
 erts/emulator/beam/erl_process.c                  | 102
 erts/emulator/beam/erl_process.h                  |   2
 erts/emulator/beam/erl_process_lock.h             |   7
 erts/emulator/test/nif_SUITE.erl                  |   1
 erts/etc/win32/cygwin_tools/vc/cc.sh              |   2
 erts/etc/win32/msys_tools/vc/cc.sh                |   2
 erts/include/internal/ethread_header_config.h.in  |   4
 erts/include/internal/i386/ethr_dw_atomic.h       |  73
 erts/preloaded/ebin/prim_file.beam                | Bin 44892 -> 44980 bytes
 erts/preloaded/src/prim_file.erl                  |   3
 12 files changed, 262 insertions(+), 98 deletions(-)
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 3d52538933..ec9b66bf29 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -2117,63 +2117,159 @@ esac
 case "$GCC-$host_cpu" in
   yes-i86pc | yes-i*86 | yes-x86_64 | yes-amd64)
+
+        if test $ac_cv_sizeof_void_p = 4; then
+            dw_cmpxchg="cmpxchg8b"
+        else
+            dw_cmpxchg="cmpxchg16b"
+        fi
+
         gcc_dw_cmpxchg_asm=no
-        AC_MSG_CHECKING([for gcc double word cmpxchg asm support])
-        AC_TRY_COMPILE([],
+        gcc_pic_dw_cmpxchg_asm=no
+        gcc_cflags_pic=no
+        gcc_cmpxchg8b_pic_no_clobber_ebx=no
+        gcc_cmpxchg8b_pic_no_clobber_ebx_register_shortage=no
+
+        save_CFLAGS="$CFLAGS"
+
+        # Check if it works out of the box using passed CFLAGS
+        # and with -fPIC added to CFLAGS if the passed CFLAGS
+        # doesn't trigger position independent code
+        pic_cmpxchg=unknown
+        while true; do
+
+            case $pic_cmpxchg in
+                yes) pic_text="pic ";;
+                *) pic_text="";;
+            esac
+
+            AC_MSG_CHECKING([for gcc $pic_text$dw_cmpxchg plain asm support])
+
+            plain_cmpxchg=no
+            AC_TRY_COMPILE([],
                 [
                     char xchgd;
                     long new[2], xchg[2], *p;
                     __asm__ __volatile__(
-#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
-                        "pushl %%ebx\n\t"
-                        "movl %8, %%ebx\n\t"
-#endif
 #if ETHR_SIZEOF_PTR == 4
                         "lock; cmpxchg8b %0\n\t"
 #else
                         "lock; cmpxchg16b %0\n\t"
 #endif
                         "setz %3\n\t"
-#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
-                        "popl %%ebx\n\t"
-#endif
-                        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
-                        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "3"(new[1]),
-#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
-                          "r"(new[0])
-#else
-                          "b"(new[0])
-#endif
+                        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=q"(xchgd)
+                        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "c"(new[1]), "b"(new[0])
                         : "cc", "memory");
+                ],
+                [plain_cmpxchg=yes])
+
+            AC_MSG_RESULT([$plain_cmpxchg])
+
+            if test $pic_cmpxchg = yes; then
+                gcc_pic_dw_cmpxchg_asm=$plain_cmpxchg
+                break
+            fi
+
+            gcc_dw_cmpxchg_asm=$plain_cmpxchg
+
+            # If not already compiling to position independent
+            # code add -fPIC to CFLAGS and do it again. This
+            # since we want also want to know how to compile
+            # to position independent code since this might
+            # cause problems with the use of the EBX register
+            # as input to the asm on 32-bit x86 and old gcc
+            # compilers (gcc vsn < 5).
+
+            AC_TRY_COMPILE([],
+                [
+#if !defined(__PIC__) || !__PIC__
+# error no pic
+#endif
                 ],
-                [gcc_dw_cmpxchg_asm=yes])
-        if test $gcc_dw_cmpxchg_asm = no && test $ac_cv_sizeof_void_p = 4; then
+                [pic_cmpxchg=yes
+                 gcc_cflags_pic=yes],
+                [pic_cmpxchg=no])
+
+            if test $pic_cmpxchg = yes; then
+                gcc_pic_dw_cmpxchg_asm=$gcc_dw_cmpxchg_asm
+                break
+            fi
+
+            CFLAGS="$save_CFLAGS -fPIC"
+            pic_cmpxchg=yes
+
+        done
+
+        if test $gcc_pic_dw_cmpxchg_asm = no && test $ac_cv_sizeof_void_p = 4; then
+
+            AC_MSG_CHECKING([for gcc pic cmpxchg8b asm support with EBX workaround])
+
+            # Check if we can work around it by managing the ebx
+            # register explicitly in the asm...
+            AC_TRY_COMPILE([],
+                [
+                    char xchgd;
+                    long new[2], xchg[2], *p;
+                    __asm__ __volatile__(
+                        "pushl %%ebx\n\t"
+                        "movl %8, %%ebx\n\t"
+                        "lock; cmpxchg8b %0\n\t"
+                        "setz %3\n\t"
+                        "popl %%ebx\n\t"
+                        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=q"(xchgd)
+                        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "c"(new[1]), "r"(new[0])
+                        : "cc", "memory");
+                ],
+                [gcc_pic_dw_cmpxchg_asm=yes
+                 gcc_cmpxchg8b_pic_no_clobber_ebx=yes])
+
+            AC_MSG_RESULT([$gcc_pic_dw_cmpxchg_asm])
+
+            if test $gcc_pic_dw_cmpxchg_asm = no; then
+
+                AC_MSG_CHECKING([for gcc pic cmpxchg8b asm support with EBX and register shortage workarounds])
+
+                # If no optimization is enabled we sometimes get a
+                # register shortage. Check if we can work around
+                # this...
+
+                AC_TRY_COMPILE([],
                 [
                     char xchgd;
                     long new[2], xchg[2], *p;
-#if !defined(__PIC__) || !__PIC__
-# error nope
-#endif
                     __asm__ __volatile__(
-                        "pushl %%ebx\n\t"
-                        "movl (%7), %%ebx\n\t"
-                        "movl 4(%7), %%ecx\n\t"
-                        "lock; cmpxchg8b %0\n\t"
-                        "setz %3\n\t"
-                        "popl %%ebx\n\t"
-                        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
-                        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "3"(new)
+                            "pushl %%ebx\n\t"
+                            "movl (%7), %%ebx\n\t"
+                            "movl 4(%7), %%ecx\n\t"
+                            "lock; cmpxchg8b %0\n\t"
+                            "setz %3\n\t"
+                            "popl %%ebx\n\t"
+                            : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
+                            : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "r"(new)
                         : "cc", "memory");
                 ],
-                [gcc_dw_cmpxchg_asm=yes])
-            if test "$gcc_dw_cmpxchg_asm" = "yes"; then
-                AC_DEFINE(ETHR_CMPXCHG8B_REGISTER_SHORTAGE, 1, [Define if you get a register shortage with cmpxchg8b and position independent code])
+                [gcc_pic_dw_cmpxchg_asm=yes
+                 gcc_cmpxchg8b_pic_no_clobber_ebx=yes
+                 gcc_cmpxchg8b_pic_no_clobber_ebx_register_shortage=yes])
+
+                AC_MSG_RESULT([$gcc_pic_dw_cmpxchg_asm])
             fi
+
+            if test $gcc_cflags_pic = yes; then
+                gcc_dw_cmpxchg_asm=$gcc_pic_dw_cmpxchg_asm
+            fi
+
+        fi
+
+        CFLAGS="$save_CFLAGS"
+
+        if test "$gcc_cmpxchg8b_pic_no_clobber_ebx" = "yes"; then
+            AC_DEFINE(ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX, 1, [Define if gcc wont let you clobber ebx with cmpxchg8b and position independent code])
+        fi
+        if test "$gcc_cmpxchg8b_pic_no_clobber_ebx_register_shortage" = "yes"; then
+            AC_DEFINE(ETHR_CMPXCHG8B_REGISTER_SHORTAGE, 1, [Define if you get a register shortage with cmpxchg8b and position independent code])
         fi
-        AC_MSG_RESULT([$gcc_dw_cmpxchg_asm])
         if test "$gcc_dw_cmpxchg_asm" = "yes"; then
             AC_DEFINE(ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT, 1, [Define if you use a gcc that supports the double word cmpxchg instruction])
         fi;;
diff --git a/erts/configure.in b/erts/configure.in
index 4fb725ff00..c7b095d47a 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -4111,7 +4111,7 @@ AC_ARG_WITH(ssl-rpath,
 AS_HELP_STRING([--with-ssl-rpath=yes|no|PATHS],
                [runtime library path for OpenSSL. Default is "yes", which equates to a
                number of standard locations. If "no", then no runtime
-               library paths wil be used. Anything else should be a
+               library paths will be used. Anything else should be a
                comma separated list of paths.]),
 [
 case X$with_ssl in
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 8e4a42fbe4..99a4011b8f 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -6559,9 +6559,6 @@ suspend_process(Process *c_p, Process *p)
 
     if (suspended) {
 
-        ASSERT(!(ERTS_PSFLG_RUNNING & state)
-               || p == erts_get_current_process());
-
         if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
 
             /* 'state' is before our change... */
@@ -8508,9 +8505,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
         resume_process(rp, rp_locks);
     }
     else {
-        rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
-                           pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+        rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+                           pid, ERTS_PROC_LOCK_STATUS);
 
         if (!rp) {
             c_p->flags &= ~F_P2PNR_RESCHED;
@@ -8519,40 +8515,84 @@
         ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
 
-        if (suspend) {
-            if (suspend_process(c_p, rp))
-                goto done;
-        }
-        else {
-            if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
-                  & erts_smp_atomic32_read_acqb(&rp->state)))
-                goto done;
+        /*
+         * Suspend the other process in order to prevent
+         * it from being selected for normal execution.
+         * This will however not prevent it from being
+         * selected for execution of a system task. If
+         * it is selected for execution of a system task
+         * we might be blocked for quite a while if the
+         * try-lock below fails. That is, there is room
+         * for improvement here...
+         */
 
-        }
+        if (!suspend_process(c_p, rp)) {
+            /* Other process running */
 
-        /* Other process running */
+            ASSERT(ERTS_PSFLG_RUNNING
+                   & erts_smp_atomic32_read_nob(&rp->state));
 
-        /*
-         * If we got pending suspenders and suspend ourselves waiting
-         * to suspend another process we might deadlock.
-         * In this case we have to yield, be suspended by
-         * someone else and then do it all over again.
-         */
-        if (!c_p->pending_suspenders) {
-            /* Mark rp pending for suspend by c_p */
-            add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
-            ASSERT(is_nil(c_p->suspendee));
+        running:
 
-            /* Suspend c_p; when rp is suspended c_p will be resumed. */
-            suspend_process(c_p, c_p);
-            c_p->flags |= F_P2PNR_RESCHED;
+            /*
+             * If we got pending suspenders and suspend ourselves waiting
+             * to suspend another process we might deadlock.
+             * In this case we have to yield, be suspended by
+             * someone else and then do it all over again.
+             */
+            if (!c_p->pending_suspenders) {
+                /* Mark rp pending for suspend by c_p */
+                add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
+                ASSERT(is_nil(c_p->suspendee));
+
+                /* Suspend c_p; when rp is suspended c_p will be resumed. */
+                suspend_process(c_p, c_p);
+                c_p->flags |= F_P2PNR_RESCHED;
+            }
+            /* Yield (caller is assumed to yield immediately in bif). */
+            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+            rp = ERTS_PROC_LOCK_BUSY;
+        }
+        else {
+            ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+            if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+                if (ERTS_PSFLG_RUNNING_SYS
+                    & erts_smp_atomic32_read_nob(&rp->state)) {
+                    /* Executing system task... */
+                    resume_process(rp, ERTS_PROC_LOCK_STATUS);
+                    goto running;
+                }
+                erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+                /*
+                 * If we are unlucky, the process just got selected for
+                 * execution of a system task. In this case we may be
+                 * blocked here for quite a while... Execution of system
+                 * tasks are fortunately quite rare events. We try to
+                 * avoid this by checking if it is in a state executing
+                 * system tasks (above), but it will not prevent all
+                 * scenarios for a long block here...
+                 */
+                rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+                                   pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+                if (!rp)
+                    goto done;
+            }
+
+            /*
+             * The previous suspend has prevented the process
+             * from being selected for normal execution regardless
+             * of locks held or not held on it...
+             */
+            ASSERT(!(ERTS_PSFLG_RUNNING
+                     & erts_smp_atomic32_read_nob(&rp->state)));
+
+            if (!suspend)
+                resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
         }
-        /* Yield (caller is assumed to yield immediately in bif). */
-        erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
-        rp = ERTS_PROC_LOCK_BUSY;
     }
 
  done:
+
     if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
         erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
     if (unlock_c_p_status)
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index f1570a835c..ee8a2e1b7b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1187,7 +1187,7 @@ void erts_check_for_holes(Process* p);
 #define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
     (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
 #define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
-    (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+    (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
 
 /*
  * Static flags that do not change after process creation.
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index a64c993e8f..9c59301086 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -523,6 +523,10 @@ erts_smp_proc_lock__(Process *p,
 
     ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
 
+#ifdef ERTS_ENABLE_LOCK_CHECK
+    erts_proc_lc_lock(p, locks, file, line);
+#endif
+
     old_lflgs = erts_smp_proc_raw_trylock__(p, locks);
 
     if (old_lflgs != 0) {
@@ -544,9 +548,6 @@ erts_smp_proc_lock__(Process *p,
 #ifdef ERTS_ENABLE_LOCK_COUNT
     erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
 #endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
-    erts_proc_lc_lock(p, locks, file, line);
-#endif
 
 #ifdef ERTS_PROC_LOCK_DEBUG
     erts_proc_lock_op_debug(p, locks, 1);
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 3d478654b1..e10460ce78 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -617,7 +617,6 @@ resource_new_do2(Type) ->
     ?line {PtrA,BinA} = get_resource(Type, ResA),
     ?line {PtrB,BinB} = get_resource(Type, ResB),
     ?line true = (PtrA =/= PtrB),
-    ?line [] = last_resource_dtor_call(),
 
     %% forget ResA and make it garbage
     {{PtrA,BinA}, {ResB,PtrB,BinB}}.
diff --git a/erts/etc/win32/cygwin_tools/vc/cc.sh b/erts/etc/win32/cygwin_tools/vc/cc.sh
index 48a579d5f0..651b6e098d 100755
--- a/erts/etc/win32/cygwin_tools/vc/cc.sh
+++ b/erts/etc/win32/cygwin_tools/vc/cc.sh
@@ -267,7 +267,7 @@ for x in $SOURCES; do
             echo
             echo
             after_sed=`date '+%s'`
-            echo Made dependencises for $x':' `expr $after_sed '-' $start_time` 's' >&2
+            echo Made dependencies for $x':' `expr $after_sed '-' $start_time` 's' >&2
         fi
     else
         cat $MSG_FILE
diff --git a/erts/etc/win32/msys_tools/vc/cc.sh b/erts/etc/win32/msys_tools/vc/cc.sh
index ac89aac34e..72005862ed 100644
--- a/erts/etc/win32/msys_tools/vc/cc.sh
+++ b/erts/etc/win32/msys_tools/vc/cc.sh
@@ -268,7 +268,7 @@ for x in $SOURCES; do
             echo
             echo
             after_sed=`date '+%s'`
-            echo Made dependencises for $x':' `expr $after_sed '-' $start_time` 's' >&2
+            echo Made dependencies for $x':' `expr $after_sed '-' $start_time` 's' >&2
         fi
     else
         cat $MSG_FILE
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 9cabd0591a..f4b08cfced 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -166,6 +166,10 @@
 /* Define if you use a gcc that supports the double word cmpxchg instruction */
 #undef ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT
 
+/* Define if gcc wont let you clobber ebx with cmpxchg8b and position
+   independent code */
+#undef ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX
+
 /* Define if you get a register shortage with cmpxchg8b and position
    independent code */
 #undef ETHR_CMPXCHG8B_REGISTER_SHORTAGE
diff --git a/erts/include/internal/i386/ethr_dw_atomic.h b/erts/include/internal/i386/ethr_dw_atomic.h
index e8c4119ef0..5444a6345c 100644
--- a/erts/include/internal/i386/ethr_dw_atomic.h
+++ b/erts/include/internal/i386/ethr_dw_atomic.h
@@ -115,13 +115,19 @@ ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
     return (ethr_sint_t *) ETHR_DW_NATMC_MEM__(var);
 }
 
-#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+#if defined(ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX) && defined(__PIC__) && __PIC__
+#if ETHR_SIZEOF_PTR != 4
+# error unexpected pic issue
+#endif
 /*
  * When position independent code is used in 32-bit mode, the EBX register
- * is used for storage of global offset table address, and we may not
- * use it as input or output in an asm. We need to save and restore the
- * EBX register explicitly (for some reason gcc doesn't provide this
- * service to us).
+ * is used for storage of global offset table address. When compiling with
+ * an old gcc (< vsn 5) we may not use it as input or output in an inline
+ * asm. We then need to save and restore the EBX register explicitly (for
+ * some reason old gcc compilers didn't provide this service to us).
+ * ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX will be defined if we need to
+ * explicitly manage EBX ourselves.
+ *
  */
 #  define ETHR_NO_CLOBBER_EBX__ 1
 #else
@@ -151,36 +157,53 @@ ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
 
     ETHR_DW_DBG_ALIGNED__(p);
 
+#if ETHR_NO_CLOBBER_EBX__ && ETHR_CMPXCHG8B_REGISTER_SHORTAGE
+    /*
+     * gcc wont let us use ebx as input and we
+     * get a register shortage
+     */
+
     __asm__ __volatile__(
-#if ETHR_NO_CLOBBER_EBX__
         "pushl %%ebx\n\t"
-#  if ETHR_CMPXCHG8B_REGISTER_SHORTAGE
         "movl (%7), %%ebx\n\t"
         "movl 4(%7), %%ecx\n\t"
-#  else
-        "movl %8, %%ebx\n\t"
-#  endif
-#endif
-        "lock; cmpxchg" ETHR_DW_CMPXCHG_SFX__ " %0\n\t"
+        "lock; cmpxchg8b %0\n\t"
         "setz %3\n\t"
-#if ETHR_NO_CLOBBER_EBX__
        "popl %%ebx\n\t"
-#endif
        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
-        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]),
-#if ETHR_NO_CLOBBER_EBX__
-#  if ETHR_CMPXCHG8B_REGISTER_SHORTAGE
-          "3"(new)
-#  else
-          "3"(new[1]),
-          "r"(new[0])
-#  endif
+        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "r"(new)
+        : "cc", "memory");
+
+#elif ETHR_NO_CLOBBER_EBX__
+    /*
+     * gcc wont let us use ebx as input
+     */
+
+    __asm__ __volatile__(
+        "pushl %%ebx\n\t"
+        "movl %8, %%ebx\n\t"
+        "lock; cmpxchg8b %0\n\t"
+        "setz %3\n\t"
+        "popl %%ebx\n\t"
+        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=q"(xchgd)
+        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "c"(new[1]), "r"(new[0])
+        : "cc", "memory");
+
 #else
-          "3"(new[1]),
-          "b"(new[0])
-#endif
+    /*
+     * gcc lets us place values in the registers where
+     * we want them
+     */
+
+    __asm__ __volatile__(
+        "lock; cmpxchg" ETHR_DW_CMPXCHG_SFX__ " %0\n\t"
+        "setz %3\n\t"
+        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=q"(xchgd)
+        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "c"(new[1]), "b"(new[0])
         : "cc", "memory");
+#endif
+
     return (int) xchgd;
 }
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 97170551bf..9256e35553 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index c87b2645ec..ab5359ebbc 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -1,7 +1,7 @@
 %%
 %% %CopyrightBegin%
 %%
-%% Copyright Ericsson AB 2000-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
 %%
 %% Licensed under the Apache License, Version 2.0 (the "License");
 %% you may not use this file except in compliance with the License.
@@ -1276,6 +1276,7 @@ lseek_position(_) ->
 
 %% Translates the response from the driver into
 %% {ok, Result} or {error, Reason}.
+-dialyzer({no_improper_lists, translate_response/2}).
 translate_response(?FILE_RESP_OK, []) ->
     ok;
 translate_response(?FILE_RESP_ERROR, List) when is_list(List) ->
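
Note (not part of the commit): a minimal standalone sketch of the 32-bit double-word
compare-and-exchange that the new configure test and ethr_dw_atomic.h emit when
ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX is in effect. EBX is saved and restored around
cmpxchg8b so the GOT pointer survives under -fPIC with a pre-5 gcc. The function name
is made up for the example; the asm template and constraints are copied from the patch,
and it is assumed to be built as 32-bit code with gcc (e.g. gcc -m32 -fPIC).

/* Illustrative only -- not part of the patch. Compares the 8 bytes at *p
 * with xchg[0..1]; on a match new_val[0..1] is stored and 1 is returned,
 * otherwise xchg[0..1] receives the current memory contents and 0 is returned. */
static int dw_cmpxchg8b_sketch(long *p, long *xchg, long *new_val)
{
    char xchgd;
    __asm__ __volatile__(
        "pushl %%ebx\n\t"          /* preserve EBX (GOT pointer under PIC)   */
        "movl %8, %%ebx\n\t"       /* low word of the new value              */
        "lock; cmpxchg8b %0\n\t"   /* compare EDX:EAX with *p; swap on match */
        "setz %3\n\t"              /* record whether the exchange happened   */
        "popl %%ebx\n\t"
        : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=q"(xchgd)
        : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "c"(new_val[1]), "r"(new_val[0])
        : "cc", "memory");
    return (int) xchgd;
}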