123 files changed, 9392 insertions(+), 5522 deletions(-)
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index b23b1f628d..fe1a7ba345 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -743,7 +743,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ return (ErlDrvTermData) -1; /* pid does not exist */ } if ((port_num = get_free_port()) < 0) { - errno = ENFILE; + errno = SYSTEM_LIMIT; erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_smp_mtx_unlock(&erts_driver_list_lock); return (ErlDrvTermData) -1; diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c index 36ed108b76..a251b064da 100644 --- a/erts/emulator/drivers/common/efile_drv.c +++ b/erts/emulator/drivers/common/efile_drv.c @@ -156,11 +156,11 @@ static ErlDrvSysInfo sys_info; * DARWIN. The testcase t_sendfile_crashduring reproduces * this error when using +A 10. */ -#if !defined(DARWIN) -#define USE_THRDS_FOR_SENDFILE (sys_info.async_threads > 0) -#else +#if defined(__APPLE__) && defined(__MACH__) #define USE_THRDS_FOR_SENDFILE 0 -#endif /* !DARWIN */ +#else +#define USE_THRDS_FOR_SENDFILE (sys_info.async_threads > 0) +#endif /* defined(__APPLE__) && defined(__MACH__) */ @@ -259,6 +259,7 @@ static void file_stop_select(ErlDrvEvent event, void* _); enum e_timer {timer_idle, timer_again, timer_write}; #ifdef HAVE_SENDFILE enum e_sendfile {sending, not_sending}; +static void free_sendfile(void *data); #endif /* HAVE_SENDFILE */ struct t_data; @@ -445,6 +446,8 @@ struct t_data } fadvise; #ifdef HAVE_SENDFILE struct { + ErlDrvPort port; + ErlDrvPDL q_mtx; int out_fd; off_t offset; Uint64 nbytes; @@ -752,15 +755,6 @@ file_stop(ErlDrvData e) TRACE_C('p'); -#ifdef HAVE_SENDFILE - if (desc->sendfile_state == sending && !USE_THRDS_FOR_SENDFILE) { - driver_select(desc->port,(ErlDrvEvent)(long)desc->d->c.sendfile.out_fd, - ERL_DRV_WRITE|ERL_DRV_USE,0); - } else if (desc->sendfile_state == sending) { - SET_NONBLOCKING(desc->d->c.sendfile.out_fd); - } -#endif /* HAVE_SENDFILE */ - if (desc->fd != FILE_FD_INVALID) { do_close(desc->flags, desc->fd); desc->fd = FILE_FD_INVALID; @@ -1783,26 +1777,31 @@ static void invoke_sendfile(void *data) d->c.sendfile.written += nbytes; - if (result == 1) { - if (USE_THRDS_FOR_SENDFILE) { - d->result_ok = 0; - } else if (d->c.sendfile.nbytes == 0 && nbytes != 0) { - d->result_ok = 1; - } else if ((d->c.sendfile.nbytes - nbytes) != 0) { - d->result_ok = 1; - d->c.sendfile.nbytes -= nbytes; - } else { - d->result_ok = 0; - } + if (result == 1 || (result == 0 && USE_THRDS_FOR_SENDFILE)) { + d->result_ok = 0; } else if (result == 0 && (d->errInfo.posix_errno == EAGAIN || d->errInfo.posix_errno == EINTR)) { + if ((d->c.sendfile.nbytes - nbytes) != 0) { d->result_ok = 1; + if (d->c.sendfile.nbytes != 0) + d->c.sendfile.nbytes -= nbytes; + } else + d->result_ok = 0; } else { d->result_ok = -1; } } static void free_sendfile(void *data) { + struct t_data *d = (struct t_data *)data; + if (USE_THRDS_FOR_SENDFILE) { + SET_NONBLOCKING(d->c.sendfile.out_fd); + } else { + MUTEX_LOCK(d->c.sendfile.q_mtx); + driver_deq(d->c.sendfile.port,1); + MUTEX_UNLOCK(d->c.sendfile.q_mtx); + driver_select(d->c.sendfile.port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 0); + } EF_FREE(data); } @@ -1812,7 +1811,7 @@ static void file_ready_output(ErlDrvData data, ErlDrvEvent event) switch (fd->d->command) { case FILE_SENDFILE: - driver_select(fd->port, event, + driver_select(fd->d->c.sendfile.port, event, (int)ERL_DRV_WRITE,(int) 0); invoke_sendfile((void *)fd->d); 
file_async_ready(data, (ErlDrvThreadData)fd->d); @@ -1826,6 +1825,15 @@ static void file_stop_select(ErlDrvEvent event, void* _) { } + +static int flush_sendfile(file_descriptor *desc,void *_) { + if (desc->sendfile_state == sending) { + desc->d->result_ok = -1; + desc->d->errInfo.posix_errno = ECONNABORTED; + file_async_ready((ErlDrvData)desc,(ErlDrvThreadData)desc->d); + } + return 1; +} #endif /* HAVE_SENDFILE */ @@ -2248,36 +2256,23 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) #ifdef HAVE_SENDFILE case FILE_SENDFILE: if (d->result_ok == -1) { - desc->sendfile_state = not_sending; if (d->errInfo.posix_errno == ECONNRESET || d->errInfo.posix_errno == ENOTCONN || d->errInfo.posix_errno == EPIPE) reply_string_error(desc,"closed"); else reply_error(desc, &d->errInfo); - if (USE_THRDS_FOR_SENDFILE) { - SET_NONBLOCKING(d->c.sendfile.out_fd); - free_sendfile(data); - } else { - driver_select(desc->port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, - ERL_DRV_USE, 0); - free_sendfile(data); - } - } else if (d->result_ok == 0) { desc->sendfile_state = not_sending; + free_sendfile(data); + } else if (d->result_ok == 0) { reply_Sint64(desc, d->c.sendfile.written); - if (USE_THRDS_FOR_SENDFILE) { - SET_NONBLOCKING(d->c.sendfile.out_fd); - free_sendfile(data); - } else { - driver_select(desc->port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, ERL_DRV_USE, 0); - free_sendfile(data); - } + desc->sendfile_state = not_sending; + free_sendfile(data); } else if (d->result_ok == 1) { // If we are using select to send the rest of the data desc->sendfile_state = sending; desc->d = d; driver_select(desc->port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, - ERL_DRV_USE|ERL_DRV_WRITE, 1); + ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 1); } break; #endif @@ -2655,6 +2650,10 @@ file_flush(ErlDrvData e) { TRACE_C('f'); +#ifdef HAVE_SENDFILE + flush_sendfile(desc, NULL); +#endif + #ifdef DEBUG r = #endif @@ -3454,11 +3453,13 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) { d->fd = desc->fd; d->command = command; d->invoke = invoke_sendfile; - d->free = NULL; + d->free = free_sendfile; d->level = 2; d->c.sendfile.out_fd = (int) out_fd; d->c.sendfile.written = 0; + d->c.sendfile.port = desc->port; + d->c.sendfile.q_mtx = desc->q_mtx; #if SIZEOF_OFF_T == 4 if (offsetH != 0) { @@ -3474,6 +3475,19 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) { if (USE_THRDS_FOR_SENDFILE) { SET_BLOCKING(d->c.sendfile.out_fd); + } else { + /** + * Write a place holder to queue in order to force file_flush + * to be called before the driver is closed. + */ + char tmp[1] = ""; + MUTEX_LOCK(d->c.sendfile.q_mtx); + if (driver_enq(d->c.sendfile.port, tmp, 1)) { + MUTEX_UNLOCK(d->c.sendfile.q_mtx); + reply_posix_error(desc, ENOMEM); + goto done; + } + MUTEX_UNLOCK(d->c.sendfile.q_mtx); } cq_enq(desc, d); diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 47a99fdbe6..d1c2dbf94c 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -553,6 +553,12 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) # define VALGRIND_MAKE_MEM_DEFINED(ptr,size) #endif +/* + Magic errno value used locally for return of {error, system_limit} + - the emulator definition of SYSTEM_LIMIT is not available here. +*/ +#define INET_ERRNO_SYSTEM_LIMIT (15 << 8) + /*---------------------------------------------------------------------------- ** Interface constants. 
** @@ -1645,6 +1651,17 @@ static struct erl_drv_entry dummy_sctp_driver_entry = #endif +/* return lowercase string form of errno value */ +static char *errno_str(int err) +{ + switch (err) { + case INET_ERRNO_SYSTEM_LIMIT: + return "system_limit"; + default: + return erl_errno_id(err); + } +} + /* general control reply function */ static ErlDrvSSizeT ctl_reply(int rep, char* buf, ErlDrvSizeT len, char** rbuf, ErlDrvSizeT rsize) @@ -1665,13 +1682,9 @@ static ErlDrvSSizeT ctl_reply(int rep, char* buf, ErlDrvSizeT len, /* general control error reply function */ static ErlDrvSSizeT ctl_error(int err, char** rbuf, ErlDrvSizeT rsize) { - char response[256]; /* Response buffer. */ - char* s; - char* t; + char* s = errno_str(err); - for (s = erl_errno_id(err), t = response; *s; s++, t++) - *t = tolower(*s); - return ctl_reply(INET_REP_ERROR, response, t-response, rbuf, rsize); + return ctl_reply(INET_REP_ERROR, s, strlen(s), rbuf, rsize); } static ErlDrvSSizeT ctl_xerror(char* xerr, char** rbuf, ErlDrvSizeT rsize) @@ -1683,14 +1696,7 @@ static ErlDrvSSizeT ctl_xerror(char* xerr, char** rbuf, ErlDrvSizeT rsize) static ErlDrvTermData error_atom(int err) { - char errstr[256]; - char* s; - char* t; - - for (s = erl_errno_id(err), t = errstr; *s; s++, t++) - *t = tolower(*s); - *t = '\0'; - return driver_mk_atom(errstr); + return driver_mk_atom(errno_str(err)); } @@ -4089,6 +4095,7 @@ static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr) addr->sa_family = AF_INET; return ptr + sizeof(struct in_addr); } +#if defined(HAVE_IN6) && defined(AF_INET6) case INET_AF_INET6: { struct in6_addr *p = &((struct sockaddr_in6*)addr)->sin6_addr; buf_check(ptr,end,sizeof(struct in6_addr)); @@ -4096,6 +4103,7 @@ static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr) addr->sa_family = AF_INET6; return ptr + sizeof(struct in6_addr); } +#endif } error: return NULL; @@ -8051,7 +8059,7 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args) /* Copy a descriptor, by creating a new port with same settings * as the descriptor desc. 
- * return NULL on error (ENFILE no ports avail) + * return NULL on error (SYSTEM_LIMIT no ports avail) */ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, ErlDrvTermData owner, int* err) @@ -8090,7 +8098,7 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, /* The new port will be linked and connected to the original caller */ port = driver_create_port(port, owner, "tcp_inet", (ErlDrvData) copy_desc); if ((long)port == -1) { - *err = ENFILE; + *err = INET_ERRNO_SYSTEM_LIMIT; FREE(copy_desc); return NULL; } diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c index da4a17db1a..a4b02b845e 100644 --- a/erts/emulator/drivers/common/zlib_drv.c +++ b/erts/emulator/drivers/common/zlib_drv.c @@ -64,6 +64,7 @@ static int zlib_init(void); static ErlDrvData zlib_start(ErlDrvPort port, char* buf); static void zlib_stop(ErlDrvData e); +static void zlib_flush(ErlDrvData e); static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen); static void zlib_outputv(ErlDrvData drv_data, ErlIOVec *ev); @@ -82,7 +83,7 @@ ErlDrvEntry zlib_driver_entry = { NULL, /* timeout */ zlib_outputv, NULL, /* read_async */ - NULL, /* flush */ + zlib_flush, NULL, /* call */ NULL, /* event */ ERL_DRV_EXTENDED_MARKER, @@ -410,6 +411,13 @@ static void zlib_stop(ErlDrvData e) driver_free(d); } +static void zlib_flush(ErlDrvData drv_data) +{ + ZLibData* d = (ZLibData*) drv_data; + + driver_deq(d->port, driver_sizeq(d->port)); +} + static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen) { diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c index 796843a735..dfb6cece14 100644 --- a/erts/emulator/drivers/unix/unix_efile.c +++ b/erts/emulator/drivers/unix/unix_efile.c @@ -1426,6 +1426,14 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, * you would have to emulate it in linux and on BSD/Darwin some complex * calculations have to be made when using a non blocking socket to figure * out how much of the header/file/trailer was sent in each command. + * + * The semantics of the API is this: + * Return value: 1 if all data was sent and the function does not need to + * be called again. 0 if an error occures OR if there is more data which + * has to be sent (EAGAIN or EINTR will be set appropriately) + * + * The amount of data written in a call is returned through nbytes. + * */ int @@ -1446,8 +1454,11 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, *nbytes -= retval; } } while (retval == SENDFILE_CHUNK_SIZE); - *nbytes = written; - return check_error(retval == -1 ? -1 : 0, errInfo); + if (written != 0) { + // -1 is not returned by the linux API so we have to simulate it + retval = -1; + errno = EAGAIN; + } #elif defined(__sun) && defined(__SVR4) && defined(HAVE_SENDFILEV) ssize_t retval; size_t len; @@ -1469,8 +1480,6 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, written += len; } } while (len == SENDFILE_CHUNK_SIZE); - *nbytes = written; - return check_error(retval == -1 ? 
-1 : 0, errInfo); #elif defined(DARWIN) int retval; off_t len; @@ -1487,8 +1496,6 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, written += len; } } while (len == SENDFILE_CHUNK_SIZE); - *nbytes = written; - return check_error(retval, errInfo); #elif defined(__FreeBSD__) || defined(__DragonFly__) off_t len; int retval; @@ -1504,8 +1511,8 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, written += len; } } while(len == SENDFILE_CHUNK_SIZE); +#endif *nbytes = written; return check_error(retval, errInfo); -#endif } #endif /* HAVE_SENDFILE */ diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl index 4dc2fbaae2..76667772c7 100644 --- a/erts/emulator/test/erts_debug_SUITE.erl +++ b/erts/emulator/test/erts_debug_SUITE.erl @@ -23,13 +23,13 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2, - flat_size/1,flat_size_big/1,df/1, + test_size/1,flat_size_big/1,df/1, instructions/1]). suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> - [flat_size, flat_size_big, df, instructions]. + [test_size, flat_size_big, df, instructions]. groups() -> []. @@ -55,16 +55,58 @@ end_per_testcase(_Func, Config) -> Dog=?config(watchdog, Config), ?t:timetrap_cancel(Dog). -flat_size(Config) when is_list(Config) -> - 0 = erts_debug:flat_size([]), - 0 = erts_debug:flat_size(42), - 2 = erts_debug:flat_size([a|b]), - 1 = erts_debug:flat_size({}), - 2 = erts_debug:flat_size({[]}), - 3 = erts_debug:flat_size({a,b}), - 7 = erts_debug:flat_size({a,[b,c]}), +test_size(Config) when is_list(Config) -> + ConsCell1 = id([a|b]), + ConsCell2 = id(ConsCell1), + ConsCellSz = 2, + + 0 = do_test_size([]), + 0 = do_test_size(42), + ConsCellSz = do_test_size(ConsCell1), + 1 = do_test_size({}), + 2 = do_test_size({[]}), + 3 = do_test_size({a,b}), + 7 = do_test_size({a,[b,c]}), + + %% Test internal consistency of sizes, but without testing + %% exact sizes. + Const = id(42), + AnotherConst = id(7), + + %% Fun environment size = 0 (the smallest fun possible) + SimplestFun = fun() -> ok end, + FunSz0 = do_test_size(SimplestFun), + + %% Fun environment size = 1 + FunSz1 = do_test_size(fun() -> Const end), + FunSz1 = FunSz0 + 1, + + %% Fun environment size = 2 + FunSz2 = do_test_size(fun() -> Const+AnotherConst end), + FunSz2 = FunSz1 + 1, + + FunSz1 = do_test_size(fun() -> ConsCell1 end) - do_test_size(ConsCell1), + + %% Test shared data structures. + do_test_size([ConsCell1|ConsCell1], + 3*ConsCellSz, + 2*ConsCellSz), + do_test_size(fun() -> {ConsCell1,ConsCell2} end, + FunSz2 + 2*ConsCellSz, + FunSz2 + ConsCellSz), + do_test_size({SimplestFun,SimplestFun}, + 2*FunSz0+do_test_size({a,b}), + FunSz0+do_test_size({a,b})), ok. +do_test_size(Term) -> + Sz = erts_debug:flat_size(Term), + Sz = erts_debug:size(Term). + +do_test_size(Term, FlatSz, Sz) -> + FlatSz = erts_debug:flat_size(Term), + Sz = erts_debug:size(Term). + flat_size_big(Config) when is_list(Config) -> %% Build a term whose external size only fits in a big num (on 32-bit CPU). flat_size_big_1(16#11111111111111117777777777777777888889999, 0, 16#FFFFFFF). @@ -96,3 +138,6 @@ instructions(Config) when is_list(Config) -> ?line Is = erts_debug:instructions(), ?line _ = [list_to_atom(I) || I <- Is], ok. + +id(I) -> + I. 
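Note on the erts_debug_SUITE change above: the new test_size/1 case exercises the difference between erts_debug:flat_size/1 (shared subterms counted once per reference) and erts_debug:size/1 (shared subterms counted only once). A minimal sketch of that distinction, reusing the 2-word cons-cell size the suite itself asserts; the module name size_demo and its functions are illustrative only, not part of the commit:

-module(size_demo).
-export([run/0]).

%% id/1 mirrors the suite: it keeps the compiler from constant-folding
%% the terms, so the sizes are measured on real heap data.
id(I) -> I.

run() ->
    Cell   = id([a|b]),     %% a single cons cell occupies 2 words
    Shared = [Cell|Cell],   %% the outer cell references Cell twice
    6 = erts_debug:flat_size(Shared),  %% 3 cells counted: sharing ignored
    4 = erts_debug:size(Shared),       %% 2 cells counted: sharing honoured
    ok.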
diff --git a/erts/etc/common/escript.c b/erts/etc/common/escript.c index 6ed79c91e3..e70c95b67c 100644 --- a/erts/etc/common/escript.c +++ b/erts/etc/common/escript.c @@ -377,7 +377,8 @@ main(int argc, char** argv) last_opt = argv; #ifdef __WIN32__ - if (_stricmp(basename, "escript.exe") == 0) { + if ( (_stricmp(basename, "escript.exe") == 0) + ||(_stricmp(basename, "escript") == 0)) { #else if (strcmp(basename, "escript") == 0) { #endif diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam Binary files differindex f382236af7..b2f3ab6c5b 100644 --- a/erts/preloaded/ebin/prim_inet.beam +++ b/erts/preloaded/ebin/prim_inet.beam diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam Binary files differindex bf88a51502..7a1f896d36 100644 --- a/erts/preloaded/ebin/zlib.beam +++ b/erts/preloaded/ebin/zlib.beam diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl index 0cedd284db..14bf3b6c69 100644 --- a/erts/preloaded/src/prim_inet.erl +++ b/erts/preloaded/src/prim_inet.erl @@ -83,8 +83,10 @@ open(Protocol, Family, Type, Req, Data) -> end catch %% The only (?) way to get here is to try to open - %% the sctp driver when it does not exist - error:badarg -> {error,eprotonosupport} + %% the sctp driver when it does not exist (badarg) + error:badarg -> {error, eprotonosupport}; + %% system_limit if out of port slots + error:system_limit -> {error, system_limit} end. enc_family(inet) -> ?INET_AF_INET; diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl index 210532edac..71b730016d 100644 --- a/erts/preloaded/src/zlib.erl +++ b/erts/preloaded/src/zlib.erl @@ -349,10 +349,14 @@ getQSize(Z) -> Compressed :: binary(). compress(Data) -> Z = open(), - deflateInit(Z, default), - Bs = deflate(Z, Data, finish), - deflateEnd(Z), - close(Z), + Bs = try + deflateInit(Z, default), + B = deflate(Z, Data, finish), + deflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs). -spec uncompress(Data) -> Decompressed when @@ -364,10 +368,14 @@ uncompress(Data) -> if Size >= 8 -> Z = open(), - inflateInit(Z), - Bs = inflate(Z, Data), - inflateEnd(Z), - close(Z), + Bs = try + inflateInit(Z), + B = inflate(Z, Data), + inflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs); true -> erlang:error(data_error) @@ -383,10 +391,14 @@ uncompress(Data) -> Compressed :: binary(). zip(Data) -> Z = open(), - deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default), - Bs = deflate(Z, Data, finish), - deflateEnd(Z), - close(Z), + Bs = try + deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default), + B = deflate(Z, Data, finish), + deflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs). -spec unzip(Data) -> Decompressed when @@ -394,10 +406,14 @@ zip(Data) -> Decompressed :: binary(). unzip(Data) -> Z = open(), - inflateInit(Z, -?MAX_WBITS), - Bs = inflate(Z, Data), - inflateEnd(Z), - close(Z), + Bs = try + inflateInit(Z, -?MAX_WBITS), + B = inflate(Z, Data), + inflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs). -spec gzip(Data) -> Compressed when @@ -405,10 +421,14 @@ unzip(Data) -> Compressed :: binary(). gzip(Data) -> Z = open(), - deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default), - Bs = deflate(Z, Data, finish), - deflateEnd(Z), - close(Z), + Bs = try + deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default), + B = deflate(Z, Data, finish), + deflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs). 
-spec gunzip(Data) -> Decompressed when @@ -416,10 +436,14 @@ gzip(Data) -> Decompressed :: binary(). gunzip(Data) -> Z = open(), - inflateInit(Z, 16+?MAX_WBITS), - Bs = inflate(Z, Data), - inflateEnd(Z), - close(Z), + Bs = try + inflateInit(Z, 16+?MAX_WBITS), + B = inflate(Z, Data), + inflateEnd(Z), + B + after + close(Z) + end, iolist_to_binary(Bs). -spec collect(zstream()) -> iolist(). diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl index 0a77527b2f..63a8adbc63 100644 --- a/lib/common_test/src/ct.erl +++ b/lib/common_test/src/ct.erl @@ -64,7 +64,7 @@ print/1, print/2, print/3, pal/1, pal/2, pal/3, capture_start/0, capture_stop/0, capture_get/0, capture_get/1, - fail/1, fail/2, comment/1, comment/2, + fail/1, fail/2, comment/1, comment/2, make_priv_dir/0, testcases/2, userdata/2, userdata/3, timetrap/1, get_timetrap_info/0, sleep/1]). @@ -673,6 +673,15 @@ send_html_comment(Comment) -> ct_util:set_testdata({comment,Html}), test_server:comment(Html). +%%%----------------------------------------------------------------- +%%% @spec make_priv_dir() -> ok | {error,Reason} +%%% Reason = term() +%%% @doc If the test has been started with the create_priv_dir +%%% option set to manual_per_tc, in order for the test case to use +%%% the private directory, it must first create it by calling +%%% this function. +make_priv_dir() -> + test_server:make_priv_dir(). %%%----------------------------------------------------------------- %%% @spec get_target_name(Handle) -> {ok,TargetName} | {error,Reason} diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl index cdd8a6a596..187794e78b 100644 --- a/lib/common_test/src/ct_framework.erl +++ b/lib/common_test/src/ct_framework.erl @@ -29,7 +29,8 @@ -export([get_logopts/0, format_comment/1, get_html_wrapper/3]). --export([error_in_suite/1, ct_init_per_group/2, ct_end_per_group/2]). +-export([error_in_suite/1, init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2]). -export([make_all_conf/3, make_conf/5]). 
@@ -54,6 +55,7 @@ init_tc(Mod,Func,Config) -> %% in case Mod == ct_framework, lookup the suite name Suite = get_suite_name(Mod, Config), + %% check if previous testcase was interpreted and has left %% a "dead" trace window behind - if so, kill it case ct_util:get_testdata(interpret) of @@ -63,27 +65,17 @@ init_tc(Mod,Func,Config) -> _ -> ok end, - %% check if we need to add defaults explicitly because - %% there's no init_per_suite exported from Mod - {InitFailed,DoInit} = - case ct_util:get_testdata(curr_tc) of - {Suite,{suite0_failed,_}=Failure} -> - {Failure,false}; - {?MODULE,_} -> % should not really happen - {false,false}; - {Suite,_} -> % Func is not 1st case in suite - {false,false}; - _ when Func == init_per_suite -> % defaults will be added anyway - {false,false}; - _ -> % first case in suite - {false,true} - end, - case InitFailed of - false -> + + case ct_util:get_testdata(curr_tc) of + {Suite,{suite0_failed,{require,Reason}}} -> + {skip,{require_failed_in_suite0,Reason}}; + {Suite,{suite0_failed,_}=Failure} -> + {skip,Failure}; + _ -> ct_util:set_testdata({curr_tc,{Suite,Func}}), case ct_util:read_suite_data({seq,Suite,Func}) of undefined -> - init_tc1(Mod,Func,Config,DoInit); + init_tc1(Mod,Suite,Func,Config); Seq when is_atom(Seq) -> case ct_util:read_suite_data({seq,Suite,Seq}) of [Func|TCs] -> % this is the 1st case in Seq @@ -96,17 +88,13 @@ init_tc(Mod,Func,Config) -> _ -> ok end, - init_tc1(Mod,Func,Config,DoInit); + init_tc1(Mod,Suite,Func,Config); {failed,Seq,BadFunc} -> {skip,{sequence_failed,Seq,BadFunc}} - end; - {_,{require,Reason}} -> - {skip,{require_failed_in_suite0,Reason}}; - _ -> - {skip,InitFailed} + end end. -init_tc1(?MODULE,error_in_suite,[Config0],_) when is_list(Config0) -> +init_tc1(?MODULE,_,error_in_suite,[Config0]) when is_list(Config0) -> ct_logs:init_tc(false), ct_event:notify(#event{name=tc_start, node=node(), @@ -117,14 +105,16 @@ init_tc1(?MODULE,error_in_suite,[Config0],_) when is_list(Config0) -> Reason -> {skip,Reason} end; -init_tc1(Mod,Func,[Config0],DoInit) when is_list(Config0) -> + +init_tc1(Mod,Suite,Func,[Config0]) when is_list(Config0) -> Config1 = case ct_util:read_suite_data(last_saved_config) of - {{Mod,LastFunc},SavedConfig} -> % last testcase + {{Suite,LastFunc},SavedConfig} -> % last testcase [{saved_config,{LastFunc,SavedConfig}} | lists:keydelete(saved_config,1,Config0)]; - {{LastSuite,InitOrEnd},SavedConfig} when InitOrEnd == init_per_suite ; - InitOrEnd == end_per_suite -> + {{LastSuite,InitOrEnd}, + SavedConfig} when InitOrEnd == init_per_suite ; + InitOrEnd == end_per_suite -> %% last suite [{saved_config,{LastSuite,SavedConfig}} | lists:keydelete(saved_config,1,Config0)]; @@ -133,13 +123,14 @@ init_tc1(Mod,Func,[Config0],DoInit) when is_list(Config0) -> end, ct_util:delete_suite_data(last_saved_config), Config = lists:keydelete(watchdog,1,Config1), - if Func /= init_per_suite, DoInit /= true -> - ok; - true -> + + if Func == init_per_suite -> %% delete all default values used in previous suite ct_config:delete_default_config(suite), %% release all name -> key bindings (once per suite) - ct_config:release_allocated() + ct_config:release_allocated(); + Func /= init_per_suite -> + ok end, GroupPath = ?val(tc_group_path, Config, []), @@ -154,9 +145,7 @@ init_tc1(Mod,Func,[Config0],DoInit) when is_list(Config0) -> true -> ct_config:delete_default_config(testcase) end, - %% in case Mod == ct_framework, lookup the suite name - Suite = get_suite_name(Mod, Config), - case add_defaults(Mod,Func,AllGroups,DoInit) of + case 
add_defaults(Mod,Func,AllGroups) of Error = {suite0_failed,_} -> ct_logs:init_tc(false), ct_event:notify(#event{name=tc_start, @@ -166,35 +155,25 @@ init_tc1(Mod,Func,[Config0],DoInit) when is_list(Config0) -> {error,Error}; {SuiteInfo,MergeResult} -> case MergeResult of - {error,Reason} when DoInit == false -> + {error,Reason} -> ct_logs:init_tc(false), ct_event:notify(#event{name=tc_start, node=node(), data={Mod,FuncSpec}}), {skip,Reason}; _ -> - init_tc2(Mod,Func,SuiteInfo,MergeResult, - Config,DoInit) + init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config) end end; -init_tc1(_Mod,_Func,Args,_DoInit) -> - {ok,Args}. -init_tc2(Mod,Func,SuiteInfo,MergeResult,Config,DoInit) -> - %% if first testcase fails when there's no init_per_suite - %% we must do suite/0 configurations before skipping test - MergedInfo = - case MergeResult of - {error,_} when DoInit == true -> - SuiteInfo; - _ -> - MergeResult - end, +init_tc1(_Mod,_Suite,_Func,Args) -> + {ok,Args}. +init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config) -> %% timetrap must be handled before require - MergedInfo1 = timetrap_first(MergedInfo, [], []), + MergedInfo = timetrap_first(MergeResult, [], []), %% tell logger to use specified style sheet - case lists:keysearch(stylesheet,1,MergedInfo++Config) of + case lists:keysearch(stylesheet,1,MergeResult++Config) of {value,{stylesheet,SSFile}} -> ct_logs:set_stylesheet(Func,add_data_dir(SSFile,Config)); _ -> @@ -209,7 +188,7 @@ init_tc2(Mod,Func,SuiteInfo,MergeResult,Config,DoInit) -> %% list of {Type,Bool} tuples, e.g. {telnet,true}), case ct_util:get_overridden_silenced_connections() of undefined -> - case lists:keysearch(silent_connections,1,MergedInfo++Config) of + case lists:keysearch(silent_connections,1,MergeResult++Config) of {value,{silent_connections,Conns}} -> ct_util:silence_connections(Conns); _ -> @@ -218,18 +197,14 @@ init_tc2(Mod,Func,SuiteInfo,MergeResult,Config,DoInit) -> Conns -> ct_util:silence_connections(Conns) end, - if Func /= init_per_suite, DoInit /= true -> - ct_logs:init_tc(false); - true -> - ct_logs:init_tc(true) - end, + ct_logs:init_tc(Func == init_per_suite), FuncSpec = group_or_func(Func,Config), ct_event:notify(#event{name=tc_start, node=node(), data={Mod,FuncSpec}}), - case catch configure(MergedInfo1,MergedInfo1,SuiteInfo, - {FuncSpec,DoInit},Config) of + case catch configure(MergedInfo,MergedInfo,SuiteInfo, + FuncSpec,Config) of {suite0_failed,Reason} -> ct_util:set_testdata({curr_tc,{Mod,{suite0_failed,{require,Reason}}}}), {skip,{require_failed_in_suite0,Reason}}; @@ -246,7 +221,7 @@ init_tc2(Mod,Func,SuiteInfo,MergeResult,Config,DoInit) -> _ -> case get('$test_server_framework_test') of undefined -> - ct_suite_init(Mod, FuncSpec, FinalConfig); + ct_suite_init(Suite, FuncSpec, FinalConfig); Fun -> case Fun(init_tc, FinalConfig) of NewConfig when is_list(NewConfig) -> @@ -258,21 +233,21 @@ init_tc2(Mod,Func,SuiteInfo,MergeResult,Config,DoInit) -> end end. -ct_suite_init(Mod, Func, [Config]) when is_list(Config) -> - case ct_hooks:init_tc( Mod, Func, Config) of +ct_suite_init(Suite, Func, [Config]) when is_list(Config) -> + case ct_hooks:init_tc(Suite, Func, Config) of NewConfig when is_list(NewConfig) -> {ok, [NewConfig]}; Else -> Else end. 
-add_defaults(Mod,Func, GroupPath, DoInit) -> +add_defaults(Mod,Func, GroupPath) -> Suite = get_suite_name(Mod, GroupPath), case (catch Suite:suite()) of {'EXIT',{undef,_}} -> SuiteInfo = merge_with_suite_defaults(Suite,[]), SuiteInfoNoCTH = [I || I <- SuiteInfo, element(1,I) =/= ct_hooks], - case add_defaults1(Mod,Func, GroupPath, SuiteInfoNoCTH, DoInit) of + case add_defaults1(Mod,Func, GroupPath, SuiteInfoNoCTH) of Error = {error,_} -> {SuiteInfo,Error}; MergedInfo -> {SuiteInfo,MergedInfo} end; @@ -292,7 +267,7 @@ add_defaults(Mod,Func, GroupPath, DoInit) -> SuiteInfoNoCTH = [I || I <- SuiteInfo1, element(1,I) =/= ct_hooks], case add_defaults1(Mod,Func, GroupPath, - SuiteInfoNoCTH, DoInit) of + SuiteInfoNoCTH) of Error = {error,_} -> {SuiteInfo1,Error}; MergedInfo -> {SuiteInfo1,MergedInfo} end; @@ -313,7 +288,7 @@ add_defaults(Mod,Func, GroupPath, DoInit) -> {suite0_failed,bad_return_value} end. -add_defaults1(Mod,Func, GroupPath, SuiteInfo, DoInit) -> +add_defaults1(Mod,Func, GroupPath, SuiteInfo) -> Suite = get_suite_name(Mod, GroupPath), %% GroupPathInfo (for subgroup on level X) = %% [LevelXGroupInfo, LevelX-1GroupInfo, ..., TopLevelGroupInfo] @@ -325,8 +300,7 @@ add_defaults1(Mod,Func, GroupPath, SuiteInfo, DoInit) -> _ -> [] end end, GroupPath), - Args = if Func == init_per_group; Func == ct_init_per_group; - Func == end_per_group; Func == ct_end_per_group -> + Args = if Func == init_per_group ; Func == end_per_group -> [?val(name, hd(GroupPath))]; true -> [] @@ -347,7 +321,7 @@ add_defaults1(Mod,Func, GroupPath, SuiteInfo, DoInit) -> (default_config == element(1,SDDef)))], case check_for_clashes(TestCaseInfo, GroupPathInfo, SuiteReqs) of [] -> - add_defaults2(Mod,Func, TCAndGroupInfo,SuiteInfo,SuiteReqs, DoInit); + add_defaults2(Mod,Func, TCAndGroupInfo,SuiteInfo,SuiteReqs); Clashes -> {error,{config_name_already_in_use,Clashes}} end. @@ -421,34 +395,25 @@ keysmember([Key,Pos|Next], List) -> keysmember([], _) -> true. 
-add_defaults2(Mod,init_per_suite, IPSInfo, SuiteInfo,SuiteReqs, false) -> - add_defaults2(Mod,init_per_suite, IPSInfo, SuiteInfo,SuiteReqs, true); +add_defaults2(_Mod,init_per_suite, IPSInfo, SuiteInfo,SuiteReqs) -> + Info = lists:flatten([IPSInfo, SuiteReqs]), + lists:flatten([Info,remove_info_in_prev(Info, [SuiteInfo])]); -add_defaults2(_Mod,IPG, IPGAndGroupInfo, SuiteInfo,SuiteReqs, DoInit) when - IPG == init_per_group ; IPG == ct_init_per_group -> - %% If DoInit == true, we have to process the suite() list, otherwise - %% it has already been handled (see clause for init_per_suite) - case DoInit of - true -> - %% note: we know for sure this is a top level group - Info = lists:flatten([IPGAndGroupInfo, SuiteReqs]), - Info ++ remove_info_in_prev(Info, [SuiteInfo]); - false -> - SuiteInfo1 = - remove_info_in_prev(lists:flatten([IPGAndGroupInfo, - SuiteReqs]), [SuiteInfo]), - %% don't require terms in prev groups (already processed) - case IPGAndGroupInfo of - [IPGInfo] -> - lists:flatten([IPGInfo,SuiteInfo1]); - [IPGInfo | [CurrGroupInfo | PrevGroupInfo]] -> - PrevGroupInfo1 = delete_require_terms(PrevGroupInfo), - lists:flatten([IPGInfo,CurrGroupInfo,PrevGroupInfo1, - SuiteInfo1]) - end +add_defaults2(_Mod,init_per_group, IPGAndGroupInfo, SuiteInfo,SuiteReqs) -> + SuiteInfo1 = + remove_info_in_prev(lists:flatten([IPGAndGroupInfo, + SuiteReqs]), [SuiteInfo]), + %% don't require terms in prev groups (already processed) + case IPGAndGroupInfo of + [IPGInfo] -> + lists:flatten([IPGInfo,SuiteInfo1]); + [IPGInfo | [CurrGroupInfo | PrevGroupInfo]] -> + PrevGroupInfo1 = delete_require_terms(PrevGroupInfo), + lists:flatten([IPGInfo,CurrGroupInfo,PrevGroupInfo1, + SuiteInfo1]) end; -add_defaults2(_Mod,_Func, TCAndGroupInfo, SuiteInfo,SuiteReqs, false) -> +add_defaults2(_Mod,_Func, TCAndGroupInfo, SuiteInfo,SuiteReqs) -> %% Include require elements from test case info and current group, %% but not from previous groups or suite/0 (since we've already required %% those vars). Let test case info elements override group and suite @@ -463,14 +428,7 @@ add_defaults2(_Mod,_Func, TCAndGroupInfo, SuiteInfo,SuiteReqs, false) -> PrevGroupInfo1 = delete_require_terms(PrevGroupInfo), lists:flatten([TCInfo,CurrGroupInfo,PrevGroupInfo1, SuiteInfo1]) - end; - -add_defaults2(_Mod,_Func, TCInfo, SuiteInfo,SuiteReqs, true) -> - %% Here we have to process the suite info list also (no call to - %% init_per_suite before this first test case). This TC can't belong - %% to a group, or the clause for (ct_)init_per_group would've caught this. - Info = lists:flatten([TCInfo, SuiteReqs]), - lists:flatten([Info,remove_info_in_prev(Info, [SuiteInfo])]). + end. 
delete_require_terms([Info | Prev]) -> Info1 = [T || T <- Info, @@ -558,18 +516,9 @@ configure([],_,_,_,Config) -> %% function and be scoped 'group', or come from the testcase %% info function and then be scoped 'testcase' -required_default(Name,Key,Info,SuiteInfo,{FuncSpec,true}) -> - case try_set_default(Name,Key,SuiteInfo,suite) of - ok -> - ok; - _ -> - required_default(Name,Key,Info,[],{FuncSpec,false}) - end; -required_default(Name,Key,Info,_,{init_per_suite,_}) -> +required_default(Name,Key,Info,_,init_per_suite) -> try_set_default(Name,Key,Info,suite); -required_default(Name,Key,Info,_,{{init_per_group,GrName,_},_}) -> - try_set_default(Name,Key,Info,{group,GrName}); -required_default(Name,Key,Info,_,{{ct_init_per_group,GrName,_},_}) -> +required_default(Name,Key,Info,_,{init_per_group,GrName,_}) -> try_set_default(Name,Key,Info,{group,GrName}); required_default(Name,Key,Info,_,_FuncSpec) -> try_set_default(Name,Key,Info,testcase). @@ -621,6 +570,9 @@ end_tc(Mod,Func,{Result,[Args]}, Return) -> end_tc(Mod,Func,self(),Result,Args,Return). end_tc(Mod,Func,TCPid,Result,Args,Return) -> + %% in case Mod == ct_framework, lookup the suite name + Suite = get_suite_name(Mod, Args), + case lists:keysearch(watchdog,1,Args) of {value,{watchdog,Dog}} -> test_server:timetrap_cancel(Dog); false -> ok @@ -636,60 +588,62 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) -> end, ct_util:delete_testdata(comment), ct_util:delete_suite_data(last_saved_config), - FuncSpec = - case group_or_func(Func,Args) of - {_,GroupName,_Props} = Group -> - if Func == end_per_group; Func == ct_end_per_group -> - ct_config:delete_default_config({group,GroupName}); - true -> ok - end, - case lists:keysearch(save_config,1,Args) of - {value,{save_config,SaveConfig}} -> - ct_util:save_suite_data( - last_saved_config, - {Mod,{group,GroupName}}, - SaveConfig), - Group; - false -> - Group - end; - _ -> - case lists:keysearch(save_config,1,Args) of - {value,{save_config,SaveConfig}} -> - ct_util:save_suite_data(last_saved_config, - {Mod,Func},SaveConfig), - Func; - false -> - Func - end - end, - ct_util:reset_silent_connections(), + + FuncSpec = case group_or_func(Func,Args) of + {_,_GroupName,_} = Group -> Group; + _ -> Func + end, case get('$test_server_framework_test') of undefined -> {FinalResult,FinalNotify} = case ct_hooks:end_tc( - Mod, FuncSpec, Args, Result, Return) of + Suite, FuncSpec, Args, Result, Return) of '$ct_no_change' -> {ok,Result}; FinalResult1 -> {FinalResult1,FinalResult1} end, - % send sync notification so that event handlers may print - % in the log file before it gets closed + %% send sync notification so that event handlers may print + %% in the log file before it gets closed ct_event:sync_notify(#event{name=tc_done, node=node(), data={Mod,FuncSpec, tag_cth(FinalNotify)}}); Fun -> - % send sync notification so that event handlers may print - % in the log file before it gets closed + %% send sync notification so that event handlers may print + %% in the log file before it gets closed ct_event:sync_notify(#event{name=tc_done, node=node(), data={Mod,FuncSpec,tag(Result)}}), FinalResult = Fun(end_tc, Return) + end, + + case FuncSpec of + {_,GroupName,_Props} -> + if Func == end_per_group -> + ct_config:delete_default_config({group,GroupName}); + true -> ok + end, + case lists:keysearch(save_config,1,Args) of + {value,{save_config,SaveConfig}} -> + ct_util:save_suite_data(last_saved_config, + {Suite,{group,GroupName}}, + SaveConfig); + false -> + ok + end; + _ -> + case lists:keysearch(save_config,1,Args) of 
+ {value,{save_config,SaveConfig}} -> + ct_util:save_suite_data(last_saved_config, + {Suite,Func},SaveConfig); + false -> + ok + end end, + ct_util:reset_silent_connections(), case FinalResult of {skip,{sequence_failed,_,_}} -> @@ -706,7 +660,7 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) -> end, case Func of end_per_suite -> - ct_util:match_delete_suite_data({seq,Mod,'_'}); + ct_util:match_delete_suite_data({seq,Suite,'_'}); _ -> ok end, @@ -864,9 +818,7 @@ mark_as_failed1(_,_,_,[]) -> ok. group_or_func(Func, Config) when Func == init_per_group; - Func == end_per_group; - Func == ct_init_per_group; - Func == ct_end_per_group -> + Func == end_per_group -> case ?val(tc_group_properties, Config) of undefined -> {Func,unknown,[]}; @@ -1209,8 +1161,8 @@ make_conf(Mod, Name, Props, TestSpec) -> "end_per_group/2 missing for group " "~p in ~p, using default.", [Name,Mod]), - {{?MODULE,ct_init_per_group}, - {?MODULE,ct_end_per_group}, + {{?MODULE,init_per_group}, + {?MODULE,end_per_group}, [{suite,Mod}]} end, {conf,[{name,Name}|Props++ExtraProps],InitConf,TestSpec,EndConf}. @@ -1470,22 +1422,31 @@ error_in_suite(Config) -> Reason = test_server:lookup_config(error,Config), exit(Reason). +%% if init_per_suite and end_per_suite are missing in the suite, +%% these will be called instead (without any trace of them in the +%% log files), only so that it's possible to call hook functions +%% for configuration +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + %% if the group config functions are missing in the suite, %% use these instead -ct_init_per_group(GroupName, Config) -> +init_per_group(GroupName, Config) -> ct:comment(io_lib:format("start of ~p", [GroupName])), ct_logs:log("TEST INFO", "init_per_group/2 for ~w missing " "in suite, using default.", [GroupName]), Config. -ct_end_per_group(GroupName, _) -> +end_per_group(GroupName, _) -> ct:comment(io_lib:format("end of ~p", [GroupName])), ct_logs:log("TEST INFO", "end_per_group/2 for ~w missing " "in suite, using default.", [GroupName]), ok. - %%%----------------------------------------------------------------- %%% @spec report(What,Data) -> ok @@ -1562,10 +1523,6 @@ report(What,Data) -> ok; {end_per_group,_} -> ok; - {ct_init_per_group,_} -> - ok; - {ct_end_per_group,_} -> - ok; {_,ok} -> add_to_stats(ok); {_,{skipped,{failed,{_,init_per_testcase,_}}}} -> @@ -1602,8 +1559,7 @@ report(What,Data) -> data=Data}), ct_hooks:on_tc_skip(What, Data), if Case /= end_per_suite, - Case /= end_per_group, - Case /= ct_end_per_group -> + Case /= end_per_group -> add_to_stats(auto_skipped); true -> ok diff --git a/lib/common_test/src/ct_hooks.erl b/lib/common_test/src/ct_hooks.erl index c42adbbdd9..2a23cd992b 100644 --- a/lib/common_test/src/ct_hooks.erl +++ b/lib/common_test/src/ct_hooks.erl @@ -70,8 +70,7 @@ terminate(Hooks) -> {skip, Reason :: term()} | {auto_skip, Reason :: term()} | {fail, Reason :: term()}. -init_tc(ct_framework, _Func, Args) -> - Args; + init_tc(Mod, init_per_suite, Config) -> Info = try proplists:get_value(ct_hooks, Mod:suite(),[]) of List when is_list(List) -> @@ -104,27 +103,21 @@ init_tc(_Mod, TC, Config) -> {auto_skip, Reason :: term()} | {fail, Reason :: term()} | ok | '$ct_no_change'. 
-end_tc(ct_framework, _Func, _Args, Result, _Return) -> - Result; end_tc(Mod, init_per_suite, Config, _Result, Return) -> call(fun call_generic/3, Return, [post_init_per_suite, Mod, Config], '$ct_no_change'); - end_tc(Mod, end_per_suite, Config, Result, _Return) -> call(fun call_generic/3, Result, [post_end_per_suite, Mod, Config], '$ct_no_change'); - end_tc(_Mod, {init_per_group, GroupName, _}, Config, _Result, Return) -> call(fun call_generic/3, Return, [post_init_per_group, GroupName, Config], '$ct_no_change'); - end_tc(Mod, {end_per_group, GroupName, Opts}, Config, Result, _Return) -> Res = call(fun call_generic/3, Result, [post_end_per_group, GroupName, Config], '$ct_no_change'), maybe_stop_locker(Mod, GroupName,Opts), Res; - end_tc(_Mod, TC, Config, Result, _Return) -> call(fun call_generic/3, Result, [post_end_per_testcase, TC, Config], '$ct_no_change'). diff --git a/lib/common_test/src/ct_logs.erl b/lib/common_test/src/ct_logs.erl index b2f669fefe..0cd9b5f7cb 100644 --- a/lib/common_test/src/ct_logs.erl +++ b/lib/common_test/src/ct_logs.erl @@ -920,33 +920,48 @@ insert_dir(D,[D1|Ds]) -> insert_dir(D,[]) -> [D]. -make_last_run_index([Name|Rest], Result, TotSucc, TotFail, UserSkip, AutoSkip, - TotNotBuilt, Missing) -> - case last_test(Name) of +make_last_run_index([Name|Rest], Result, TotSucc, TotFail, + UserSkip, AutoSkip, TotNotBuilt, Missing) -> + case get_run_dirs(Name) of false -> %% Silently skip. - make_last_run_index(Rest, Result, TotSucc, TotFail, UserSkip, AutoSkip, - TotNotBuilt, Missing); - LastLogDir -> + make_last_run_index(Rest, Result, TotSucc, TotFail, + UserSkip, AutoSkip, TotNotBuilt, Missing); + LogDirs -> SuiteName = filename:rootname(filename:basename(Name)), - case make_one_index_entry(SuiteName, LastLogDir, "-", false, Missing) of - {Result1,Succ,Fail,USkip,ASkip,NotBuilt} -> - %% for backwards compatibility - AutoSkip1 = case catch AutoSkip+ASkip of - {'EXIT',_} -> undefined; - Res -> Res - end, - make_last_run_index(Rest, [Result|Result1], TotSucc+Succ, - TotFail+Fail, UserSkip+USkip, AutoSkip1, - TotNotBuilt+NotBuilt, Missing); - error -> - make_last_run_index(Rest, Result, TotSucc, TotFail, UserSkip, AutoSkip, - TotNotBuilt, Missing) - end + {Result1,TotSucc1,TotFail1,UserSkip1,AutoSkip1,TotNotBuilt1} = + make_last_run_index1(SuiteName, LogDirs, Result, + TotSucc, TotFail, + UserSkip, AutoSkip, + TotNotBuilt, Missing), + make_last_run_index(Rest, Result1, TotSucc1, TotFail1, + UserSkip1, AutoSkip1, + TotNotBuilt1, Missing) end; + make_last_run_index([], Result, TotSucc, TotFail, UserSkip, AutoSkip, TotNotBuilt, _) -> {ok, [Result|total_row(TotSucc, TotFail, UserSkip, AutoSkip, TotNotBuilt, false)], {TotSucc,TotFail,UserSkip,AutoSkip,TotNotBuilt}}. + +make_last_run_index1(SuiteName, [LogDir | LogDirs], Result, TotSucc, TotFail, + UserSkip, AutoSkip, TotNotBuilt, Missing) -> + case make_one_index_entry(SuiteName, LogDir, "-", false, Missing) of + {Result1,Succ,Fail,USkip,ASkip,NotBuilt} -> + %% for backwards compatibility + AutoSkip1 = case catch AutoSkip+ASkip of + {'EXIT',_} -> undefined; + Res -> Res + end, + make_last_run_index1(SuiteName, LogDirs, [Result|Result1], TotSucc+Succ, + TotFail+Fail, UserSkip+USkip, AutoSkip1, + TotNotBuilt+NotBuilt, Missing); + error -> + make_last_run_index1(SuiteName, LogDirs, Result, TotSucc, TotFail, + UserSkip, AutoSkip, TotNotBuilt, Missing) + end; +make_last_run_index1(_, [], Result, TotSucc, TotFail, + UserSkip, AutoSkip, TotNotBuilt, _) -> + {Result,TotSucc,TotFail,UserSkip,AutoSkip,TotNotBuilt}. 
make_one_index_entry(SuiteName, LogDir, Label, All, Missing) -> case count_cases(LogDir) of @@ -1698,8 +1713,8 @@ make_all_suites_index(NewTestData = {_TestName,DirName}) -> sort_logdirs([Dir|Dirs],Groups) -> TestName = filename:rootname(filename:basename(Dir)), case filelib:wildcard(filename:join(Dir,"run.*")) of - [RunDir] -> - Groups1 = insert_test(TestName,{filename:basename(RunDir),RunDir},Groups), + RunDirs = [_|_] -> + Groups1 = sort_logdirs1(TestName,RunDirs,Groups), sort_logdirs(Dirs,Groups1); _ -> % ignore missing run directory sort_logdirs(Dirs,Groups) @@ -1707,6 +1722,12 @@ sort_logdirs([Dir|Dirs],Groups) -> sort_logdirs([],Groups) -> lists:keysort(1,sort_each_group(Groups)). +sort_logdirs1(TestName,[RunDir|RunDirs],Groups) -> + Groups1 = insert_test(TestName,{filename:basename(RunDir),RunDir},Groups), + sort_logdirs1(TestName,RunDirs,Groups1); +sort_logdirs1(_,[],Groups) -> + Groups. + insert_test(Test,IxDir,[{Test,IxDirs}|Groups]) -> [{Test,[IxDir|IxDirs]}|Groups]; insert_test(Test,IxDir,[]) -> @@ -1998,21 +2019,17 @@ notify_and_unlock_file(File) -> end. %%%----------------------------------------------------------------- -%%% @spec last_test(Dir) -> string() | false +%%% @spec get_run_dirs(Dir) -> [string()] | false %%% %%% @doc %%% -last_test(Dir) -> - last_test(filelib:wildcard(filename:join(Dir, "run.[1-2]*")), false). - -last_test([Run|Rest], false) -> - last_test(Rest, Run); -last_test([Run|Rest], Latest) when Run > Latest -> - last_test(Rest, Run); -last_test([_|Rest], Latest) -> - last_test(Rest, Latest); -last_test([], Latest) -> - Latest. +get_run_dirs(Dir) -> + case filelib:wildcard(filename:join(Dir, "run.[1-2]*")) of + [] -> + false; + RunDirs -> + lists:sort(RunDirs) + end. %%%----------------------------------------------------------------- %%% @spec xhtml(HTML, XHTML) -> HTML | XHTML diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl index 45436392d8..72124f6f21 100644 --- a/lib/common_test/src/ct_run.erl +++ b/lib/common_test/src/ct_run.erl @@ -63,6 +63,7 @@ stylesheet, multiply_timetraps = 1, scale_timetraps = false, + create_priv_dir, testspecs = [], tests}). @@ -178,6 +179,10 @@ script_start1(Parent, Args) -> fun([CT]) -> list_to_atom(CT); ([]) -> true end, false, Args), + CreatePrivDir = get_start_opt(create_priv_dir, + fun([PD]) -> list_to_atom(PD); + ([]) -> auto_per_tc + end, Args), EvHandlers = event_handler_args2opts(Args), CTHooks = ct_hooks_args2opts(Args), EnableBuiltinHooks = get_start_opt(enable_builtin_hooks, @@ -255,7 +260,8 @@ script_start1(Parent, Args) -> silent_connections = SilentConns, stylesheet = Stylesheet, multiply_timetraps = MultTT, - scale_timetraps = ScaleTT}, + scale_timetraps = ScaleTT, + create_priv_dir = CreatePrivDir}, %% check if log files should be refreshed or go on to run tests... 
Result = run_or_refresh(StartOpts, Args), @@ -322,12 +328,21 @@ script_start2(StartOpts = #opts{vts = undefined, Cover = choose_val(StartOpts#opts.cover, SpecStartOpts#opts.cover), - MultTT = choose_val(StartOpts#opts.multiply_timetraps, - SpecStartOpts#opts.multiply_timetraps), - ScaleTT = choose_val(StartOpts#opts.scale_timetraps, - SpecStartOpts#opts.scale_timetraps), - AllEvHs = merge_vals([StartOpts#opts.event_handlers, - SpecStartOpts#opts.event_handlers]), + MultTT = + choose_val(StartOpts#opts.multiply_timetraps, + SpecStartOpts#opts.multiply_timetraps), + ScaleTT = + choose_val(StartOpts#opts.scale_timetraps, + SpecStartOpts#opts.scale_timetraps), + + CreatePrivDir = + choose_val(StartOpts#opts.create_priv_dir, + SpecStartOpts#opts.create_priv_dir), + + AllEvHs = + merge_vals([StartOpts#opts.event_handlers, + SpecStartOpts#opts.event_handlers]), + AllCTHooks = merge_vals( [StartOpts#opts.ct_hooks, SpecStartOpts#opts.ct_hooks]), @@ -354,7 +369,8 @@ script_start2(StartOpts = #opts{vts = undefined, EnableBuiltinHooks, include = AllInclude, multiply_timetraps = MultTT, - scale_timetraps = ScaleTT}} + scale_timetraps = ScaleTT, + create_priv_dir = CreatePrivDir}} end; _ -> {undefined,StartOpts} @@ -567,6 +583,7 @@ script_usage() -> "\n\t[-no_auto_compile]" "\n\t[-multiply_timetraps N]" "\n\t[-scale_timetraps]" + "\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]" "\n\t[-basic_html]\n\n"), io:format("Run tests from command line:\n\n" "\tct_run [-dir TestDir1 TestDir2 .. TestDirN] |" @@ -586,6 +603,7 @@ script_usage() -> "\n\t[-no_auto_compile]" "\n\t[-multiply_timetraps N]" "\n\t[-scale_timetraps]" + "\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]" "\n\t[-basic_html]" "\n\t[-repeat N [-force_stop]] |" "\n\t[-duration HHMMSS [-force_stop]] |" @@ -606,6 +624,7 @@ script_usage() -> "\n\t[-no_auto_compile]" "\n\t[-multiply_timetraps N]" "\n\t[-scale_timetraps]" + "\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]" "\n\t[-basic_html]" "\n\t[-repeat N [-force_stop]] |" "\n\t[-duration HHMMSS [-force_stop]] |" @@ -782,6 +801,9 @@ run_test2(StartOpts) -> MultiplyTT = get_start_opt(multiply_timetraps, value, 1, StartOpts), ScaleTT = get_start_opt(scale_timetraps, value, false, StartOpts), + %% create unique priv dir names + CreatePrivDir = get_start_opt(create_priv_dir, value, StartOpts), + %% auto compile & include files Include = case proplists:get_value(auto_compile, StartOpts) of @@ -842,7 +864,8 @@ run_test2(StartOpts) -> silent_connections = SilentConns, stylesheet = Stylesheet, multiply_timetraps = MultiplyTT, - scale_timetraps = ScaleTT}, + scale_timetraps = ScaleTT, + create_priv_dir = CreatePrivDir}, %% test specification case proplists:get_value(spec, StartOpts) of @@ -889,6 +912,8 @@ run_spec_file(Relaxed, SpecOpts#opts.multiply_timetraps), ScaleTT = choose_val(Opts#opts.scale_timetraps, SpecOpts#opts.scale_timetraps), + CreatePrivDir = choose_val(Opts#opts.create_priv_dir, + SpecOpts#opts.create_priv_dir), AllEvHs = merge_vals([Opts#opts.event_handlers, SpecOpts#opts.event_handlers]), AllInclude = merge_vals([Opts#opts.include, @@ -912,6 +937,7 @@ run_spec_file(Relaxed, testspecs = AbsSpecs, multiply_timetraps = MultTT, scale_timetraps = ScaleTT, + create_priv_dir = CreatePrivDir, ct_hooks = AllCTHooks, enable_builtin_hooks = EnableBuiltinHooks }, @@ -1170,7 +1196,8 @@ get_data_for_node(#testspec{label = Labels, enable_builtin_hooks = EnableBuiltinHooks, include = Incl, multiply_timetraps = MTs, - scale_timetraps = STs}, Node) -> + 
scale_timetraps = STs, + create_priv_dir = PDs}, Node) -> Label = proplists:get_value(Node, Labels), Profile = proplists:get_value(Node, Profiles), LogDir = case proplists:get_value(Node, LogDirs) of @@ -1184,6 +1211,7 @@ get_data_for_node(#testspec{label = Labels, Cover = proplists:get_value(Node, CoverFs), MT = proplists:get_value(Node, MTs), ST = proplists:get_value(Node, STs), + CreatePrivDir = proplists:get_value(Node, PDs), ConfigFiles = [{?ct_config_txt,F} || {N,F} <- Cfgs, N==Node] ++ [CBF || {N,CBF} <- UsrCfgs, N==Node], EvHandlers = [{H,A} || {N,H,A} <- EvHs, N==Node], @@ -1200,7 +1228,8 @@ get_data_for_node(#testspec{label = Labels, enable_builtin_hooks = EnableBuiltinHooks, include = Include, multiply_timetraps = MT, - scale_timetraps = ST}. + scale_timetraps = ST, + create_priv_dir = CreatePrivDir}. refresh_logs(LogDir) -> {ok,Cwd} = file:get_cwd(), @@ -1384,7 +1413,8 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) -> %% which framework it runs under. case os:getenv("TEST_SERVER_FRAMEWORK") of false -> - os:putenv("TEST_SERVER_FRAMEWORK", "ct_framework"); + os:putenv("TEST_SERVER_FRAMEWORK", "ct_framework"), + os:putenv("TEST_SERVER_FRAMEWORK_NAME", "common_test"); "ct_framework" -> ok; Other -> @@ -1848,6 +1878,8 @@ do_run_test(Tests, Skip, Opts) -> test_server_ctrl:multiply_timetraps(Opts#opts.multiply_timetraps), test_server_ctrl:scale_timetraps(Opts#opts.scale_timetraps), + test_server_ctrl:create_priv_dir(choose_val(Opts#opts.create_priv_dir, + auto_per_run)), ct_event:notify(#event{name=start_info, node=node(), data={NoOfTests,NoOfSuites,NoOfCases}}), @@ -2297,7 +2329,7 @@ ct_hooks_args2opts(Args) -> Acc end,[],Args). -ct_hooks_args2opts([CTH,Arg,Prio,"and"| Rest],Acc) -> +ct_hooks_args2opts([CTH,Arg,Prio,"and"| Rest],Acc) when Arg /= "and" -> ct_hooks_args2opts(Rest,[{list_to_atom(CTH), parse_cth_args(Arg), parse_cth_args(Prio)}|Acc]); @@ -2447,6 +2479,10 @@ opts2args(EnvStartOpts) -> [{scale_timetraps,[]}]; ({scale_timetraps,false}) -> []; + ({create_priv_dir,auto_per_run}) -> + []; + ({create_priv_dir,PD}) when is_atom(PD) -> + [{create_priv_dir,[atom_to_list(PD)]}]; ({force_stop,true}) -> [{force_stop,[]}]; ({force_stop,false}) -> diff --git a/lib/common_test/src/ct_testspec.erl b/lib/common_test/src/ct_testspec.erl index b68cbd3aa1..5b197c0c81 100644 --- a/lib/common_test/src/ct_testspec.erl +++ b/lib/common_test/src/ct_testspec.erl @@ -568,6 +568,21 @@ add_tests([{scale_timetraps,Node,ST}|Ts],Spec) -> add_tests([{scale_timetraps,ST}|Ts],Spec) -> add_tests([{scale_timetraps,all_nodes,ST}|Ts],Spec); +%% --- create_priv_dir --- +add_tests([{create_priv_dir,all_nodes,PD}|Ts],Spec) -> + Tests = lists:map(fun(N) -> {create_priv_dir,N,PD} end, list_nodes(Spec)), + add_tests(Tests++Ts,Spec); +add_tests([{create_priv_dir,Nodes,PD}|Ts],Spec) when is_list(Nodes) -> + Ts1 = separate(Nodes,create_priv_dir,[PD],Ts,Spec#testspec.nodes), + add_tests(Ts1,Spec); +add_tests([{create_priv_dir,Node,PD}|Ts],Spec) -> + PDs = Spec#testspec.create_priv_dir, + PDs1 = [{ref2node(Node,Spec#testspec.nodes),PD} | + lists:keydelete(ref2node(Node,Spec#testspec.nodes),1,PDs)], + add_tests(Ts,Spec#testspec{create_priv_dir=PDs1}); +add_tests([{create_priv_dir,PD}|Ts],Spec) -> + add_tests([{create_priv_dir,all_nodes,PD}|Ts],Spec); + %% --- config --- add_tests([{config,all_nodes,Files}|Ts],Spec) -> Tests = lists:map(fun(N) -> {config,N,Files} end, list_nodes(Spec)), @@ -1158,7 +1173,8 @@ valid_terms() -> {skip_groups,6}, {skip_groups,7}, {skip_cases,5}, - {skip_cases,6} + 
{skip_cases,6}, + {create_priv_dir,2} ]. %% this function "guesses" if the user has misspelled a term name diff --git a/lib/common_test/src/ct_util.hrl b/lib/common_test/src/ct_util.hrl index bde832811a..082599a9c6 100644 --- a/lib/common_test/src/ct_util.hrl +++ b/lib/common_test/src/ct_util.hrl @@ -43,9 +43,10 @@ include=[], multiply_timetraps=[], scale_timetraps=[], + create_priv_dir=[], alias=[], tests=[], - merge_tests = true }). + merge_tests=true}). -record(cover, {app=none, level=details, diff --git a/lib/common_test/src/vts.erl b/lib/common_test/src/vts.erl index cc8a932887..9dfb0bd6b8 100644 --- a/lib/common_test/src/vts.erl +++ b/lib/common_test/src/vts.erl @@ -766,10 +766,6 @@ report1(tc_done,{_Suite,init_per_group,_},State) -> State; report1(tc_done,{_Suite,end_per_group,_},State) -> State; -report1(tc_done,{_Suite,ct_init_per_group,_},State) -> - State; -report1(tc_done,{_Suite,ct_end_per_group,_},State) -> - State; report1(tc_done,{_Suite,_Case,ok},State) -> State#state{ok=State#state.ok+1}; report1(tc_done,{_Suite,_Case,{failed,_Reason}},State) -> diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile index 284612b8f7..332c444145 100644 --- a/lib/common_test/test/Makefile +++ b/lib/common_test/test/Makefile @@ -29,6 +29,7 @@ MODULES= \ ct_test_support_eh \ ct_userconfig_callback \ ct_smoke_test_SUITE \ + ct_priv_dir_SUITE \ ct_event_handler_SUITE \ ct_config_info_SUITE \ ct_groups_test_1_SUITE \ diff --git a/lib/common_test/test/ct_error_SUITE.erl b/lib/common_test/test/ct_error_SUITE.erl index 053edba846..79ed51bc28 100644 --- a/lib/common_test/test/ct_error_SUITE.erl +++ b/lib/common_test/test/ct_error_SUITE.erl @@ -700,7 +700,7 @@ test_events(timetrap_end_conf) -> [ {?eh,start_logging,{'DEF','RUNDIR'}}, {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, - {?eh,start_info,{1,1,8}}, + {?eh,start_info,{1,1,9}}, {?eh,tc_start,{timetrap_1_SUITE,init_per_suite}}, {?eh,tc_done,{timetrap_1_SUITE,init_per_suite,ok}}, {?eh,tc_start,{timetrap_1_SUITE,tc1}}, @@ -735,6 +735,10 @@ test_events(timetrap_end_conf) -> {?eh,tc_done, {timetrap_1_SUITE,tc8,{failed,{timetrap_timeout,1000}}}}, {?eh,test_stats,{0,8,{0,0}}}, + {?eh,tc_start,{timetrap_1_SUITE,tc9}}, + {?eh,tc_done, + {timetrap_1_SUITE,tc9,{failed,{timetrap_timeout,1000}}}}, + {?eh,test_stats,{0,9,{0,0}}}, {?eh,tc_start,{timetrap_1_SUITE,end_per_suite}}, {?eh,tc_done,{timetrap_1_SUITE,end_per_suite,ok}}, {?eh,test_done,{'DEF','STOP_TIME'}}, diff --git a/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_1_SUITE.erl b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_1_SUITE.erl index faa0a7305c..a44ff6d0bc 100644 --- a/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_1_SUITE.erl +++ b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_1_SUITE.erl @@ -145,8 +145,17 @@ end_per_testcase1(tc8, Config) -> ct:pal("end_per_testcase(tc8): ~p", [Config]), tc8 = ?config(tc, Config), {failed,timetrap_timeout} = ?config(tc_status, Config), + ok; + +end_per_testcase1(tc9, Config) -> + ct:pal("end_per_testcase(tc9): ~p", [Config]), + tc9 = ?config(tc, Config), + %% check that it's possible to send and receive synchronously + %% with the group leader process for end_per_testcase + test_server:stop_node(dummy@somehost), ok. 
+ %%-------------------------------------------------------------------- %% Function: groups() -> [Group] %% Group = {GroupName,Properties,GroupsAndTestCases} @@ -170,7 +179,7 @@ groups() -> %% Reason = term() %%-------------------------------------------------------------------- all() -> - [tc1, tc2, tc3, tc4, tc5, tc6, tc7, tc8]. + [tc1, tc2, tc3, tc4, tc5, tc6, tc7, tc8, tc9]. tc1(_) -> timer:sleep(2000), @@ -205,6 +214,10 @@ tc8(_) -> timetrap_helper:sleep(2000), ok. +tc9(_) -> + sleep(2000), + ok. + %%%----------------------------------------------------------------- sleep(T) -> timer:sleep(T), diff --git a/lib/common_test/test/ct_group_info_SUITE.erl b/lib/common_test/test/ct_group_info_SUITE.erl index 2da8219196..18016c4979 100644 --- a/lib/common_test/test/ct_group_info_SUITE.erl +++ b/lib/common_test/test/ct_group_info_SUITE.erl @@ -440,72 +440,72 @@ test_events(timetrap_all_no_ipg) -> {?eh,tc_done,{group_timetrap_3_SUITE,t1,{failed,{timetrap_timeout,1000}}}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g1,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g1,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g1,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g1,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t11,{failed,{timetrap_timeout,500}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g1,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g1,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g1,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g1,[{suite,group_timetrap_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g2,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g2,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g2,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g2,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t21,{failed,{timetrap_timeout,1500}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g2,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g2,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g2,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g2,[{suite,group_timetrap_3_SUITE}]},ok}}], {?eh,tc_done,{group_timetrap_3_SUITE,t2,{failed,{timetrap_timeout,1000}}}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g3,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g3,[{suite,group_timetrap_3_SUITE}]},ok}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g4,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g4,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g3,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g3,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g4,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g4,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t41,{failed,{timetrap_timeout,250}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g4,[{suite,group_timetrap_3_SUITE}]}}}, - 
{?eh,tc_done,{ct_framework,{ct_end_per_group,g4,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g4,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g4,[{suite,group_timetrap_3_SUITE}]},ok}}], {?eh,tc_done,{group_timetrap_3_SUITE,t31,{failed,{timetrap_timeout,500}}}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g5,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g5,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g5,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g5,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t51,{failed,{timetrap_timeout,1500}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g5,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g5,[{suite,group_timetrap_3_SUITE}]},ok}}], - {?eh,tc_start,{ct_framework,{ct_end_per_group,g3,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g3,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g5,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g5,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g3,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g3,[{suite,group_timetrap_3_SUITE}]},ok}}], {?eh,tc_done,{group_timetrap_3_SUITE,t3,{failed,{timetrap_timeout,250}}}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g6,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g6,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g6,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g6,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t61,{failed,{timetrap_timeout,500}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g6,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g6,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g6,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g6,[{suite,group_timetrap_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g7,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g7,[{suite,group_timetrap_3_SUITE}]},ok}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g8,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g8,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g7,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g7,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g8,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g8,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t81,{failed,{timetrap_timeout,750}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g8,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g8,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g8,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g8,[{suite,group_timetrap_3_SUITE}]},ok}}], {?eh,tc_done,{group_timetrap_3_SUITE,t71,{failed,{timetrap_timeout,500}}}}, - 
[{?eh,tc_start,{ct_framework,{ct_init_per_group,g9,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g9,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g9,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g9,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t91,{failed,{timetrap_timeout,250}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g9,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g9,[{suite,group_timetrap_3_SUITE}]},ok}}], - {?eh,tc_start,{ct_framework,{ct_end_per_group,g7,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g7,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g9,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g9,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g7,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g7,[{suite,group_timetrap_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g10,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g10,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g10,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g10,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t101,{failed,{timetrap_timeout,1000}}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g10,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g10,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g10,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g10,[{suite,group_timetrap_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}}, {?eh,tc_done,{group_timetrap_3_SUITE,t111,{failed,{timetrap_timeout,1000}}}}, {?eh,test_stats,{0,14,{0,0}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}}], {?eh,test_done,{'DEF','STOP_TIME'}}, {?eh,stop_logging,[]} @@ -779,78 +779,78 @@ test_events(require_no_ipg) -> {?eh,start_info,{1,1,13}}, {?eh,tc_done,{group_require_3_SUITE,t1,ok}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g1,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g1,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g1,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g1,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t11,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g1,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g1,[{suite,group_require_3_SUITE}]},ok}}], + 
{?eh,tc_start,{ct_framework,{end_per_group,g1,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g1,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g2,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g2,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g2,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g2,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t21,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g2,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g2,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g2,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g2,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g3,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g3,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g3,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g3,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t31,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g3,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g3,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g3,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g3,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g4,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g4,[{suite,group_require_3_SUITE}]}, + [{?eh,tc_start,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]}, {skipped,{require_failed,{name_in_use,common2_alias,common2}}}}}, {?eh,tc_auto_skip,{group_require_3_SUITE,t41, {require_failed,{name_in_use,common2_alias,common2}}}}, {?eh,test_stats,{4,0,{0,1}}}, - {?eh,tc_auto_skip,{ct_framework,ct_end_per_group, + {?eh,tc_auto_skip,{ct_framework,end_per_group, {require_failed,{name_in_use,common2_alias,common2}}}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g5,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g5,[{suite,group_require_3_SUITE}]},ok}}, - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g6,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g6,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g5,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g5,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g6,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g6,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t61,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g6,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g6,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g6,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g6,[{suite,group_require_3_SUITE}]},ok}}], {?eh,tc_done,{group_require_3_SUITE,t51,ok}}, - 
[{?eh,tc_start,{ct_framework,{ct_init_per_group,g7,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g7,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g7,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g7,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t71,ok}}, {?eh,tc_done,{group_require_3_SUITE,t72,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g7,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g7,[{suite,group_require_3_SUITE}]},ok}}], - {?eh,tc_start,{ct_framework,{ct_end_per_group,g5,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g5,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g7,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g7,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g5,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g5,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g8,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g8,[{suite,group_require_3_SUITE}]}, + [{?eh,tc_start,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]}, {skipped,{require_failed,{not_available,non_existing}}}}}, {?eh,tc_auto_skip,{group_require_3_SUITE,t81, {require_failed,{not_available,non_existing}}}}, {?eh,test_stats,{8,0,{0,2}}}, - {?eh,tc_auto_skip,{ct_framework,ct_end_per_group, + {?eh,tc_auto_skip,{ct_framework,end_per_group, {require_failed,{not_available,non_existing}}}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g9,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g9,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g9,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g9,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t91, {skipped,{require_failed,{not_available,non_existing}}}}}, {?eh,test_stats,{8,0,{0,3}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g9,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g9,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g9,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g9,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g10,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,g10,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g10,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g10,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t101,ok}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g10,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g10,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g10,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g10,[{suite,group_require_3_SUITE}]},ok}}], - [{?eh,tc_start,{ct_framework,{ct_init_per_group,g11,[{suite,group_require_3_SUITE}]}}}, - 
{?eh,tc_done,{ct_framework,{ct_init_per_group,g11,[{suite,group_require_3_SUITE}]},ok}}, + [{?eh,tc_start,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]},ok}}, {?eh,tc_done,{group_require_3_SUITE,t111,ok}}, {?eh,test_stats,{10,0,{0,3}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,g11,[{suite,group_require_3_SUITE}]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,g11,[{suite,group_require_3_SUITE}]},ok}}], + {?eh,tc_start,{ct_framework,{end_per_group,g11,[{suite,group_require_3_SUITE}]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,g11,[{suite,group_require_3_SUITE}]},ok}}], {?eh,test_done,{'DEF','STOP_TIME'}}, {?eh,stop_logging,[]} diff --git a/lib/common_test/test/ct_groups_test_2_SUITE.erl b/lib/common_test/test/ct_groups_test_2_SUITE.erl index 2392b0b850..c3601ba0ce 100644 --- a/lib/common_test/test/ct_groups_test_2_SUITE.erl +++ b/lib/common_test/test/ct_groups_test_2_SUITE.erl @@ -171,16 +171,16 @@ test_events(missing_conf) -> {?eh,start_logging,{'DEF','RUNDIR'}}, {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, {?eh,start_info,{1,1,2}}, - {?eh,tc_start,{ct_framework,{ct_init_per_group,group1,[]}}}, - {?eh,tc_done,{ct_framework,{ct_init_per_group,group1,[]},ok}}, + {?eh,tc_start,{ct_framework,{init_per_group,group1,[]}}}, + {?eh,tc_done,{ct_framework,{init_per_group,group1,[]},ok}}, {?eh,tc_start,{missing_conf_SUITE,tc1}}, {?eh,tc_done,{missing_conf_SUITE,tc1,ok}}, {?eh,test_stats,{1,0,{0,0}}}, {?eh,tc_start,{missing_conf_SUITE,tc2}}, {?eh,tc_done,{missing_conf_SUITE,tc2,ok}}, {?eh,test_stats,{2,0,{0,0}}}, - {?eh,tc_start,{ct_framework,{ct_end_per_group,group1,[]}}}, - {?eh,tc_done,{ct_framework,{ct_end_per_group,group1,[]},ok}}, + {?eh,tc_start,{ct_framework,{end_per_group,group1,[]}}}, + {?eh,tc_done,{ct_framework,{end_per_group,group1,[]},ok}}, {?eh,test_done,{'DEF','STOP_TIME'}}, {?eh,stop_logging,[]} ]; diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl index 2c519f08b5..efe57a7d0b 100644 --- a/lib/common_test/test/ct_hooks_SUITE.erl +++ b/lib/common_test/test/ct_hooks_SUITE.erl @@ -83,10 +83,9 @@ all(suite) -> fail_post_suite_cth, skip_pre_suite_cth, skip_post_suite_cth, recover_post_suite_cth, update_config_cth, state_update_cth, options_cth, same_id_cth, - fail_n_skip_with_minimal_cth, prio_cth + fail_n_skip_with_minimal_cth, prio_cth, no_config ] - ) - . + ). %%-------------------------------------------------------------------- @@ -214,6 +213,10 @@ prio_cth(Config) when is_list(Config) -> [{empty_cth,[1000],1000},{empty_cth,[900],900}, {prio_cth,[1100,100],100},{prio_cth,[1100]}],Config). +no_config(Config) when is_list(Config) -> + do_test(no_config, "ct_no_config_SUITE.erl", + [verify_config_cth],Config). 
+ %%%----------------------------------------------------------------- %%% HELP FUNCTIONS %%%----------------------------------------------------------------- @@ -1078,6 +1081,56 @@ test_events(prio_cth) -> {?eh,test_done,{'DEF','STOP_TIME'}}, {?eh,stop_logging,[]}]; +test_events(no_config) -> + [ + {?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,cth,{empty_cth,init,[verify_config_cth,[]]}}, + {?eh,start_info,{1,1,2}}, + {?eh,tc_start,{ct_framework,init_per_suite}}, + {?eh,cth,{empty_cth,pre_init_per_suite, + [ct_no_config_SUITE,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_init_per_suite, + [ct_no_config_SUITE,'$proplist','$proplist',[]]}}, + {?eh,tc_done,{ct_framework,init_per_suite,ok}}, + {?eh,tc_start,{ct_no_config_SUITE,test_case_1}}, + {?eh,cth,{empty_cth,pre_init_per_testcase, + [test_case_1,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_end_per_testcase, + [test_case_1,'$proplist',ok,[]]}}, + {?eh,tc_done,{ct_no_config_SUITE,test_case_1,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + [{?eh,tc_start,{ct_framework,{init_per_group,test_group,'$proplist'}}}, + {?eh,cth,{empty_cth,pre_init_per_group, + [test_group,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_init_per_group, + [test_group,'$proplist','$proplist',[]]}}, + {?eh,tc_done,{ct_framework, + {init_per_group,test_group,'$proplist'},ok}}, + {?eh,tc_start,{ct_no_config_SUITE,test_case_2}}, + {?eh,cth,{empty_cth,pre_init_per_testcase, + [test_case_2,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_end_per_testcase, + [test_case_2,'$proplist',ok,[]]}}, + {?eh,tc_done,{ct_no_config_SUITE,test_case_2,ok}}, + {?eh,test_stats,{2,0,{0,0}}}, + {?eh,tc_start,{ct_framework,{end_per_group,test_group,'$proplist'}}}, + {?eh,cth,{empty_cth,pre_end_per_group, + [test_group,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_end_per_group, + [test_group,'$proplist',ok,[]]}}, + {?eh,tc_done,{ct_framework,{end_per_group,test_group,'$proplist'},ok}}], + {?eh,tc_start,{ct_framework,end_per_suite}}, + {?eh,cth,{empty_cth,pre_end_per_suite, + [ct_no_config_SUITE,'$proplist',[]]}}, + {?eh,cth,{empty_cth,post_end_per_suite, + [ct_no_config_SUITE,'$proplist',ok,[]]}}, + {?eh,tc_done,{ct_framework,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,cth,{empty_cth,terminate,[[]]}}, + {?eh,stop_logging,[]} + ]; + test_events(ok) -> ok. diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/ct_no_config_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/ct_no_config_SUITE.erl new file mode 100644 index 0000000000..2ad80aa1e7 --- /dev/null +++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/ct_no_config_SUITE.erl @@ -0,0 +1,64 @@ +%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(ct_no_config_SUITE).
+
+%% Note: This directive should only be used in test suites.
+-compile(export_all).
+
+-include("ct.hrl").
+
+%%% This suite is used to verify 2 things:
+%%%
+%%% 1) All hook pre/post functions get called, even if no init/end
+%%% config functions exist in the suite (new from ver 1.6.1, R15B01).
+%%%
+%%% 2) The hook functions can read Config list elements, as well as
+%%% required config variables, even if no init/end config
+%%% functions exist.
+
+suite() ->
+ [{timetrap, {seconds,1}},
+ {ct_hooks, [verify_config_cth]},
+ {require,suite_cfg},
+ {default_config,suite_cfg,?MODULE}].
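The {require,suite_cfg} and {default_config,suite_cfg,?MODULE} entries in suite/0 above are what let the hook callbacks read suite_cfg via ct:get_config/1 even though this suite defines no init_per_suite/1; verify_config_cth, added later in this patch, performs exactly that check. A minimal sketch of such a read from a hook, assuming a hypothetical hook module name my_cth (the callback shapes mirror empty_cth/verify_config_cth below):

    %% Illustrative sketch only; my_cth is a placeholder name.
    -module(my_cth).
    -export([init/2, pre_init_per_suite/3]).

    init(_Id, Opts) ->
        {ok, Opts}.

    pre_init_per_suite(_Suite, Config, State) ->
        %% suite_cfg is readable here because suite/0 returned
        %% {require,suite_cfg} and {default_config,suite_cfg,ct_no_config_SUITE},
        %% even though the suite has no init_per_suite/1.
        ct_no_config_SUITE = ct:get_config(suite_cfg),
        {Config, State}.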
+
+group(test_group) ->
+ [{require,group_cfg},
+ {default_config,group_cfg,test_group}].
+
+test_case_1() ->
+ [{require,test_case_1_cfg},
+ {default_config,test_case_1_cfg,test_case_1}].
+
+test_case_2() ->
+ [{require,test_case_2_cfg},
+ {default_config,test_case_2_cfg,test_case_2}].
+
+all() ->
+ [test_case_1, {group,test_group}].
+
+groups() ->
+ [{test_group,[],[test_case_2]}].
+
+test_case_1(_Config) ->
+ ok.
+
+test_case_2(_Config) ->
+ ok.
diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl index 7befcfa57c..2529e806ea 100644 --- a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl +++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl @@ -1,277 +1,277 @@ -%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-%%% @doc Common Test Example Suite Callback module.
-%%%
-%%% <p>This module gives an example of a common test CTH (Common Test Hook).
-%%% There are many ways to add a CTH to a test run, you can do it either in
-%%% the command line using -ct_hook, in a test spec using
-%%% {ct_hook,M} or in the suite it self by returning ct_hook
-%%% from either suite/0, init_per_suite/1, init_per_group/2 and
-%%% init_per_testcase/2. The scope of the CTH is determined by where is it
-%%% started. If it is started in the command line or test spec then it will
-%%% be stopped at the end of all tests. If it is started in init_per_suite,
-%%% it will be stopped after end_per_suite and so on. See terminate
-%%% documentation for a table describing the scoping machanics.
-%%%
-%%% All of callbacks except init/1 in a CTH are optional.</p>
-
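As a concrete illustration of the in-suite attachment method described above (the other methods being the command line and the test spec), a suite can pull in a hook from its suite/0 info function; this is the method ct_no_config_SUITE uses with verify_config_cth earlier in this patch. A hedged sketch, with my_SUITE and my_cth as placeholder names:

    %% Illustrative sketch only: attaching a CTH for a whole suite from
    %% suite/0. The hook then stays active until after end_per_suite has
    %% run (see the terminate/1 scoping table further down in this file).
    -module(my_SUITE).
    -compile(export_all).

    suite() ->
        [{ct_hooks,[my_cth]}].

    all() ->
        [].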
--module(empty_cth).
-
-%% CT Hooks
--export([id/1]).
--export([init/2]).
-
--export([pre_init_per_suite/3]).
--export([post_init_per_suite/4]).
--export([pre_end_per_suite/3]).
--export([post_end_per_suite/4]).
-
--export([pre_init_per_group/3]).
--export([post_init_per_group/4]).
--export([pre_end_per_group/3]).
--export([post_end_per_group/4]).
-
--export([pre_init_per_testcase/3]).
--export([post_end_per_testcase/4]).
-
--export([on_tc_fail/3]).
--export([on_tc_skip/3]).
-
--export([terminate/1]).
-
--include_lib("common_test/src/ct_util.hrl").
--include_lib("common_test/include/ct_event.hrl").
-
+%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2010-2011. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% + +%%% @doc Common Test Example Suite Callback module. +%%% +%%% <p>This module gives an example of a common test CTH (Common Test Hook). +%%% There are many ways to add a CTH to a test run, you can do it either in +%%% the command line using -ct_hook, in a test spec using +%%% {ct_hook,M} or in the suite it self by returning ct_hook +%%% from either suite/0, init_per_suite/1, init_per_group/2 and +%%% init_per_testcase/2. The scope of the CTH is determined by where is it +%%% started. If it is started in the command line or test spec then it will +%%% be stopped at the end of all tests. If it is started in init_per_suite, +%%% it will be stopped after end_per_suite and so on. See terminate +%%% documentation for a table describing the scoping machanics. +%%% +%%% All of callbacks except init/1 in a CTH are optional.</p> + +-module(empty_cth). + +%% CT Hooks +-export([id/1]). +-export([init/2]). + +-export([pre_init_per_suite/3]). +-export([post_init_per_suite/4]). +-export([pre_end_per_suite/3]). +-export([post_end_per_suite/4]). + +-export([pre_init_per_group/3]). +-export([post_init_per_group/4]). +-export([pre_end_per_group/3]). +-export([post_end_per_group/4]). + +-export([pre_init_per_testcase/3]). +-export([post_end_per_testcase/4]). + +-export([on_tc_fail/3]). +-export([on_tc_skip/3]). + +-export([terminate/1]). + +-include_lib("common_test/src/ct_util.hrl"). +-include_lib("common_test/include/ct_event.hrl"). + -type config() :: proplists:proplist(). --type reason() :: term().
--type skip_or_fail() :: {skip, reason()} |
- {auto_skip, reason()} |
- {fail, reason()} |
- {'EXIT',reason()}.
-
--record(state, { id = ?MODULE :: term()}).
-
-%% @doc Always called before any other callback function. Use this to initiate
-%% any common state. It should return a state for this CTH.
+-type reason() :: term(). +-type skip_or_fail() :: {skip, reason()} | + {auto_skip, reason()} | + {fail, reason()} | + {'EXIT',reason()}. + +-record(state, { id = ?MODULE :: term()}). + +%% @doc Always called before any other callback function. Use this to initiate +%% any common state. It should return an state for this CTH. -spec init(Id :: term(), Opts :: proplists:proplist()) -> - {ok, State :: #state{}}.
-init(Id, Opts) ->
- gen_event:notify(?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, init, [Id, Opts]}}),
- {ok,Opts}.
-
-%% @doc The ID is used to uniquely identify a CTH instance; if two CTHs
-%% return the same ID, the second CTH is ignored. This function should NOT
-%% have any side effects as it might be called multiple times by common test.
+ {ok, State :: #state{}}. +init(Id, Opts) -> + gen_event:notify(?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, init, [Id, Opts]}}), + {ok,Opts}. + +%% @doc The ID is used to uniquly identify an CTH instance, if two CTH's +%% return the same ID the seconds CTH is ignored. This function should NOT +%% have any side effects as it might be called multiple times by common test. -spec id(Opts :: proplists:proplist()) -> - Id :: term().
-id(Opts) ->
- gen_event:notify(?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, id, [Opts]}}),
- now().
-
-%% @doc Called before init_per_suite is called. Note that this callback is
-%% only called if the CTH is added before init_per_suite is run (eg. in a test
-%% specification, suite/0 function etc).
-%% You can change the config in this function.
--spec pre_init_per_suite(Suite :: atom(),
- Config :: config(),
- State :: #state{}) ->
- {config() | skip_or_fail(), NewState :: #state{}}.
-pre_init_per_suite(Suite,Config,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, pre_init_per_suite,
- [Suite,Config,State]}}),
- {Config, State}.
-
-%% @doc Called after init_per_suite.
-%% You can change the return value in this function.
--spec post_init_per_suite(Suite :: atom(),
- Config :: config(),
- Return :: config() | skip_or_fail(),
- State :: #state{}) ->
- {config() | skip_or_fail(), NewState :: #state{}}.
-post_init_per_suite(Suite,Config,Return,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, post_init_per_suite,
- [Suite,Config,Return,State]}}),
- {Return, State}.
-
-%% @doc Called before end_per_suite. The config/state can be changed here,
-%% though it will only affect the *end_per_suite function.
--spec pre_end_per_suite(Suite :: atom(),
- Config :: config() | skip_or_fail(),
- State :: #state{}) ->
- {ok | skip_or_fail(), NewState :: #state{}}.
-pre_end_per_suite(Suite,Config,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, pre_end_per_suite,
- [Suite,Config,State]}}),
- {Config, State}.
-
-%% @doc Called after end_per_suite. Note that the config cannot be
-%% changed here, only the status of the suite.
--spec post_end_per_suite(Suite :: atom(),
- Config :: config(),
- Return :: term(),
- State :: #state{}) ->
- {ok | skip_or_fail(), NewState :: #state{}}.
-post_end_per_suite(Suite,Config,Return,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, post_end_per_suite,
- [Suite,Config,Return,State]}}),
- {Return, State}.
-
-%% @doc Called before each init_per_group.
-%% You can change the config in this function.
--spec pre_init_per_group(Group :: atom(),
- Config :: config(),
- State :: #state{}) ->
- {config() | skip_or_fail(), NewState :: #state{}}.
-pre_init_per_group(Group,Config,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, pre_init_per_group,
- [Group,Config,State]}}),
- {Config, State}.
-
-%% @doc Called after each init_per_group.
-%% You can change the return value in this function.
--spec post_init_per_group(Group :: atom(),
- Config :: config(),
- Return :: config() | skip_or_fail(),
- State :: #state{}) ->
- {config() | skip_or_fail(), NewState :: #state{}}.
-post_init_per_group(Group,Config,Return,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, post_init_per_group,
- [Group,Config,Return,State]}}),
- {Return, State}.
-
-%% @doc Called before each end_per_group. The config/state can be changed here,
-%% though it will only affect the *end_per_group functions.
--spec pre_end_per_group(Group :: atom(),
- Config :: config() | skip_or_fail(),
- State :: #state{}) ->
- {ok | skip_or_fail(), NewState :: #state{}}.
-pre_end_per_group(Group,Config,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, pre_end_per_group,
- [Group,Config,State]}}),
- {Config, State}.
-
-%% @doc Called after each end_per_group. Note that the config cannot be
-%% changed here, only the status of the group.
--spec post_end_per_group(Group :: atom(),
- Config :: config(),
- Return :: term(),
- State :: #state{}) ->
- {ok | skip_or_fail(), NewState :: #state{}}.
-post_end_per_group(Group,Config,Return,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, post_end_per_group,
- [Group,Config,Return,State]}}),
- {Return, State}.
-
-%% @doc Called before each test case.
-%% You can change the config in this function.
--spec pre_init_per_testcase(TC :: atom(),
- Config :: config(),
- State :: #state{}) ->
- {config() | skip_or_fail(), NewState :: #state{}}.
-pre_init_per_testcase(TC,Config,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, pre_init_per_testcase,
- [TC,Config,State]}}),
- {Config, State}.
-
-%% @doc Called after each test case. Note that the config cannot be
-%% changed here, only the status of the test case.
--spec post_end_per_testcase(TC :: atom(),
- Config :: config(),
- Return :: term(),
- State :: #state{}) ->
- {ok | skip_or_fail(), NewState :: #state{}}.
-post_end_per_testcase(TC,Config,Return,State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, post_end_per_testcase,
- [TC,Config,Return,State]}}),
- {Return, State}.
-
-%% @doc Called after post_init_per_suite, post_end_per_suite, post_init_per_group,
-%% post_end_per_group and post_end_per_tc if the suite, group or test case failed.
-%% This function should be used for extra cleanup which might be needed.
-%% It is not possible to modify the config or the status of the test run.
--spec on_tc_fail(TC :: init_per_suite | end_per_suite |
- init_per_group | end_per_group | atom(),
- Reason :: term(), State :: #state{}) ->
- NewState :: #state{}.
-on_tc_fail(TC, Reason, State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, on_tc_fail,
- [TC,Reason,State]}}),
- State.
-
-%% @doc Called when a test case is skipped by either user action
-%% or due to an init function failing. Test case can be
-%% end_per_suite, init_per_group, end_per_group and the actual test cases.
--spec on_tc_skip(TC :: end_per_suite |
- init_per_group | end_per_group | atom(),
- {tc_auto_skip, {failed, {Mod :: atom(), Function :: atom(), Reason :: term()}}} |
- {tc_user_skip, {skipped, Reason :: term()}},
- State :: #state{}) ->
- NewState :: #state{}.
-on_tc_skip(TC, Reason, State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, on_tc_skip,
- [TC,Reason,State]}}),
- State.
-
-%% @doc Called when the scope of the CTH is done; this depends on
-%% when the CTH was specified. This translation table describes when this
-%% function is called.
-%%
-%% | Started in | terminate called |
-%% |---------------------|-------------------------|
-%% | command_line | after all tests are run |
-%% | test spec | after all tests are run |
-%% | suite/0 | after SUITE is done |
-%% | init_per_suite/1 | after SUITE is done |
-%% | init_per_group/2 | after group is done |
-%% |-----------------------------------------------|
-%%
--spec terminate(State :: #state{}) ->
- term().
-terminate(State) ->
- gen_event:notify(
- ?CT_EVMGR_REF, #event{ name = cth, node = node(),
- data = {?MODULE, terminate, [State]}}),
- ok.
+ Id :: term(). +id(Opts) -> + gen_event:notify(?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, id, [Opts]}}), + now(). + +%% @doc Called before init_per_suite is called. Note that this callback is +%% only called if the CTH is added before init_per_suite is run (eg. in a test +%% specification, suite/0 function etc). +%% You can change the config in the this function. +-spec pre_init_per_suite(Suite :: atom(), + Config :: config(), + State :: #state{}) -> + {config() | skip_or_fail(), NewState :: #state{}}. +pre_init_per_suite(Suite,Config,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, pre_init_per_suite, + [Suite,Config,State]}}), + {Config, State}. + +%% @doc Called after init_per_suite. +%% you can change the return value in this function. +-spec post_init_per_suite(Suite :: atom(), + Config :: config(), + Return :: config() | skip_or_fail(), + State :: #state{}) -> + {config() | skip_or_fail(), NewState :: #state{}}. +post_init_per_suite(Suite,Config,Return,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, post_init_per_suite, + [Suite,Config,Return,State]}}), + {Return, State}. + +%% @doc Called before end_per_suite. The config/state can be changed here, +%% though it will only affect the *end_per_suite function. +-spec pre_end_per_suite(Suite :: atom(), + Config :: config() | skip_or_fail(), + State :: #state{}) -> + {ok | skip_or_fail(), NewState :: #state{}}. +pre_end_per_suite(Suite,Config,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, pre_end_per_suite, + [Suite,Config,State]}}), + {Config, State}. + +%% @doc Called after end_per_suite. Note that the config cannot be +%% changed here, only the status of the suite. +-spec post_end_per_suite(Suite :: atom(), + Config :: config(), + Return :: term(), + State :: #state{}) -> + {ok | skip_or_fail(), NewState :: #state{}}. +post_end_per_suite(Suite,Config,Return,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, post_end_per_suite, + [Suite,Config,Return,State]}}), + {Return, State}. + +%% @doc Called before each init_per_group. +%% You can change the config in this function. +-spec pre_init_per_group(Group :: atom(), + Config :: config(), + State :: #state{}) -> + {config() | skip_or_fail(), NewState :: #state{}}. +pre_init_per_group(Group,Config,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, pre_init_per_group, + [Group,Config,State]}}), + {Config, State}. + +%% @doc Called after each init_per_group. +%% You can change the return value in this function. +-spec post_init_per_group(Group :: atom(), + Config :: config(), + Return :: config() | skip_or_fail(), + State :: #state{}) -> + {config() | skip_or_fail(), NewState :: #state{}}. +post_init_per_group(Group,Config,Return,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, post_init_per_group, + [Group,Config,Return,State]}}), + {Return, State}. + +%% @doc Called after each end_per_group. The config/state can be changed here, +%% though it will only affect the *end_per_group functions. +-spec pre_end_per_group(Group :: atom(), + Config :: config() | skip_or_fail(), + State :: #state{}) -> + {ok | skip_or_fail(), NewState :: #state{}}. 
+pre_end_per_group(Group,Config,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, pre_end_per_group, + [Group,Config,State]}}), + {Config, State}. + +%% @doc Called after each end_per_group. Note that the config cannot be +%% changed here, only the status of the group. +-spec post_end_per_group(Group :: atom(), + Config :: config(), + Return :: term(), + State :: #state{}) -> + {ok | skip_or_fail(), NewState :: #state{}}. +post_end_per_group(Group,Config,Return,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, post_end_per_group, + [Group,Config,Return,State]}}), + {Return, State}. + +%% @doc Called before each test case. +%% You can change the config in this function. +-spec pre_init_per_testcase(TC :: atom(), + Config :: config(), + State :: #state{}) -> + {config() | skip_or_fail(), NewState :: #state{}}. +pre_init_per_testcase(TC,Config,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, pre_init_per_testcase, + [TC,Config,State]}}), + {Config, State}. + +%% @doc Called after each test case. Note that the config cannot be +%% changed here, only the status of the test case. +-spec post_end_per_testcase(TC :: atom(), + Config :: config(), + Return :: term(), + State :: #state{}) -> + {ok | skip_or_fail(), NewState :: #state{}}. +post_end_per_testcase(TC,Config,Return,State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, post_end_per_testcase, + [TC,Config,Return,State]}}), + {Return, State}. + +%% @doc Called after post_init_per_suite, post_end_per_suite, post_init_per_group, +%% post_end_per_group and post_end_per_tc if the suite, group or test case failed. +%% This function should be used for extra cleanup which might be needed. +%% It is not possible to modify the config or the status of the test run. +-spec on_tc_fail(TC :: init_per_suite | end_per_suite | + init_per_group | end_per_group | atom(), + Reason :: term(), State :: #state{}) -> + NewState :: #state{}. +on_tc_fail(TC, Reason, State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, on_tc_fail, + [TC,Reason,State]}}), + State. + +%% @doc Called when a test case is skipped by either user action +%% or due to an init function failing. Test case can be +%% end_per_suite, init_per_group, end_per_group and the actual test cases. +-spec on_tc_skip(TC :: end_per_suite | + init_per_group | end_per_group | atom(), + {tc_auto_skip, {failed, {Mod :: atom(), Function :: atom(), Reason :: term()}}} | + {tc_user_skip, {skipped, Reason :: term()}}, + State :: #state{}) -> + NewState :: #state{}. +on_tc_skip(TC, Reason, State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, on_tc_skip, + [TC,Reason,State]}}), + State. + +%% @doc Called when the scope of the CTH is done, this depends on +%% when the CTH was specified. This translation table describes when this +%% function is called. +%% +%% | Started in | terminate called | +%% |---------------------|-------------------------| +%% | command_line | after all tests are run | +%% | test spec | after all tests are run | +%% | suite/0 | after SUITE is done | +%% | init_per_suite/1 | after SUITE is done | +%% | init_per_group/2 | after group is done | +%% |-----------------------------------------------| +%% +-spec terminate(State :: #state{}) -> + term(). 
+terminate(State) -> + gen_event:notify( + ?CT_EVMGR_REF, #event{ name = cth, node = node(), + data = {?MODULE, terminate, [State]}}), + ok. diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/verify_config_cth.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/verify_config_cth.erl new file mode 100644 index 0000000000..99ea261e14 --- /dev/null +++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/verify_config_cth.erl @@ -0,0 +1,130 @@ +%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(verify_config_cth).
+
+-include_lib("common_test/src/ct_util.hrl").
+
+%% CT Hooks
+-compile(export_all).
+
+-define(val(K, L), proplists:get_value(K, L)).
+
+id(_Opts) ->
+ ?MODULE.
+
+init(Id, Opts) ->
+ {ok, State} = empty_cth:init(Id, Opts),
+ {ok, State}.
+
+pre_init_per_suite(Suite, Config, State) ->
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ empty_cth:pre_init_per_suite(Suite,
+ [{pre_init_per_suite,true} | Config],
+ State).
+
+post_init_per_suite(Suite,Config,Return,State) ->
+ true = ?val(pre_init_per_suite, Return),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ empty_cth:post_init_per_suite(Suite,
+ Config,
+ [{post_init_per_suite,true} | Return],
+ State).
+
+pre_end_per_suite(Suite,Config,State) ->
+ true = ?val(post_init_per_suite, Config),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ empty_cth:pre_end_per_suite(Suite,
+ [{pre_end_per_suite,true} | Config],
+ State).
+
+post_end_per_suite(Suite,Config,Return,State) ->
+ true = ?val(pre_end_per_suite, Config),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ empty_cth:post_end_per_suite(Suite,Config,Return,State).
+
+pre_init_per_group(Group,Config,State) ->
+ true = ?val(post_init_per_suite, Config),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ test_group = ct:get_config(group_cfg),
+ empty_cth:pre_init_per_group(Group,
+ [{pre_init_per_group,true} | Config],
+ State).
+
+post_init_per_group(Group,Config,Return,State) ->
+ true = ?val(pre_init_per_group, Return),
+ test_group = ct:get_config(group_cfg),
+ empty_cth:post_init_per_group(Group,
+ Config,
+ [{post_init_per_group,true} | Return],
+ State).
+
+pre_end_per_group(Group,Config,State) ->
+ true = ?val(post_init_per_group, Config),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ test_group = ct:get_config(group_cfg),
+ empty_cth:pre_end_per_group(Group,
+ [{pre_end_per_group,true} | Config],
+ State).
+
+post_end_per_group(Group,Config,Return,State) ->
+ true = ?val(pre_end_per_group, Config),
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ test_group = ct:get_config(group_cfg),
+ empty_cth:post_end_per_group(Group,Config,Return,State).
+
+pre_init_per_testcase(TC,Config,State) ->
+ true = ?val(post_init_per_suite, Config),
+ case ?val(name, ?val(tc_group_properties, Config)) of
+ undefined ->
+ ok;
+ _ ->
+ true = ?val(post_init_per_group, Config),
+ test_group = ct:get_config(group_cfg)
+ end,
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ CfgKey = list_to_atom(atom_to_list(TC) ++ "_cfg"),
+ TC = ct:get_config(CfgKey),
+ empty_cth:pre_init_per_testcase(TC,
+ [{pre_init_per_testcase,true} | Config],
+ State).
+
+post_end_per_testcase(TC,Config,Return,State) ->
+ true = ?val(post_init_per_suite, Config),
+ true = ?val(pre_init_per_testcase, Config),
+ case ?val(name, ?val(tc_group_properties, Config)) of
+ undefined ->
+ ok;
+ _ ->
+ true = ?val(post_init_per_group, Config),
+ test_group = ct:get_config(group_cfg)
+ end,
+ ct_no_config_SUITE = ct:get_config(suite_cfg),
+ CfgKey = list_to_atom(atom_to_list(TC) ++ "_cfg"),
+ TC = ct:get_config(CfgKey),
+ empty_cth:post_end_per_testcase(TC,Config,Return,State).
+
+on_tc_fail(TC, Reason, State) ->
+ empty_cth:on_tc_fail(TC,Reason,State).
+
+on_tc_skip(TC, Reason, State) ->
+ empty_cth:on_tc_skip(TC,Reason,State).
+
+terminate(State) ->
+ empty_cth:terminate(State).
diff --git a/lib/common_test/test/ct_priv_dir_SUITE.erl b/lib/common_test/test/ct_priv_dir_SUITE.erl new file mode 100644 index 0000000000..f6942d59bf --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE.erl @@ -0,0 +1,277 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2009-2011. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% + +%%%------------------------------------------------------------------- +%%% File: ct_priv_dir_SUITE +%%% +%%% Description: +%%% Test that it works to use the create_priv_dir option. +%%% +%%%------------------------------------------------------------------- +-module(ct_priv_dir_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). + +-define(eh, ct_test_support_eh). + +%%-------------------------------------------------------------------- +%% TEST SERVER CALLBACK FUNCTIONS +%%-------------------------------------------------------------------- + +%%-------------------------------------------------------------------- +%% Description: Since Common Test starts another Test Server +%% instance, the tests need to be performed on a separate node (or +%% there will be clashes with logging processes etc). +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + Config1 = ct_test_support:init_per_suite(Config), + Config1. + +end_per_suite(Config) -> + ct_test_support:end_per_suite(Config). + +init_per_testcase(TestCase, Config) -> + ct_test_support:init_per_testcase(TestCase, Config). + +end_per_testcase(TestCase, Config) -> + ct_test_support:end_per_testcase(TestCase, Config). + +suite() -> [{ct_hooks,[ts_install_cth]}]. + +all() -> + [ + default, + auto_per_run, + auto_per_tc, + manual_per_tc, + spec_default, + spec_auto_per_run, + spec_auto_per_run, + spec_manual_per_tc + ]. + +%%-------------------------------------------------------------------- +%% TEST CASES +%%-------------------------------------------------------------------- + +%%%----------------------------------------------------------------- +%%% +default(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Suite = filename:join(DataDir, "priv_dir_SUITE"), + {Opts,ERPid} = setup([{suite,Suite},{testcase,default}, + {label,default}], Config), + ok = execute(default, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +auto_per_run(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Suite = filename:join(DataDir, "priv_dir_SUITE"), + {Opts,ERPid} = setup([{suite,Suite},{testcase,default}, + {label,auto_per_run}, + {create_priv_dir,auto_per_run}], Config), + ok = execute(auto_per_run, Opts, ERPid, Config). 
+ +%%%----------------------------------------------------------------- +%%% +auto_per_tc(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Suite = filename:join(DataDir, "priv_dir_SUITE"), + {Opts,ERPid} = setup([{suite,Suite},{testcase,auto_per_tc}, + {label,auto_per_tc}, + {create_priv_dir,auto_per_tc}], Config), + ok = execute(auto_per_tc, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +manual_per_tc(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Suite = filename:join(DataDir, "priv_dir_SUITE"), + {Opts,ERPid} = setup([{suite,Suite},{testcase,manual_per_tc}, + {label,manual_per_tc}, + {create_priv_dir,manual_per_tc}], Config), + ok = execute(manual_per_tc, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +spec_default(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Spec = filename:join(DataDir, "default.spec"), + {Opts,ERPid} = setup([{spec,Spec}, + {label,spec_default}], Config), + ok = execute(spec_default, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +spec_auto_per_run(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Spec = filename:join(DataDir, "auto_per_run.spec"), + {Opts,ERPid} = setup([{spec,Spec}, + {label,spec_auto_per_run}], Config), + ok = execute(spec_auto_per_run, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +spec_auto_per_tc(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Spec = filename:join(DataDir, "auto_per_tc.spec"), + {Opts,ERPid} = setup([{spec,Spec}, + {label,spec_auto_per_tc}], Config), + ok = execute(spec_auto_per_tc, Opts, ERPid, Config). + +%%%----------------------------------------------------------------- +%%% +spec_manual_per_tc(Config) when is_list(Config) -> + DataDir = ?config(data_dir, Config), + Spec = filename:join(DataDir, "manual_per_tc.spec"), + {Opts,ERPid} = setup([{spec,Spec}, + {label,spec_manual_per_tc}], Config), + ok = execute(spec_manual_per_tc, Opts, ERPid, Config). + + +%%%----------------------------------------------------------------- +%%% HELP FUNCTIONS +%%%----------------------------------------------------------------- + +setup(Test, Config) -> + Opts0 = ct_test_support:get_opts(Config), + Level = ?config(trace_level, Config), + EvHArgs = [{cbm,ct_test_support},{trace_level,Level}], + Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test], + ERPid = ct_test_support:start_event_receiver(Config), + {Opts,ERPid}. + +execute(Name, Opts, ERPid, Config) -> + ok = ct_test_support:run(Opts, Config), + Events = ct_test_support:get_events(ERPid, Config), + + ct_test_support:log_events(Name, + reformat(Events, ?eh), + ?config(priv_dir, Config), + Opts), + + TestEvents = events_to_check(Name), + ct_test_support:verify_events(TestEvents, Events, Config). + +reformat(Events, EH) -> + ct_test_support:reformat(Events, EH). + +%%%----------------------------------------------------------------- +%%% TEST EVENTS +%%%----------------------------------------------------------------- +events_to_check(Test) -> + %% 2 tests (ct:run_test + script_start) is default + events_to_check(Test, 2). + +events_to_check(_, 0) -> + []; +events_to_check(Test, N) -> + test_events(Test) ++ events_to_check(Test, N-1). 
+ + +test_events(DEF) when DEF == default ; DEF == auto_per_run -> + [ + {?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,default}}, + {?eh,tc_done,{priv_dir_SUITE,default,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]; + +test_events(auto_per_tc) -> + [{?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,auto_per_tc}}, + {?eh,tc_done,{priv_dir_SUITE,auto_per_tc,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]; + +test_events(manual_per_tc) -> + [{?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,manual_per_tc}}, + {?eh,tc_done,{priv_dir_SUITE,manual_per_tc,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]; + +test_events(SPECDEF) when SPECDEF == spec_default ; + SPECDEF == spec_auto_per_run -> + [{?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,default}}, + {?eh,tc_done,{priv_dir_SUITE,default,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]; + +test_events(spec_auto_per_tc) -> + [{?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,auto_per_tc}}, + {?eh,tc_done,{priv_dir_SUITE,auto_per_tc,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]; + +test_events(spec_manual_per_tc) -> + [{?eh,start_logging,{'DEF','RUNDIR'}}, + {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}, + {?eh,start_info,{1,1,1}}, + {?eh,tc_start,{priv_dir_SUITE,init_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,init_per_suite,ok}}, + {?eh,tc_start,{priv_dir_SUITE,manual_per_tc}}, + {?eh,tc_done,{priv_dir_SUITE,manual_per_tc,ok}}, + {?eh,test_stats,{1,0,{0,0}}}, + {?eh,tc_start,{priv_dir_SUITE,end_per_suite}}, + {?eh,tc_done,{priv_dir_SUITE,end_per_suite,ok}}, + {?eh,test_done,{'DEF','STOP_TIME'}}, + {?eh,stop_logging,[]}]. 
+ diff --git a/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_run.spec b/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_run.spec new file mode 100644 index 0000000000..4dde0ed1f4 --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_run.spec @@ -0,0 +1,5 @@ +{create_priv_dir, auto_per_run}. + +{alias, curr, "./"}. + +{cases, curr, priv_dir_SUITE, default}.
\ No newline at end of file diff --git a/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_tc.spec b/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_tc.spec new file mode 100644 index 0000000000..c265500865 --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE_data/auto_per_tc.spec @@ -0,0 +1,5 @@ +{create_priv_dir, auto_per_tc}. + +{alias, curr, "./"}. + +{cases, curr, priv_dir_SUITE, auto_per_tc}.
\ No newline at end of file diff --git a/lib/common_test/test/ct_priv_dir_SUITE_data/default.spec b/lib/common_test/test/ct_priv_dir_SUITE_data/default.spec new file mode 100644 index 0000000000..2f053e792f --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE_data/default.spec @@ -0,0 +1,3 @@ +{alias, curr, "./"}. + +{cases, curr, priv_dir_SUITE, default}.
\ No newline at end of file diff --git a/lib/common_test/test/ct_priv_dir_SUITE_data/manual_per_tc.spec b/lib/common_test/test/ct_priv_dir_SUITE_data/manual_per_tc.spec new file mode 100644 index 0000000000..4f98734d5f --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE_data/manual_per_tc.spec @@ -0,0 +1,5 @@ +{create_priv_dir, manual_per_tc}. + +{alias, curr, "./"}. + +{cases, curr, priv_dir_SUITE, manual_per_tc}.
\ No newline at end of file diff --git a/lib/common_test/test/ct_priv_dir_SUITE_data/priv_dir_SUITE.erl b/lib/common_test/test/ct_priv_dir_SUITE_data/priv_dir_SUITE.erl new file mode 100644 index 0000000000..423cb2999b --- /dev/null +++ b/lib/common_test/test/ct_priv_dir_SUITE_data/priv_dir_SUITE.erl @@ -0,0 +1,127 @@ +%%%------------------------------------------------------------------- +%%% @author Peter Andersson <[email protected]> +%%% @copyright (C) 2012, Peter Andersson +%%% @doc +%%% +%%% @end +%%% Created : 23 Jan 2012 by Peter Andersson <[email protected]> +%%%------------------------------------------------------------------- +-module(priv_dir_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). + +%%-------------------------------------------------------------------- +%% @spec suite() -> Info +%% Info = [tuple()] +%% @end +%%-------------------------------------------------------------------- +suite() -> + [{timetrap,{seconds,30}}]. + +%%-------------------------------------------------------------------- +%% @spec init_per_suite(Config0) -> +%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} +%% Config0 = Config1 = [tuple()] +%% Reason = term() +%% @end +%%-------------------------------------------------------------------- +init_per_suite(Config) -> + Config. + +%%-------------------------------------------------------------------- +%% @spec end_per_suite(Config0) -> void() | {save_config,Config1} +%% Config0 = Config1 = [tuple()] +%% @end +%%-------------------------------------------------------------------- +end_per_suite(_Config) -> + ok. + +%%-------------------------------------------------------------------- +%% @spec init_per_group(GroupName, Config0) -> +%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} +%% GroupName = atom() +%% Config0 = Config1 = [tuple()] +%% Reason = term() +%% @end +%%-------------------------------------------------------------------- +init_per_group(_GroupName, Config) -> + Config. + +%%-------------------------------------------------------------------- +%% @spec end_per_group(GroupName, Config0) -> +%% void() | {save_config,Config1} +%% GroupName = atom() +%% Config0 = Config1 = [tuple()] +%% @end +%%-------------------------------------------------------------------- +end_per_group(_GroupName, _Config) -> + ok. + +%%-------------------------------------------------------------------- +%% @spec init_per_testcase(TestCase, Config0) -> +%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} +%% TestCase = atom() +%% Config0 = Config1 = [tuple()] +%% Reason = term() +%% @end +%%-------------------------------------------------------------------- +init_per_testcase(_TestCase, Config) -> + Config. + +%%-------------------------------------------------------------------- +%% @spec end_per_testcase(TestCase, Config0) -> +%% void() | {save_config,Config1} | {fail,Reason} +%% TestCase = atom() +%% Config0 = Config1 = [tuple()] +%% Reason = term() +%% @end +%%-------------------------------------------------------------------- +end_per_testcase(_TestCase, _Config) -> + ok. 
+ +%%-------------------------------------------------------------------- +%% @spec groups() -> [Group] +%% Group = {GroupName,Properties,GroupsAndTestCases} +%% GroupName = atom() +%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}] +%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase] +%% TestCase = atom() +%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}} +%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail | +%% repeat_until_any_ok | repeat_until_any_fail +%% N = integer() | forever +%% @end +%%-------------------------------------------------------------------- +groups() -> + []. + +%%-------------------------------------------------------------------- +%% @spec all() -> GroupsAndTestCases | {skip,Reason} +%% GroupsAndTestCases = [{group,GroupName} | TestCase] +%% GroupName = atom() +%% TestCase = atom() +%% Reason = term() +%% @end +%%-------------------------------------------------------------------- +all() -> + []. + +default(Config) -> + PrivDir = proplists:get_value(priv_dir, Config), + "log_private" = filename:basename(PrivDir), + {ok,_} = file:list_dir(PrivDir). + +auto_per_tc(Config) -> + PrivDir = proplists:get_value(priv_dir, Config), + ["log_private",_] = string:tokens(filename:basename(PrivDir), "."), + {ok,_} = file:list_dir(PrivDir). + +manual_per_tc(Config) -> + PrivDir = proplists:get_value(priv_dir, Config), + ["log_private",_] = string:tokens(filename:basename(PrivDir), "."), + {error,_} = file:list_dir(PrivDir), + ok = ct:make_priv_dir(), + {ok,_} = file:list_dir(PrivDir). + diff --git a/lib/common_test/test/ct_test_server_if_1_SUITE.erl b/lib/common_test/test/ct_test_server_if_1_SUITE.erl index efc0309781..8825d84884 100644 --- a/lib/common_test/test/ct_test_server_if_1_SUITE.erl +++ b/lib/common_test/test/ct_test_server_if_1_SUITE.erl @@ -242,25 +242,28 @@ test_events(ts_if_1) -> {?eh,tc_auto_skip,{ts_if_5_SUITE,end_per_suite, {require_failed_in_suite0,{not_available,undef_variable}}}}, - {?eh,tc_start,{ts_if_6_SUITE,tc1}}, - {?eh,tc_done,{ts_if_6_SUITE,tc1,{failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}}, - {?eh,test_stats,{3,5,{6,8}}}, + {?eh,tc_start,{ct_framework,init_per_suite}}, + {?eh,tc_done,{ct_framework,init_per_suite, + {failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}}, + {?eh,tc_auto_skip,{ts_if_6_SUITE,tc1, + {failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}}, + {?eh,test_stats,{3,5,{5,9}}}, {?eh,tc_start,{ts_if_7_SUITE,tc1}}, {?eh,tc_done,{ts_if_7_SUITE,tc1,ok}}, - {?eh,test_stats,{4,5,{6,8}}}, + {?eh,test_stats,{4,5,{5,9}}}, {?eh,tc_start,{ts_if_8_SUITE,tc1}}, {?eh,tc_done,{ts_if_8_SUITE,tc1,{failed,{error,failed_on_purpose}}}}, - {?eh,test_stats,{4,6,{6,8}}}, + {?eh,test_stats,{4,6,{5,9}}}, {?eh,tc_user_skip,{skipped_by_spec_1_SUITE,all,"should be skipped"}}, - {?eh,test_stats,{4,6,{7,8}}}, + {?eh,test_stats,{4,6,{6,9}}}, {?eh,tc_start,{skipped_by_spec_2_SUITE,init_per_suite}}, {?eh,tc_done,{skipped_by_spec_2_SUITE,init_per_suite,ok}}, {?eh,tc_user_skip,{skipped_by_spec_2_SUITE,tc1,"should be skipped"}}, - {?eh,test_stats,{4,6,{8,8}}}, + {?eh,test_stats,{4,6,{7,9}}}, {?eh,tc_start,{skipped_by_spec_2_SUITE,end_per_suite}}, {?eh,tc_done,{skipped_by_spec_2_SUITE,end_per_suite,ok}}, diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml index 2d95f96ac7..95b23e4aef 100644 --- a/lib/kernel/doc/src/error_logger.xml +++ b/lib/kernel/doc/src/error_logger.xml @@ -49,7 +49,7 @@ that events are logged to file instead, or not logged at all, see 
<seealso marker="kernel_app">kernel(6)</seealso>.</p> <p>Also the SASL application, if started, adds its own event - handler, which by default writes supervisor-, crash- and progress + handler, which by default writes supervisor, crash and progress reports to tty. See <seealso marker="sasl:sasl_app">sasl(6)</seealso>.</p> <p>It is recommended that user defined applications should report diff --git a/lib/kernel/doc/src/gen_tcp.xml b/lib/kernel/doc/src/gen_tcp.xml index 8a5d40bb16..9f362841ce 100644 --- a/lib/kernel/doc/src/gen_tcp.xml +++ b/lib/kernel/doc/src/gen_tcp.xml @@ -235,7 +235,9 @@ do_recv(Sock, Bs) -> <p>Returns <c>{ok, <anno>Socket</anno>}</c> if a connection is established, or <c>{error, closed}</c> if <c><anno>ListenSocket</anno></c> is closed, or <c>{error, timeout}</c> if no connection is established - within the specified time. May also return a POSIX error + within the specified time, + or <c>{error, system_limit}</c> if all available ports in the + Erlang emulator are in use. May also return a POSIX error value if something else goes wrong, see inet(3) for possible error values.</p> <p>Packets can be sent to the returned socket <c><anno>Socket</anno></c> diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml index 1a05b4ba99..86950b3ecc 100644 --- a/lib/kernel/doc/src/inet.xml +++ b/lib/kernel/doc/src/inet.xml @@ -149,7 +149,7 @@ fe80::204:acff:fe17:bf38 <fsummary>Return a descriptive string for an error reason</fsummary> <desc> <p>Returns a diagnostic error string. See the section below - for possible <c><anno>Posix</anno></c> values and the corresponding + for possible Posix values and the corresponding strings.</p> </desc> </func> diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml index 0f71a4f0f2..63fdc223f4 100644 --- a/lib/kernel/doc/src/kernel_app.xml +++ b/lib/kernel/doc/src/kernel_app.xml @@ -104,7 +104,7 @@ that node. <c>Value</c> is one of:</p> <taglist> <tag><c>never</c></tag> - <item>Connections are never automatically connected, they + <item>Connections are never automatically established, they must be explicitly connected. See <c>net_kernel(3)</c>.</item> <tag><c>once</c></tag> <item>Connections will be established automatically, but only diff --git a/lib/kernel/src/erts_debug.erl b/lib/kernel/src/erts_debug.erl index 7d6a5ade94..16a773898d 100644 --- a/lib/kernel/src/erts_debug.erl +++ b/lib/kernel/src/erts_debug.erl @@ -53,6 +53,11 @@ size(Tuple, Seen0, Sum0) when is_tuple(Tuple) -> Sum = Sum0 + 1 + tuple_size(Tuple), tuple_size(1, tuple_size(Tuple), Tuple, Seen, Sum) end; +size(Fun, Seen0, Sum) when is_function(Fun) -> + case remember_term(Fun, Seen0) of + seen -> {Sum,Seen0}; + Seen -> fun_size(Fun, Seen, Sum) + end; size(Term, Seen0, Sum) -> case erts_debug:flat_size(Term) of 0 -> {Sum,Seen0}; @@ -68,6 +73,21 @@ tuple_size(I, Sz, _, Seen, Sum) when I > Sz -> tuple_size(I, Sz, Tuple, Seen0, Sum0) -> {Sum,Seen} = size(element(I, Tuple), Seen0, Sum0), tuple_size(I+1, Sz, Tuple, Seen, Sum). + +fun_size(Fun, Seen, Sum) -> + case erlang:fun_info(Fun, type) of + {type,external} -> + {Sum + erts_debug:flat_size(Fun),Seen}; + {type,local} -> + Sz = erts_debug:flat_size(fun() -> ok end), + {env,Env} = erlang:fun_info(Fun, env), + fun_size_1(Env, Seen, Sum+Sz+length(Env)) + end. + +fun_size_1([H|T], Seen0, Sum0) -> + {Sum,Seen} = size(H, Seen0, Sum0), + fun_size_1(T, Seen, Sum); +fun_size_1([], Seen, Sum) -> {Sum,Seen}. 
remember_term(Term, Seen) -> case gb_trees:lookup(Term, Seen) of diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl index 4d6c7f5f1d..56d0451e44 100644 --- a/lib/kernel/src/gen_tcp.erl +++ b/lib/kernel/src/gen_tcp.erl @@ -175,7 +175,7 @@ try_connect([], _Port, _Opts, _Timer, _Mod, Err) -> Port :: inet:port_number(), Options :: [listen_option()], ListenSocket :: socket(), - Reason :: inet:posix(). + Reason :: system_limit | inet:posix(). listen(Port, Opts) -> Mod = mod(Opts, undefined), @@ -194,7 +194,7 @@ listen(Port, Opts) -> -spec accept(ListenSocket) -> {ok, Socket} | {error, Reason} when ListenSocket :: socket(), Socket :: socket(), - Reason :: closed | timeout | inet:posix(). + Reason :: closed | timeout | system_limit | inet:posix(). accept(S) -> case inet_db:lookup_socket(S) of @@ -208,7 +208,7 @@ accept(S) -> ListenSocket :: socket(), Timeout :: timeout(), Socket :: socket(), - Reason :: closed | timeout | inet:posix(). + Reason :: closed | timeout | system_limit | inet:posix(). accept(S, Time) when is_port(S) -> case inet_db:lookup_socket(S) of diff --git a/lib/kernel/src/global.erl b/lib/kernel/src/global.erl index fa97614eca..7da33859bf 100644 --- a/lib/kernel/src/global.erl +++ b/lib/kernel/src/global.erl @@ -280,13 +280,13 @@ unregister_name(Name) -> gen_server:call(global_name_server, {registrar, Fun}, infinity) end. --spec re_register_name(Name, Pid) -> _ when +-spec re_register_name(Name, Pid) -> 'yes' when Name :: term(), Pid :: pid(). re_register_name(Name, Pid) when is_pid(Pid) -> re_register_name(Name, Pid, fun random_exit_name/3). --spec re_register_name(Name, Pid, Resolve) -> _ when +-spec re_register_name(Name, Pid, Resolve) -> 'yes' when Name :: term(), Pid :: pid(), Resolve :: method(). @@ -1965,7 +1965,7 @@ resolve_it(Method, Name, Pid1, Pid2) -> minmax(P1,P2) -> if node(P1) < node(P2) -> {P1, P2}; true -> {P2, P1} end. --spec random_exit_name(Name, Pid1, Pid2) -> 'none' when +-spec random_exit_name(Name, Pid1, Pid2) -> pid() when Name :: term(), Pid1 :: pid(), Pid2 :: pid(). @@ -1976,7 +1976,7 @@ random_exit_name(Name, Pid, Pid2) -> exit(Max, kill), Min. --spec random_notify_name(Name, Pid1, Pid2) -> 'none' when +-spec random_notify_name(Name, Pid1, Pid2) -> pid() when Name :: term(), Pid1 :: pid(), Pid2 :: pid(). @@ -2175,7 +2175,7 @@ get_own_nodes() -> start_the_registrar() -> spawn_link(fun() -> loop_the_registrar() end). - + loop_the_registrar() -> receive {trans_all_known, Fun, From} -> diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl index 49f64a9236..4de3c1c6b1 100644 --- a/lib/kernel/src/inet.erl +++ b/lib/kernel/src/inet.erl @@ -1218,11 +1218,13 @@ port_list(Name) -> %% utils %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --spec format_error(Posix) -> string() when - Posix :: posix(). +-spec format_error(Reason) -> string() when + Reason :: posix() | system_limit. format_error(exbadport) -> "invalid port state"; format_error(exbadseq) -> "bad command sequence"; +format_error(system_limit) -> + "a system limit was hit, probably not enough ports"; format_error(Tag) -> erl_posix_msg:message(Tag). 
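The spec changes above add system_limit as a possible error reason for gen_tcp:listen/2 and gen_tcp:accept/1,2, and inet:format_error/1 gets a clause for it. A minimal, hedged sketch of how a caller might treat that reason separately from ordinary POSIX errors (the module name and return shape are illustrative only):

    -module(accept_sketch).
    -export([accept_loop/1]).

    %% Illustrative only: keep accepting, retry on timeout, and render
    %% system_limit (all ports in the emulator are in use) with
    %% inet:format_error/1, which now handles that reason as well.
    accept_loop(LSock) ->
        case gen_tcp:accept(LSock, 5000) of
            {ok, Sock} ->
                gen_tcp:close(Sock),
                accept_loop(LSock);
            {error, timeout} ->
                accept_loop(LSock);
            {error, closed} ->
                {error, closed};
            {error, system_limit} ->
                {error, {system_limit, inet:format_error(system_limit)}};
            {error, Reason} ->
                {error, {Reason, inet:format_error(Reason)}}
        end.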
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl index 3da4b07c05..d9bba0dbb0 100644 --- a/lib/kernel/test/gen_tcp_misc_SUITE.erl +++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl @@ -39,7 +39,8 @@ accept_timeouts_in_order/1,accept_timeouts_in_order2/1, accept_timeouts_in_order3/1,accept_timeouts_mixed/1, killing_acceptor/1,killing_multi_acceptors/1,killing_multi_acceptors2/1, - several_accepts_in_one_go/1,active_once_closed/1, send_timeout/1, send_timeout_active/1, + several_accepts_in_one_go/1, accept_system_limit/1, + active_once_closed/1, send_timeout/1, send_timeout_active/1, otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, otp_9389/1]). @@ -71,7 +72,7 @@ all() -> accept_timeouts_in_order, accept_timeouts_in_order2, accept_timeouts_in_order3, accept_timeouts_mixed, killing_acceptor, killing_multi_acceptors, - killing_multi_acceptors2, several_accepts_in_one_go, + killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit, active_once_closed, send_timeout, send_timeout_active, otp_7731, zombie_sockets, otp_7816, otp_8102, otp_9389]. @@ -1837,6 +1838,54 @@ wait_until_accepting(Proc,N) -> end. +accept_system_limit(suite) -> + []; +accept_system_limit(doc) -> + ["Check that accept returns {error, system_limit} " + "(and not {error, enfile}) when running out of ports"]; +accept_system_limit(Config) when is_list(Config) -> + ?line {ok, LS} = gen_tcp:listen(0, []), + ?line {ok, TcpPort} = inet:port(LS), + ?line Connector = spawn_link(fun () -> connector(TcpPort) end), + ?line ok = acceptor(LS, false, []), + ?line Connector ! stop, + ok. + +acceptor(LS, GotSL, A) -> + case gen_tcp:accept(LS, 1000) of + {ok, S} -> + acceptor(LS, GotSL, [S|A]); + {error, system_limit} -> + acceptor(LS, true, A); + {error, timeout} when GotSL -> + ok; + {error, timeout} -> + error + end. + +connector(TcpPort) -> + ManyPorts = open_ports([]), + ConnF = fun (Port) -> + case catch gen_tcp:connect({127,0,0,1}, TcpPort, []) of + {ok, Sock} -> + Sock; + _Error -> + port_close(Port) + end + end, + R = [ConnF(Port) || Port <- lists:sublist(ManyPorts, 10)], + receive stop -> R end. + +open_ports(L) -> + case catch open_port({spawn_driver, "ram_file_drv"}, []) of + Port when is_port(Port) -> + open_ports([Port|L]); + {'EXIT', {system_limit, _}} -> + {L1, L2} = lists:split(5, L), + [port_close(Port) || Port <- L1], + L2 + end. + active_once_closed(suite) -> []; diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl index 6d0848ee05..03704b0c04 100644 --- a/lib/kernel/test/sendfile_SUITE.erl +++ b/lib/kernel/test/sendfile_SUITE.erl @@ -26,7 +26,9 @@ all() -> [t_sendfile_small - ,t_sendfile_big + ,t_sendfile_big_all + ,t_sendfile_big_size + ,t_sendfile_many_small ,t_sendfile_partial ,t_sendfile_offset ,t_sendfile_sendafter @@ -38,20 +40,25 @@ all() -> ]. init_per_suite(Config) -> - Priv = ?config(priv_dir, Config), - SFilename = filename:join(Priv, "sendfile_small.html"), - {ok, DS} = file:open(SFilename,[write,raw]), - file:write(DS,"yo baby yo"), - file:sync(DS), - file:close(DS), - BFilename = filename:join(Priv, "sendfile_big.html"), - {ok, DB} = file:open(BFilename,[write,raw]), - [file:write(DB,[<<0:(10*8*1024*1024)>>]) || _I <- lists:seq(1,51)], - file:sync(DB), - file:close(DB), - [{small_file, SFilename}, - {file_opts,[raw,binary]}, - {big_file, BFilename}|Config]. 
+ case {os:type(),os:version()} of + {{unix,sunos}, {5,8,_}} -> + {skip, "Solaris 8 not supported for now"}; + _ -> + Priv = ?config(priv_dir, Config), + SFilename = filename:join(Priv, "sendfile_small.html"), + {ok, DS} = file:open(SFilename,[write,raw]), + file:write(DS,"yo baby yo"), + file:sync(DS), + file:close(DS), + BFilename = filename:join(Priv, "sendfile_big.html"), + {ok, DB} = file:open(BFilename,[write,raw]), + [file:write(DB,[<<0:(10*8*1024*1024)>>]) || _I <- lists:seq(1,51)], + file:sync(DB), + file:close(DB), + [{small_file, SFilename}, + {file_opts,[raw,binary]}, + {big_file, BFilename}|Config] + end. end_per_suite(Config) -> file:delete(proplists:get_value(big_file, Config)). @@ -91,7 +98,34 @@ t_sendfile_small(Config) when is_list(Config) -> ok = sendfile_send(Send). -t_sendfile_big(Config) when is_list(Config) -> +t_sendfile_many_small(Config) when is_list(Config) -> + Filename = proplists:get_value(small_file, Config), + FileOpts = proplists:get_value(file_opts, Config, []), + + error_logger:add_report_handler(?MODULE,[self()]), + + Send = fun(Sock) -> + {Size,_} = sendfile_file_info(Filename), + N = 10000, + {ok,D} = file:open(Filename,[read|FileOpts]), + [begin + {ok,Size} = file:sendfile(D,Sock,0,0,[]) + end || _I <- lists:seq(1,N)], + file:close(D), + Size*N + end, + + ok = sendfile_send({127,0,0,1}, Send, 0), + + receive + {stolen,Reason} -> + exit(Reason) + after 200 -> + ok + end. + + +t_sendfile_big_all(Config) when is_list(Config) -> Filename = proplists:get_value(big_file, Config), Send = fun(Sock) -> @@ -103,6 +137,20 @@ t_sendfile_big(Config) when is_list(Config) -> ok = sendfile_send({127,0,0,1}, Send, 0). +t_sendfile_big_size(Config) -> + Filename = proplists:get_value(big_file, Config), + FileOpts = proplists:get_value(file_opts, Config, []), + + SendAll = fun(Sock) -> + {ok, #file_info{size = Size}} = + file:read_file_info(Filename), + {ok,D} = file:open(Filename,[read|FileOpts]), + {ok, Size} = file:sendfile(D, Sock,0,Size,[]), + Size + end, + + ok = sendfile_send({127,0,0,1}, SendAll, 0). + t_sendfile_partial(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), @@ -310,6 +358,10 @@ sendfile_server(ClientPid, Orig) -> -define(SENDFILE_TIMEOUT, 10000). sendfile_do_recv(Sock, Bs) -> + TimeoutMul = case os:type() of + {win32, _} -> 6; + _ -> 1 + end, receive stop when Bs /= 0,is_integer(Bs) -> gen_tcp:close(Sock), @@ -333,7 +385,7 @@ sendfile_do_recv(Sock, Bs) -> {tcp_closed, Sock} when is_integer(Bs) -> ct:log("Stopped due to close"), {ok, Bs} - after ?SENDFILE_TIMEOUT -> + after ?SENDFILE_TIMEOUT * TimeoutMul -> ct:log("Sendfile timeout"), timeout end. diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl index 74bafe8935..e0fd0f5d19 100644 --- a/lib/kernel/test/zlib_SUITE.erl +++ b/lib/kernel/test/zlib_SUITE.erl @@ -73,6 +73,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [{group, api}, {group, examples}, {group, func}, smp, + otp_9981, otp_7359]. groups() -> @@ -964,6 +965,24 @@ otp_7359_def_inf(Data,{DefSize,InfSize}) -> ?line ok = zlib:close(ZInf), ok. 
+otp_9981(Config) when is_list(Config) -> + Ports = lists:sort(erlang:ports()), + Invalid = <<"My invalid data">>, + catch zlib:compress(invalid), + Ports = lists:sort(erlang:ports()), + catch zlib:uncompress(Invalid), + Ports = lists:sort(erlang:ports()), + catch zlib:zip(invalid), + Ports = lists:sort(erlang:ports()), + catch zlib:unzip(Invalid), + Ports = lists:sort(erlang:ports()), + catch zlib:gzip(invalid), + Ports = lists:sort(erlang:ports()), + catch zlib:gunzip(Invalid), + Ports = lists:sort(erlang:ports()), + ok. + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% Helps with testing directly %%%%%%%%%%%%% diff --git a/lib/megaco/doc/src/notes.xml b/lib/megaco/doc/src/notes.xml index d9c575885f..393064fbb5 100644 --- a/lib/megaco/doc/src/notes.xml +++ b/lib/megaco/doc/src/notes.xml @@ -4,7 +4,7 @@ <chapter> <header> <copyright> - <year>2000</year><year>2011</year> + <year>2000</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -36,6 +36,75 @@ section is the version number of Megaco.</p> + <section><title>Megaco 3.16.0.1</title> + + <p>Version 3.16.0.1 supports code replacement in runtime from/to + version 3.16, 3.15.1.1, 3.15.1 and 3.15.</p> + + <section> + <title>Improvements and new features</title> + +<!-- + <p>-</p> +--> + + <list type="bulleted"> + <item> + <p>Fixed some faulty test cases. </p> +<!-- + <p>Own Id: OTP-9795</p> +--> + </item> + + <item> + <p>Removed use of deprecated system flag, + <c>scheduler_bind_type</c>, in the measurement tool + <c>mstone1</c>. </p> + <p>Own Id: OTP-9949</p> + </item> + + </list> + + </section> + + <section> + <title>Fixed bugs and malfunctions</title> + + <p>-</p> + + <!-- + <list type="bulleted"> + <item> + <p>Fixing miscellaneous things detected by dialyzer. </p> + <p>Own Id: OTP-9075</p> + </item> + + </list> + --> + + </section> + + <section> + <title>Incompatibilities</title> + <p>-</p> + +<!-- + <list type="bulleted"> + <item> + <p>Due to the change in the flex driver API, + we may no longer be able to build and/or use + the flex driver without reentrant support. </p> + <p>Own Id: OTP-9795</p> + </item> + + </list> +--> + + </section> + + </section> <!-- 3.16.0.1 --> + + <section><title>Megaco 3.16</title> <p>Version 3.16 supports code replacement in runtime from/to @@ -50,7 +119,7 @@ <list type="bulleted"> <item> - <p>Minor improvemnts to the emasurement tool mstone1. </p> + <p>Minor improvements to the measurement tool <c>mstone1</c>. </p> <p>Own Id: OTP-9604</p> </item> diff --git a/lib/megaco/examples/meas/meas.sh.skel.src b/lib/megaco/examples/meas/meas.sh.skel.src index c7bd6cdd2a..ecf463b9c6 100644 --- a/lib/megaco/examples/meas/meas.sh.skel.src +++ b/lib/megaco/examples/meas/meas.sh.skel.src @@ -2,7 +2,7 @@ # %CopyrightBegin% # -# Copyright Ericsson AB 2007-2011. All Rights Reserved. +# Copyright Ericsson AB 2007-2012. All Rights Reserved. # # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -32,6 +32,7 @@ STOP="-s init stop" ERL="erl \ -noshell \ + +sbt tnnps \ -pa $MEAS_HOME \ $MEAS_DEFAULT \ $STOP" diff --git a/lib/megaco/examples/meas/megaco_codec_mstone1.erl b/lib/megaco/examples/meas/megaco_codec_mstone1.erl index 9ab7822df8..2133cd633c 100644 --- a/lib/megaco/examples/meas/megaco_codec_mstone1.erl +++ b/lib/megaco/examples/meas/megaco_codec_mstone1.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2005-2009. All Rights Reserved. 
+%% Copyright Ericsson AB 2005-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -191,7 +191,6 @@ mstone_init(MessagePackage, Factor, Codecs, DrvInclude) -> do_mstone(MessagePackage, Factor, Codecs, DrvInclude) -> io:format("~n", []), - ?LIB:set_default_sched_bind(), ?LIB:display_os_info(), ?LIB:display_system_info(), ?LIB:display_app_info(), diff --git a/lib/megaco/examples/meas/megaco_codec_mstone2.erl b/lib/megaco/examples/meas/megaco_codec_mstone2.erl index f3588f2e3d..54b889345f 100644 --- a/lib/megaco/examples/meas/megaco_codec_mstone2.erl +++ b/lib/megaco/examples/meas/megaco_codec_mstone2.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2006-2009. All Rights Reserved. +%% Copyright Ericsson AB 2006-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -40,7 +40,7 @@ %% When the finished timer expires, it will stop respawing %% the worker processes, and instead just wait for them all %% to finish. -%% The test is finished by printing the statistics. +%% The test finishes by printing the statistics. %% - A worker process for each codec combination. %% This process is spawned by the loader process. It receives %% at start a list of messages. It shall decode and then @@ -162,7 +162,6 @@ parse_message_package(BadMessagePackage) -> mstone_init(MessagePackage, DrvInclude) -> io:format("~n", []), - ?LIB:set_default_sched_bind(), ?LIB:display_os_info(), ?LIB:display_system_info(), ?LIB:display_app_info(), diff --git a/lib/megaco/examples/meas/megaco_codec_mstone_lib.erl b/lib/megaco/examples/meas/megaco_codec_mstone_lib.erl index 040af9826b..ca8016d65f 100644 --- a/lib/megaco/examples/meas/megaco_codec_mstone_lib.erl +++ b/lib/megaco/examples/meas/megaco_codec_mstone_lib.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2006-2010. All Rights Reserved. +%% Copyright Ericsson AB 2006-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -30,7 +30,6 @@ -compile({no_auto_import,[error/1]}). -export([start_flex_scanner/0, stop_flex_scanner/1, expanded_messages/2, expanded_messages/3, - set_default_sched_bind/0, display_os_info/0, display_system_info/0, display_alloc_info/0, @@ -70,16 +69,6 @@ detect_version(Codec, Conf, Bin) -> %%---------------------------------------------------------------------- %% -%% S c h e d u l e r b i n d t y p e -%% -%%---------------------------------------------------------------------- - -set_default_sched_bind() -> - (catch erlang:system_flag(scheduler_bind_type, default_bind)). - - -%%---------------------------------------------------------------------- -%% %% D i s p l a y O s I n f o %% %%---------------------------------------------------------------------- diff --git a/lib/megaco/examples/meas/mstone1.sh.skel.src b/lib/megaco/examples/meas/mstone1.sh.skel.src index 54a6c61a58..b1935acef6 100644 --- a/lib/megaco/examples/meas/mstone1.sh.skel.src +++ b/lib/megaco/examples/meas/mstone1.sh.skel.src @@ -3,7 +3,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 2007-2011. All Rights Reserved. +# Copyright Ericsson AB 2007-2012. All Rights Reserved.
# # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -55,6 +55,15 @@ Options: flex scanner will be used nd - only codec config(s) without drivers will be used od - only codec config(s) with drivers will be used + -sbt <bind-type> Set scheduler bind type. See erl man page for more info. + tnnps - Thread no node processor spread (default) + u - Unbound + ns - No spread + ts - Thread spread + ps - Processor spread + s - Spread + nnts - No node thread spread + nnps - No node processor spread -- everything after this is just passed on to erl. " @@ -67,6 +76,7 @@ MODULE=megaco_codec_mstone1 STARTF="start" FACTOR="" MSG_PACK=time_test +SBT="+sbt tnnps" while test $# != 0; do # echo "DBG: Value = $1" @@ -107,6 +117,17 @@ while test $# != 0; do exit 0 esac;; + -sbt) + case $2 in + tnnps|u|ns|ts|ps|s|nnts|nnps) + SBT="+sbt $2"; + shift ; shift ;; + *) + echo "unknown scheduler bind type: $2"; + echo "$usage" ; + exit 0 + esac;; + -f) if [ "x$SCHED" != "x" ]; then echo "option(s) -s and -f cannot both be given" ; @@ -177,6 +198,7 @@ if [ $TYPE = factor ]; then ERL="erl \ -noshell \ + $SBT \ $PHS \ $ATP \ $SMP_OPTS \ @@ -218,6 +240,7 @@ elif [ $TYPE = sched ]; then ERL="erl \ -noshell \ + $SBT \ $PHS \ $ATP \ $SMP_OPTS \ diff --git a/lib/megaco/src/app/megaco.appup.src b/lib/megaco/src/app/megaco.appup.src index 3c9740818a..7f89fa8bc2 100644 --- a/lib/megaco/src/app/megaco.appup.src +++ b/lib/megaco/src/app/megaco.appup.src @@ -2,7 +2,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2001-2011. All Rights Reserved. +%% Copyright Ericsson AB 2001-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -142,10 +142,17 @@ %% | %% v %% 3.16 +%% | +%% v +%% 3.16.0.1 %% %% {"%VSN%", [ + {"3.16", + [ + ] + }, {"3.15.1.1", [ {restart_application, megaco} @@ -163,6 +170,10 @@ } ], [ + {"3.16", + [ + ] + }, {"3.15.1.1", [ {restart_application, megaco} diff --git a/lib/megaco/test/megaco_mess_test.erl b/lib/megaco/test/megaco_mess_test.erl index 8bafab1aba..663ac8c329 100644 --- a/lib/megaco/test/megaco_mess_test.erl +++ b/lib/megaco/test/megaco_mess_test.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2011. All Rights Reserved. +%% Copyright Ericsson AB 1999-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -324,9 +324,6 @@ min(M) -> timer:minutes(M). %% Test server callbacks -% init_per_testcase(pending_ack = Case, Config) -> -% put(dbg,true), -% megaco_test_lib:init_per_testcase(Case, Config); init_per_testcase(otp_7189 = Case, Config) -> C = lists:keydelete(tc_timeout, 1, Config), megaco_test_lib:init_per_testcase(Case, [{tc_timeout, min(2)} |C]); @@ -337,9 +334,6 @@ init_per_testcase(Case, Config) -> C = lists:keydelete(tc_timeout, 1, Config), megaco_test_lib:init_per_testcase(Case, [{tc_timeout, min(1)} |C]). -% end_per_testcase(pending_ack = Case, Config) -> -% erase(dbg), -% megaco_test_lib:end_per_testcase(Case, Config); end_per_testcase(Case, Config) -> megaco_test_lib:end_per_testcase(Case, Config). 
@@ -434,6 +428,7 @@ connect(suite) -> connect(doc) -> []; connect(Config) when is_list(Config) -> + %% ?SKIP("Needs a re-write..."), ?ACQUIRE_NODES(1, Config), PrelMid = preliminary_mid, MgMid = ipv4_mid(4711), @@ -457,16 +452,25 @@ connect(Config) when is_list(Config) -> ?VERIFY(bad_send_mod, megaco:conn_info(PrelCH, send_mod)), SC = service_change_request(), case megaco:call(PrelCH, [SC], []) of - {error, - {send_message_failed, - {'EXIT', {undef, [{bad_send_mod, send_message, [sh, _]} | _]}}}} -> + {_Version, + {error, + {send_message_failed, + {'EXIT', {undef, [{bad_send_mod, send_message, [sh, _]} | _]}}}} + } -> + %% R14B and previous + ?LOG("expected send failure (1)", []), ok; %% As of R15, we also get some extra info (e.g. line numbers) - {error, - {send_message_failed, - {'EXIT', {undef, [{bad_send_mod, send_message, [sh, _], _} | _]}}}} -> + {_Version, + {error, + {send_message_failed, + {'EXIT', {undef, [{bad_send_mod, send_message, [sh, _], _} | _]}}}} + } -> + %% R15B and later + ?LOG("expected send failure (2)", []), ok; + Unexpected -> ?ERROR(Unexpected) end, diff --git a/lib/megaco/vsn.mk b/lib/megaco/vsn.mk index bb6f5f554a..11a951a23e 100644 --- a/lib/megaco/vsn.mk +++ b/lib/megaco/vsn.mk @@ -2,7 +2,7 @@ # %CopyrightBegin% # -# Copyright Ericsson AB 1997-2011. All Rights Reserved. +# Copyright Ericsson AB 1997-2012. All Rights Reserved. # # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -18,6 +18,6 @@ # %CopyrightEnd% APPLICATION = megaco -MEGACO_VSN = 3.16 +MEGACO_VSN = 3.16.0.1 PRE_VSN = APP_VSN = "$(APPLICATION)-$(MEGACO_VSN)$(PRE_VSN)" diff --git a/lib/observer/doc/src/etop.xml b/lib/observer/doc/src/etop.xml index 78047caab3..4eb5603549 100644 --- a/lib/observer/doc/src/etop.xml +++ b/lib/observer/doc/src/etop.xml @@ -114,6 +114,37 @@ Default: <c>on</c></item> </description> <funcs> <func> + <name>start() -> ok</name> + <fsummary>Start etop</fsummary> + <desc> + <p>This function starts <c>etop</c>. + Note that etop is preferably started with the etop and getop scripts</p> + </desc> + </func> + <func> + <name>start(Options) -> ok</name> + <fsummary>Start etop</fsummary> + <type> + <v>Options = [Option]</v> + <v>Option = {Key, Value}</v> + <v>Key = atom()</v> + <v>Value = term()</v> + </type> + <desc> + <p>This function starts <c>etop</c>. 
Use + <seealso marker="#help/0">help/0</seealso> to see a + description of the possible options.</p> + </desc> + </func> + <func> + <name>help() -> ok</name> + <fsummary>Print etop's help</fsummary> + <desc> + <p>This function prints the help of <c>etop</c> and + its options.</p> + </desc> + </func> + <func> <name>config(Key,Value) -> Result</name> <fsummary>Change tool's configuration</fsummary> <type> diff --git a/lib/observer/src/etop.erl b/lib/observer/src/etop.erl index 0bf1d68534..7ec0fedbb2 100644 --- a/lib/observer/src/etop.erl +++ b/lib/observer/src/etop.erl @@ -31,9 +31,9 @@ help() -> io:format( - "Usage of the erlang top program~n" - "Options are set as command line parameters as in -node a@host -..~n" - "or as parameter to etop:start([{node, a@host}, {...}])~n" + "Usage of the Erlang top program~n~n" + "Options are set as command line parameters as in -node my@host~n" + "or as parameters to etop:start([{node, my@host}, {...}]).~n~n" "Options are:~n" " node atom Required The erlang node to measure ~n" " port integer The used port, NOTE: due to a bug this program~n" diff --git a/lib/snmp/doc/src/notes.xml b/lib/snmp/doc/src/notes.xml index 704ff0a20f..2d045faa0f 100644 --- a/lib/snmp/doc/src/notes.xml +++ b/lib/snmp/doc/src/notes.xml @@ -34,6 +34,88 @@ <section> + <title>SNMP Development Toolkit 4.22</title> + <p>Version 4.22 supports code replacement in runtime from/to + version 4.21.7, 4.21.6, 4.21.5, 4.21.4, 4.21.3, 4.21.2, 4.21.1 and 4.21. </p> + + <section> + <title>Improvements and new features</title> +<!-- + <p>-</p> +--> + + <list type="bulleted"> + <item> + <p>[compiler] The table information the MIB compiler provides with + augmented tables has been extended with <c>nbr_of_cols</c>, + <c>first_accessible</c> and <c>not_accessible</c>. </p> + <p>Own Id: OTP-9969</p> + </item> + + <item> + <p>Added the <c>log_to_io</c> audit-trail-log converter function + to the API modules of both the + <seealso marker="snmpm#log_to_io">manager</seealso> + and + <seealso marker="snmpa#log_to_io">agent</seealso>. </p> + <p>Own Id: OTP-9940</p> + </item> + + <item> + <p>[manager] Introduced a new transport module, + <c>snmpm_net_if_mt</c>, + which handles all incoming and outgoing + traffic in newly created processes. The message/request is + processed and then the process exits. </p> + <p>Own Id: OTP-9876</p> + </item> + + <item> + <p>[agent] Documented the previously existing but undocumented function, + <seealso marker="snmp_generic#get_table_info">snmp_generic:get_table_info/2</seealso>. </p> + <p>Own Id: OTP-9942</p> + </item> + + <item> + <p>[agent] Improved error handling while reading agent config files. + Some files contain mandatory information and are therefore themselves + mandatory. </p> + <p>Own Id: OTP-9943</p> + </item> + </list> + + </section> + + <section> + <title>Fixed Bugs and Malfunctions</title> + <p>-</p> + + <!-- + <list type="bulleted"> + <item> + <p>[agent] Simultaneous + <seealso marker="snmpa#backup">snmpa:backup/1,2</seealso> + calls can interfere. + The master agent did not check if a backup was already in + progress when a backup request was accepted.
</p> + <p>Own Id: OTP-9884</p> + <p>Aux Id: Seq 11995</p> + </item> + + </list> + --> + + </section> + + <section> + <title>Incompatibilities</title> + <p>-</p> + </section> + + </section> <!-- 4.22 --> + + + <section> <title>SNMP Development Toolkit 4.21.7</title> <p>Version 4.21.7 supports code replacement in runtime from/to version 4.21.6, 4.21.5, 4.21.4, 4.21.3, 4.21.2, 4.21.1, 4.21, 4.20.1 and @@ -47,14 +129,14 @@ <list type="bulleted"> <item> <p>[agent] DoS attack using GET-BULK with large value of - MaxRepetitions. - A preventive method has been implementing by simply - limit the number of varbinds that can be included in - a Get-BULK response message. This is specified by the - new config option, - <seealso marker="snmp_app#agent_gb_max_vbs">gb_max_vbs</seealso>. - </p> - <p>Own Id: OTP-9700</p> + MaxRepetitions. + A preventive method has been implementing by simply + limit the number of varbinds that can be included in + a Get-BULK response message. This is specified by the + new config option, + <seealso marker="snmp_app#agent_gb_max_vbs">gb_max_vbs</seealso>. + </p> + <p>Own Id: OTP-9700</p> </item> </list> @@ -63,7 +145,7 @@ </section> <section> - <title>Reported Fixed Bugs and Malfunctions</title> + <title>Fixed Bugs and Malfunctions</title> <!-- <p>-</p> --> @@ -71,10 +153,10 @@ <list type="bulleted"> <item> <p>[agent] Simultaneous - <seealso marker="snmpa#backup">snmpa:backup/1,2</seealso> - calls can interfere. - The master agent did not check if a backup was already in - progress when a backup request was accepted. </p> + <seealso marker="snmpa#backup">snmpa:backup/1,2</seealso> + calls can interfere. + The master agent did not check if a backup was already in + progress when a backup request was accepted. </p> <p>Own Id: OTP-9884</p> <p>Aux Id: Seq 11995</p> </item> @@ -106,14 +188,14 @@ <list type="bulleted"> <item> <p>[agent] DoS attack using GET-BULK with large value of - MaxRepetitions. - A preventive method has been implementing by simply - limit the number of varbinds that can be included in - a Get-BULK response message. This is specified by the - new config option, - <seealso marker="snmp_app#agent_gb_max_vbs">gb_max_vbs</seealso>. - </p> - <p>Own Id: OTP-9700</p> + MaxRepetitions. + A preventive method has been implementing by simply + limit the number of varbinds that can be included in + a Get-BULK response message. This is specified by the + new config option, + <seealso marker="snmp_app#agent_gb_max_vbs">gb_max_vbs</seealso>. + </p> + <p>Own Id: OTP-9700</p> </item> </list> @@ -121,7 +203,7 @@ </section> <section> - <title>Reported Fixed Bugs and Malfunctions</title> + <title>Fixed Bugs and Malfunctions</title> <!-- <p>-</p> --> @@ -129,12 +211,12 @@ <list type="bulleted"> <item> <p>[agent] Mib server cache gclimit update function incorrectly calls - age update function. - The gclimit update function, - <seealso marker="snmpa#update_mibs_cache_gclimit">update_mibs_cache_gclimit/1</seealso>, - <em>incorrectly</em> called the age update function, - <seealso marker="snmpa#update_mibs_cache_age">update_mibs_cache_age/2</seealso>. </p> - <p>Johan Claesson</p> + age update function. + The gclimit update function, + <seealso marker="snmpa#update_mibs_cache_gclimit">update_mibs_cache_gclimit/1</seealso>, + <em>incorrectly</em> called the age update function, + <seealso marker="snmpa#update_mibs_cache_age">update_mibs_cache_age/2</seealso>. 
</p> + <p>Johan Claesson</p> <p>Own Id: OTP-9868</p> </item> diff --git a/lib/snmp/doc/src/snmp.xml b/lib/snmp/doc/src/snmp.xml index af0833f005..3e6610891f 100644 --- a/lib/snmp/doc/src/snmp.xml +++ b/lib/snmp/doc/src/snmp.xml @@ -1,10 +1,10 @@ -<?xml version="1.0" encoding="latin1" ?> +<?xml version="1.0" encoding="iso-8859-1" ?> <!DOCTYPE erlref SYSTEM "erlref.dtd"> <erlref> <header> <copyright> - <year>1996</year><year>2009</year> + <year>1996</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -391,6 +391,30 @@ version of the protocol data unit. There is a new line between <c>Vsn</c> and <c>PDU</c>.</p> + <marker id="log_to_io"></marker> + </desc> + </func> + + <func> + <name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name> + <fsummary>Convert an Audit Trail Log to text format</fsummary> + <type> + <v>LogDir = string()</v> + <v>Mibs = [MibName]</v> + <v>MibName = string()</v> + <v>LogName = string()</v> + <v>LogFile = string()</v> + <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v> + <v>Reason = term()</v> + </type> + <desc> + <p>Converts an Audit Trail Log to a readable format and + prints it on stdio. See + <seealso marker="snmp#log_to_txt">log_to_txt</seealso> + above for more info.</p> + <marker id="change_log_size"></marker> </desc> </func> diff --git a/lib/snmp/doc/src/snmp_app.xml b/lib/snmp/doc/src/snmp_app.xml index f6abe783b3..d11eaa2771 100644 --- a/lib/snmp/doc/src/snmp_app.xml +++ b/lib/snmp/doc/src/snmp_app.xml @@ -35,8 +35,8 @@ <appsummary>The SNMP Application</appsummary> <description> <p>This chapter describes the <c>snmp</c> - application in OTP. The SNMP application provides the following - services:</p> + application in OTP. The SNMP application provides the following + services:</p> <list type="bulleted"> <item> <p>a multilingual extensible SNMP agent</p> @@ -589,16 +589,16 @@ {no_reuse, no_reuse()} | {filter, manager_net_if_filter_options()} </c></p> <p>These options are actually specific to the used module. - The ones shown here are applicable to the default - <c>manager_net_if_module()</c>.</p> + The ones shown here are applicable to the default + <c>manager_net_if_module()</c>.</p> <p>For defaults see the options in <c>manager_net_if_option()</c>.</p> </item> <marker id="manager_ni_module"></marker> <tag><c><![CDATA[manager_net_if_module() = atom() <optional>]]></c></tag> <item> - <p>Module which handles the network interface part for the - SNMP manager. Must implement the + <p>The module which handles the network interface part for the + SNMP manager. It must implement the <seealso marker="snmpm_network_interface">snmpm_network_interface</seealso> behaviour.</p> <p>Default is <c>snmpm_net_if</c>.</p> </item> diff --git a/lib/snmp/doc/src/snmp_config.xml b/lib/snmp/doc/src/snmp_config.xml index 0a49b7a62e..b756f3da7d 100644 --- a/lib/snmp/doc/src/snmp_config.xml +++ b/lib/snmp/doc/src/snmp_config.xml @@ -586,18 +586,18 @@ {no_reuse, no_reuse()} | {filter, manager_net_if_filter_options()}</c></p> <p>These options are actually specific to the used module. - The ones shown here are applicable to the default - <c>manager_net_if_module()</c>.</p> + The ones shown here are applicable to the default + <c>manager_net_if_module()</c>. 
</p> <p>For defaults see the options in <c>manager_net_if_option()</c>.</p> </item> <marker id="manager_ni_module"></marker> <tag><c><![CDATA[manager_net_if_module() = atom() <optional>]]></c></tag> <item> - <p>Module which handles the network interface part for the - SNMP manager. Must implement the - <seealso marker="snmpm_network_interface">snmpm_network_interface</seealso> behaviour.</p> - <p>Default is <c>snmpm_net_if</c>.</p> + <p>The module which handles the network interface part for the + SNMP manager. It must implement the + <seealso marker="snmpm_network_interface">snmpm_network_interface</seealso> behaviour. </p> + <p>Default is <c>snmpm_net_if</c>. </p> </item> <marker id="manager_ni_filter_opts"></marker> diff --git a/lib/snmp/doc/src/snmp_generic.xml b/lib/snmp/doc/src/snmp_generic.xml index 77f3cefaa2..79a22323d9 100644 --- a/lib/snmp/doc/src/snmp_generic.xml +++ b/lib/snmp/doc/src/snmp_generic.xml @@ -1,10 +1,10 @@ -<?xml version="1.0" encoding="latin1" ?> +<?xml version="1.0" encoding="iso-8859-1" ?> <!DOCTYPE erlref SYSTEM "erlref.dtd"> <erlref> <header> <copyright> - <year>1996</year><year>2009</year> + <year>1996</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -34,15 +34,16 @@ <module>snmp_generic</module> <modulesummary>Generic Functions for Implementing SNMP Objects in a Database</modulesummary> <description> - <p>The module <c>snmp_generic</c> contains generic functions for implementing tables - (and variables) using the SNMP built-in database or Mnesia. These - default functions are used if no instrumentation function is - provided for a managed object in a MIB. Sometimes, it might be - necessary to customize the behaviour of the default functions. For - example, in some situations a trap should be sent if a row is - deleted or modified, or some hardware is to be informed, when - information is changed. - </p> + <marker id="description"></marker> + <p>The module <c>snmp_generic</c> contains generic functions for + implementing tables (and variables) using the SNMP built-in database + or Mnesia. These default functions are used if no instrumentation + function is provided for a managed object in a MIB. Sometimes, + it might be necessary to customize the behaviour of the default + functions. For example, in some situations a trap should be sent + if a row is deleted or modified, or some hardware is to be informed, + when information is changed. </p> + <p>The overall structure is shown in the following figure:</p> <pre> +---------------+ @@ -93,6 +94,7 @@ </description> <section> + <marker id="data_types"></marker> <title>DATA TYPES</title> <p>In the functions defined below, the following types are used:</p> <code type="none"> @@ -118,7 +120,10 @@ value() = term() case of a <c>set</c> operation. 
</p> </item> </taglist> + + <marker id="get_status_col2"></marker> </section> + <funcs> <func> <name>get_status_col(Name, Cols)</name> @@ -136,8 +141,11 @@ value() = term() <p>This function can be used in instrumentation functions for <c>is_set_ok</c>, <c>undo</c> or <c>set</c> to check if the status column of a table is modified.</p> + + <marker id="get_index_types"></marker> </desc> </func> + <func> <name>get_index_types(Name)</name> <fsummary>Get the index types of <c>Name</c></fsummary> @@ -147,9 +155,36 @@ value() = term() <desc> <p>Gets the index types of <c>Name</c></p> <p>This function can be used in instrumentation functions to - retrieve the index types part of the table info.</p> + retrieve the index types part of the table info.</p> + + <marker id="get_table_info"></marker> </desc> </func> + + <func> + <name>get_table_info(Name, Item) -> table_info_result()</name> + <fsummary>Get table info item of MIB table <c>Name</c></fsummary> + <type> + <v>Name = name()</v> + <v>Item = table_item() | all</v> + <v>table_item() = nbr_of_cols | defvals | status_col | not_accessible | + index_types | first_accessible | first_own_index</v> + <v>table_info_result() = Value | [{table_item(), Value}]</v> + <v>Value = term()</v> + </type> + <desc> + <p>Get a specific table info item or, if <c>Item</c> has the + value <c>all</c>, a two tuple list (property list) is instead + returned with all the items and their respctive values of the + given table. </p> + + <p>This function can be used in instrumentation functions to + retrieve a given part of the table info.</p> + + <marker id="table_func"></marker> + </desc> + </func> + <func> <name>table_func(Op1, NameDb)</name> <name>table_func(Op2, RowIndex, Cols, NameDb) -> Ret</name> @@ -190,8 +225,11 @@ value() = term() <p>The function returns according to the specification of an instrumentation function. </p> + + <marker id="table_get_elements"></marker> </desc> </func> + <func> <name>table_get_elements(NameDb, RowIndex, Cols) -> Values</name> <fsummary>Get elements in a table row</fsummary> @@ -204,8 +242,11 @@ value() = term() <desc> <p>Returns a list with values for all columns in <c>Cols</c>. If a column is undefined, its value is <c>noinit</c>.</p> + + <marker id="table_next"></marker> </desc> </func> + <func> <name>table_next(NameDb, RestOid) -> RowIndex | endOfTable</name> <fsummary>Find the next row in the table</fsummary> @@ -217,8 +258,11 @@ value() = term() <desc> <p>Finds the indices of the next row in the table. <c>RestOid</c> does not have to specify an existing row.</p> + + <marker id="table_row_exists"></marker> </desc> </func> + <func> <name>table_row_exists(NameDb, RowIndex) -> bool()</name> <fsummary>Check if a row in a table exists</fsummary> @@ -228,8 +272,11 @@ value() = term() </type> <desc> <p>Checks if a row in a table exists.</p> + + <marker id="table_set_elements"></marker> </desc> </func> + <func> <name>table_set_elements(NameDb, RowIndex, Cols) -> bool()</name> <fsummary>Set elements in a table row</fsummary> @@ -246,8 +293,11 @@ value() = term() <c>mnesia:write</c> to store the values. This means that this function must be called from within a transaction (<c>mnesia:transaction/1</c> or <c>mnesia:dirty/1</c>).</p> + + <marker id="variable_func"></marker> </desc> </func> + <func> <name>variable_func(Op1, NameDb)</name> <name>variable_func(Op2, Val, NameDb) -> Ret</name> @@ -268,8 +318,11 @@ value() = term() the database. </p> <p>The function returns according to the specification of an instrumentation function. 
</p> + + <marker id="variable_get"></marker> </desc> </func> + <func> <name>variable_get(NameDb) -> {value, Value} | undefined</name> <fsummary>Get the value of a variable</fsummary> @@ -279,8 +332,11 @@ value() = term() </type> <desc> <p>Gets the value of a variable.</p> + + <marker id="variable_set"></marker> </desc> </func> + <func> <name>variable_set(NameDb, NewVal) -> true | false</name> <fsummary>Set a value for a variable</fsummary> @@ -299,6 +355,7 @@ value() = term() </funcs> <section> + <marker id="example"></marker> <title>Example</title> <p>The following example shows an implementation of a table which is stored in Mnesia, but with some checks performed at set-request diff --git a/lib/snmp/doc/src/snmp_manager_netif.xml b/lib/snmp/doc/src/snmp_manager_netif.xml index 2738ca76c1..169e20d10b 100644 --- a/lib/snmp/doc/src/snmp_manager_netif.xml +++ b/lib/snmp/doc/src/snmp_manager_netif.xml @@ -1,10 +1,10 @@ -<?xml version="1.0" encoding="latin1" ?> +<?xml version="1.0" encoding="iso-8859-1" ?> <!DOCTYPE chapter SYSTEM "chapter.dtd"> <chapter> <header> <copyright> - <year>2004</year><year>2009</year> + <year>2004</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -35,31 +35,34 @@ <image file="snmp_manager_netif_1.gif"> <icaption>The Purpose of Manager Net if</icaption> </image> + <p>The Network Interface (Net if) process delivers SNMP PDUs to the - manager server, and receives SNMP PDUs from the manager server. - The most common behaviour of a Net if process is that is receives - request PDU from the manager server, encodes the PDU into bytes - and transmits the bytes onto the network to an agent. When the - reply from the agent is received by the Net if process, which it - decodes into an SNMP PDU, which it sends to the manager server. - </p> + manager server, and receives SNMP PDUs from the manager server. + The most common behaviour of a Net if process is that is receives + request PDU from the manager server, encodes the PDU into bytes + and transmits the bytes onto the network to an agent. When the + reply from the agent is received by the Net if process, which it + decodes into an SNMP PDU, which it sends to the manager server. </p> + <p>However, that simple behaviour can be modified in numerous - ways. For example, the Net if process can apply some kind of - encrypting/decrypting scheme on the bytes. - </p> - <p>It is also possible to write your own Net if process. The default - Net if process is implemented in the module <c>snmpm_net_if</c> and - it uses UDP as the transport protocol. - </p> - <p>This section describes how to write a Net if process. - </p> + ways. For example, the Net if process can apply some kind of + encrypting/decrypting scheme on the bytes. </p> + + <p>The snmp application provides two different modules, + <c>snmpm_net_if</c> (the default) and <c>snmpm_net_if_mt</c>, + both uses the UDP as the transport protocol. The difference + between the two modules is that the latter is "multi-threaded", + i.e. for each message/request a new process is created that + process the message/request and then exits. </p> + + <p>It is also possible to write your own Net if process, + this section describes how to write a Net if processdo that.</p> <section> <marker id="mandatory_functions"></marker> <title>Mandatory Functions</title> <p>A Net if process must implement the SNMP manager - <seealso marker="snmpm_network_interface">network interface behaviour</seealso>. 
- </p> + <seealso marker="snmpm_network_interface">network interface behaviour</seealso>. </p> </section> <section> diff --git a/lib/snmp/doc/src/snmpa.xml b/lib/snmp/doc/src/snmpa.xml index 2322af28cc..86fde03205 100644 --- a/lib/snmp/doc/src/snmpa.xml +++ b/lib/snmp/doc/src/snmpa.xml @@ -501,6 +501,7 @@ notification_delivery_info() = #snmpa_notification_delivery_info{} </func> <func> + <name>log_to_txt(LogDir)</name> <name>log_to_txt(LogDir, Mibs)</name> <name>log_to_txt(LogDir, Mibs, OutFile) -> ok | {error, Reason}</name> <name>log_to_txt(LogDir, Mibs, OutFile, LogName) -> ok | {error, Reason}</name> @@ -528,6 +529,37 @@ notification_delivery_info() = #snmpa_notification_delivery_info{} See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso> for more info.</p> + <marker id="log_to_io"></marker> + </desc> + </func> + + <func> + <name>log_to_io(LogDir) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name> + <fsummary>Convert an Audit Trail Log to text format</fsummary> + <type> + <v>LogDir = string()</v> + <v>Mibs = [MibName]</v> + <v>MibName = string()</v> + <v>LogName = string()</v> + <v>LogFile = string()</v> + <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v> + <v>Reason = disk_log_open_error() | file_open_error() | term()</v> + <v>disk_log_open_error() = {LogName, term()}</v> + <v>file_open_error() = {OutFile, term()}</v> + </type> + <desc> + <p>Converts an Audit Trail Log to a readable format and + prints it on stdio. + <c>LogName</c> defaults to "snmpa_log". + <c>LogFile</c> defaults to "snmpa.log". + See <seealso marker="snmp#log_to_io">snmp:log_to_io</seealso> + for more info.</p> + <marker id="change_log_size"></marker> </desc> </func> diff --git a/lib/snmp/doc/src/snmpm.xml b/lib/snmp/doc/src/snmpm.xml index c36a1b2a24..9bbb6cdbdb 100644 --- a/lib/snmp/doc/src/snmpm.xml +++ b/lib/snmp/doc/src/snmpm.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>2004</year><year>2011</year> + <year>2004</year><year>2012</year> <holder>Ericsson AB. 
All Rights Reserved.</holder> </copyright> <legalnotice> @@ -1237,6 +1237,38 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1 See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso> for more info.</p> + <marker id="log_to_io"></marker> + </desc> + </func> + + <func> + <name>log_to_io(LogDir) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name> + <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name> + <fsummary>Convert an Audit Trail Log to text format</fsummary> + <type> + <v>LogDir = string()</v> + <v>Mibs = [MibName]</v> + <v>MibName = string()</v> + <v>LogName = string()</v> + <v>LogFile = string()</v> + <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v> + <v>Reason = disk_log_open_error() | file_open_error() | term()</v> + <v>disk_log_open_error() = {LogName, term()}</v> + <v>file_open_error() = {OutFile, term()}</v> + </type> + <desc> + <p>Converts an Audit Trail Log to a readable format and + prints it on stdio. + <c>LogName</c> defaults to "snmpm_log". + <c>LogFile</c> defaults to "snmpm.log". + See <seealso marker="snmp#log_to_io">snmp:log_to_io</seealso> + for more info.</p> + <marker id="change_log_size"></marker> </desc> </func> diff --git a/lib/snmp/doc/src/snmpm_network_interface_filter.xml b/lib/snmp/doc/src/snmpm_network_interface_filter.xml index ea1e183848..5f80cec94e 100644 --- a/lib/snmp/doc/src/snmpm_network_interface_filter.xml +++ b/lib/snmp/doc/src/snmpm_network_interface_filter.xml @@ -1,10 +1,10 @@ -<?xml version="1.0" encoding="latin1" ?> +<?xml version="1.0" encoding="iso-8859-1" ?> <!DOCTYPE erlref SYSTEM "erlref.dtd"> <erlref> <header> <copyright> - <year>2007</year><year>2009</year> + <year>2007</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -72,9 +72,10 @@ </list> <p>Note that the network interface filter is something which is used by the network interface implementation provided by the application - (<c>snmpm_net_if</c>). The default filter accepts all messages.</p> + (<c>snmpm_net_if</c> and <c>snmpm_net_if_mt</c>). + The default filter accepts all messages.</p> <p>A network interface filter can e.g. be used during testing or for load - regulation. </p> + regulation. </p> </description> <section> diff --git a/lib/snmp/examples/ex2/snmp_ex2_manager.erl b/lib/snmp/examples/ex2/snmp_ex2_manager.erl index ff873327bc..1b247d713d 100644 --- a/lib/snmp/examples/ex2/snmp_ex2_manager.erl +++ b/lib/snmp/examples/ex2/snmp_ex2_manager.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2006-2010. All Rights Reserved. +%% Copyright Ericsson AB 2006-2012. All Rights Reserved. 
%% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -39,7 +39,7 @@ %% Manager callback API: -export([handle_error/3, - handle_agent/4, + handle_agent/5, handle_pdu/4, handle_trap/3, handle_inform/3, @@ -265,16 +265,17 @@ handle_snmp_callback(handle_error, {ReqId, Reason}) -> "~n Reason: ~p" "~n", [ReqId, Reason]), ok; -handle_snmp_callback(handle_agent, {Addr, Port, SnmpInfo}) -> +handle_snmp_callback(handle_agent, {Addr, Port, Type, SnmpInfo}) -> {ES, EI, VBs} = SnmpInfo, io:format("*** UNKNOWN AGENT ***" "~n Address: ~p" "~n Port: ~p" + "~n Type: ~p" "~n SNMP Info: " "~n Error Status: ~w" "~n Error Index: ~w" "~n Varbinds: ~p" - "~n", [Addr, Port, ES, EI, VBs]), + "~n", [Addr, Port, Type, ES, EI, VBs]), ok; handle_snmp_callback(handle_pdu, {TargetName, ReqId, SnmpResponse}) -> {ES, EI, VBs} = SnmpResponse, @@ -382,8 +383,8 @@ handle_error(ReqId, Reason, Server) when is_pid(Server) -> ignore. -handle_agent(Addr, Port, SnmpInfo, Server) when is_pid(Server) -> - report_callback(Server, handle_agent, {Addr, Port, SnmpInfo}), +handle_agent(Addr, Port, Type, SnmpInfo, Server) when is_pid(Server) -> + report_callback(Server, handle_agent, {Addr, Port, Type, SnmpInfo}), ignore. diff --git a/lib/snmp/include/snmp_types.hrl b/lib/snmp/include/snmp_types.hrl index 4adb24361c..fce087347f 100644 --- a/lib/snmp/include/snmp_types.hrl +++ b/lib/snmp/include/snmp_types.hrl @@ -78,23 +78,36 @@ %%----------------------------------------------------------------- %% TableInfo - stored in snmp_symbolic_store for use by the -%% generic table functions. +%% generic table functions. For an ordinary table, the +%% types will be the following: %% nbr_of_cols is an integer +%% pos_integer() %% defvals is a list of {Col, Defval}, ordered by column %% number +%% [{Col :: integer(), Defval :: term()}] %% status_col is an integer +%% pos_integer() %% not_accessible a sorted list of columns (> first_accessible) %% that are 'not-accessible' -%% indextypes is a list of #asn1_type for the index-columns, -%% ordered by column number -%% first_accessible is an integer, the first non-accessible -%% column +%% [pos_integer()] +%% index_types is a list of #asn1_type for the index-columns, +%% ordered by column number or an "augment"-tuple +%% [asn1_type()] +%% first_accessible is an integer, the first accessible column +%% pos_integer() %% first_own_index is an integer. 0 if there is no such index for %% this table. %% This is not the same as the last integer in the oid! %% Example: If a table has one own index (oid.1), one %% column (oid.2) and one imported index then %% first_own_index will be 2. 
+%% non_neg_integer() +%% For an augmented table, it will instead look like this: +%% index_types {augments, {atom(), asn1_type()}} +%% nbr_of_cols pos_integer() +%% not_accessible [pos_integer()] +%% first_accessible pos_integer() +%% %%----------------------------------------------------------------- -record(table_info, @@ -192,7 +205,7 @@ %%---------------------------------------------------------------------- -record(mib, {misc = [], - mib_format_version = "3.2", + mib_format_version = "3.3", name = "", module_identity, %% Not in SMIv1, and only with +module_identity mes = [], diff --git a/lib/snmp/src/agent/snmp_community_mib.erl b/lib/snmp/src/agent/snmp_community_mib.erl index 77307aa7ad..d7d41aca31 100644 --- a/lib/snmp/src/agent/snmp_community_mib.erl +++ b/lib/snmp/src/agent/snmp_community_mib.erl @@ -28,6 +28,7 @@ -export([add_community/5, add_community/6, delete_community/1]). -export([check_community/1]). +-include("snmpa_internal.hrl"). -include("SNMP-COMMUNITY-MIB.hrl"). -include("SNMP-TARGET-MIB.hrl"). -include("SNMPv2-TC.hrl"). @@ -120,10 +121,17 @@ init_tabs(Comms) -> read_community_config_files(Dir) -> ?vdebug("read community config file",[]), - Gen = fun(_) -> ok end, - Filter = fun(Comms) -> Comms end, - Check = fun(Entry) -> check_community(Entry) end, - [Comms] = + FileName = "community.conf", + Gen = fun(D, Reason) -> + warning_msg("failed reading config file ~s" + "~n Config Dir: ~s" + "~n Reason: ~p", + [FileName, D, Reason]), + ok + end, + Filter = fun(Comms) -> Comms end, + Check = fun(Entry) -> check_community(Entry) end, + [Comms] = snmp_conf:read_files(Dir, [{Gen, Filter, Check, "community.conf"}]), Comms. @@ -601,5 +609,8 @@ set_sname(_) -> %% Keep it, if already set. error(Reason) -> throw({error, Reason}). +warning_msg(F, A) -> + ?snmpa_warning("[COMMUNITY-MIB]: " ++ F, A). + config_err(F, A) -> snmpa_error:config_err("[COMMUNITY-MIB]: " ++ F, A). diff --git a/lib/snmp/src/agent/snmp_framework_mib.erl b/lib/snmp/src/agent/snmp_framework_mib.erl index 0d7866d94d..cc191bd956 100644 --- a/lib/snmp/src/agent/snmp_framework_mib.erl +++ b/lib/snmp/src/agent/snmp_framework_mib.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2010. All Rights Reserved. +%% Copyright Ericsson AB 1999-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -115,7 +115,9 @@ do_configure(Dir) -> read_internal_config_files(Dir) -> ?vdebug("read context config file",[]), - Gen = fun(D) -> convert_context(D) end, + Gen = fun(D, Reason) -> + convert_context(D, Reason) + end, Filter = fun(Contexts) -> Contexts end, Check = fun(Entry) -> check_context(Entry) end, [Ctxs] = snmp_conf:read_files(Dir, [{Gen, Filter, Check, "context.conf"}]), @@ -123,10 +125,17 @@ read_internal_config_files(Dir) -> read_agent(Dir) -> - ?vdebug("read agent config file",[]), - Check = fun(Entry) -> check_agent(Entry) end, - File = filename:join(Dir, "agent.conf"), - Agent = snmp_conf:read(File, Check), + ?vdebug("read agent config file", []), + FileName = "agent.conf", + Check = fun(Entry) -> check_agent(Entry) end, + File = filename:join(Dir, FileName), + Agent = + try + snmp_conf:read(File, Check) + catch + throw:{error, Reason} -> + error({failed_reading_config_file, Dir, FileName, Reason}) + end, sort_agent(Agent). @@ -146,9 +155,9 @@ sort_agent(L) -> %%----------------------------------------------------------------- %% Generate a context.conf file.
%%----------------------------------------------------------------- -convert_context(Dir) -> +convert_context(Dir, _Reason) -> config_err("missing context.conf file => generating a default file", []), - File = filename:join(Dir,"context.conf"), + File = filename:join(Dir, "context.conf"), case file:open(File, [write]) of {ok, Fid} -> ok = io:format(Fid, "~s\n", [context_header()]), @@ -165,7 +174,7 @@ context_header() -> io_lib:format("%% This file was automatically generated by " "snmp_config v~s ~w-~2.2.0w-~2.2.0w " "~2.2.0w:~2.2.0w:~2.2.0w\n", - [?version,Y,Mo,D,H,Mi,S]). + [?version, Y, Mo, D, H, Mi, S]). %%----------------------------------------------------------------- diff --git a/lib/snmp/src/agent/snmp_notification_mib.erl b/lib/snmp/src/agent/snmp_notification_mib.erl index 720ac749b8..37e09f5d3e 100644 --- a/lib/snmp/src/agent/snmp_notification_mib.erl +++ b/lib/snmp/src/agent/snmp_notification_mib.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2011. All Rights Reserved. +%% Copyright Ericsson AB 1998-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -27,6 +27,7 @@ -export([add_notify/3, delete_notify/1]). -export([check_notify/1]). +-include("snmpa_internal.hrl"). -include("SNMP-NOTIFICATION-MIB.hrl"). -include("SNMPv2-TC.hrl"). -include("snmp_tables.hrl"). @@ -104,7 +105,14 @@ do_reconfigure(Dir) -> read_notify_config_files(Dir) -> ?vdebug("read notify config file",[]), - Gen = fun(_) -> ok end, + FileName = "notify.conf", + Gen = fun(D, Reason) -> + info_msg("failed reading config file ~s" + "~n Config Dir: ~s" + "~n Reason: ~p", + [FileName, D, Reason]), + ok + end, Filter = fun(Notifs) -> Notifs end, Check = fun(Entry) -> check_notify(Entry) end, [Notifs] = @@ -112,7 +120,7 @@ read_notify_config_files(Dir) -> Notifs. check_notify({Name, Tag, Type}) -> - snmp_conf:check_string(Name,{gt,0}), + snmp_conf:check_string(Name, {gt, 0}), snmp_conf:check_string(Tag), {ok, Val} = snmp_conf:check_atom(Type, [{trap, 1}, {inform, 2}]), Notify = {Name, Tag, Val, @@ -451,12 +459,15 @@ set_sname() -> set_sname(get(sname)). set_sname(undefined) -> - put(sname,conf); + put(sname, conf); set_sname(_) -> %% Keep it, if already set. ok. error(Reason) -> throw({error, Reason}). +info_msg(F, A) -> + ?snmpa_info("[NOTIFICATION-MIB]: " ++ F, A). + config_err(F, A) -> snmpa_error:config_err("[NOTIFICATION-MIB]: " ++ F, A). diff --git a/lib/snmp/src/agent/snmp_standard_mib.erl b/lib/snmp/src/agent/snmp_standard_mib.erl index b6834d278c..ca93546923 100644 --- a/lib/snmp/src/agent/snmp_standard_mib.erl +++ b/lib/snmp/src/agent/snmp_standard_mib.erl @@ -143,20 +143,35 @@ do_reconfigure(Dir) -> %% Func: read_standard/1 %% Args: Dir is the directory with trailing dir_separator where %% the configuration files can be found. -%% Purpose: Reads th standard configuration file. +%% Purpose: Reads the standard configuration file. %% Returns: A list of standard variables %% Fails: If an error occurs, the process will die with Reason %% configuration_error. +%% This file is mandatory, as it contains mandatory +%% config options for which there are no default values. 
%%----------------------------------------------------------------- read_standard(Dir) -> ?vdebug("check standard config file",[]), - Gen = fun(_) -> ok end, - Filter = fun(Standard) -> sort_standard(Standard) end, - Check = fun(Entry) -> check_standard(Entry) end, + FileName = "standard.conf", + Gen = fun(D, Reason) -> + throw({error, {failed_reading_config_file, + D, FileName, + list_dir(Dir), Reason}}) + end, + Filter = fun(Standard) -> sort_standard(Standard) end, + Check = fun(Entry) -> check_standard(Entry) end, [Standard] = - snmp_conf:read_files(Dir, [{Gen, Filter, Check, "standard.conf"}]), + snmp_conf:read_files(Dir, [{Gen, Filter, Check, FileName}]), Standard. +list_dir(Dir) -> + case file:list_dir(Dir) of + {ok, Files} -> + Files; + Error -> + Error + end. + %%----------------------------------------------------------------- %% Make sure that each mandatory standard attribute is present, and @@ -548,6 +563,7 @@ snmp_set_serial_no(set, NewVal) -> snmp_generic:variable_func(set, (NewVal + 1) rem 2147483648, {snmpSetSerialNo, volatile}). + %%----------------------------------------------------------------- %% This is the instrumentation function for sysOrTable %%----------------------------------------------------------------- @@ -588,4 +604,4 @@ error(Reason) -> throw({error, Reason}). config_err(F, A) -> - snmpa_error:config_err("[STANDARD-MIB]: " ++ F, A). + snmpa_error:config_err("[STANDARD-MIB] " ++ F, A). diff --git a/lib/snmp/src/agent/snmp_target_mib.erl b/lib/snmp/src/agent/snmp_target_mib.erl index a45db89c09..b01d536caa 100644 --- a/lib/snmp/src/agent/snmp_target_mib.erl +++ b/lib/snmp/src/agent/snmp_target_mib.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2011. All Rights Reserved. +%% Copyright Ericsson AB 1998-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -132,12 +132,12 @@ do_reconfigure(Dir) -> read_target_config_files(Dir) -> - ?vdebug("check target address config file",[]), - TAGen = fun(_D) -> ok end, + ?vdebug("check target address and parameter config file(s)",[]), + TAGen = fun(_D, _Reason) -> ok end, TAFilter = fun(Addr) -> Addr end, TACheck = fun(Entry) -> check_target_addr(Entry) end, - TPGen = fun(_D) -> ok end, + TPGen = fun(_D, _Reason) -> ok end, TPFilter = fun(Params) -> Params end, TPCheck = fun(Entry) -> check_target_params(Entry) end, diff --git a/lib/snmp/src/agent/snmp_user_based_sm_mib.erl b/lib/snmp/src/agent/snmp_user_based_sm_mib.erl index 69cebc858b..2e801622e7 100644 --- a/lib/snmp/src/agent/snmp_user_based_sm_mib.erl +++ b/lib/snmp/src/agent/snmp_user_based_sm_mib.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2010. All Rights Reserved. +%% Copyright Ericsson AB 1999-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -136,7 +136,7 @@ do_reconfigure(Dir) -> read_usm_config_files(Dir) -> ?vdebug("read usm config file",[]), - Gen = fun(D) -> generate_usm(D) end, + Gen = fun(D, Reason) -> generate_usm(D, Reason) end, Filter = fun(Usms) -> Usms end, Check = fun(Entry) -> check_usm(Entry) end, [Usms] = @@ -144,7 +144,7 @@ read_usm_config_files(Dir) -> Usms. -generate_usm(Dir) -> +generate_usm(Dir, _Reason) -> info_msg("Incomplete configuration. 
Generating empty usm.conf.", []), USMFile = filename:join(Dir, "usm.conf"), ok = file:write_file(USMFile, list_to_binary([])). diff --git a/lib/snmp/src/agent/snmp_view_based_acm_mib.erl b/lib/snmp/src/agent/snmp_view_based_acm_mib.erl index 2cee91b081..479a44795f 100644 --- a/lib/snmp/src/agent/snmp_view_based_acm_mib.erl +++ b/lib/snmp/src/agent/snmp_view_based_acm_mib.erl @@ -115,7 +115,7 @@ do_reconfigure(Dir) -> read_vacm_config_files(Dir) -> ?vdebug("read vacm config file",[]), - Gen = fun(_) -> ok end, + Gen = fun(_D, _Reason) -> ok end, Filter = fun(Vacms) -> Sec2Group = [X || {vacmSecurityToGroup, X} <- Vacms], Access = [X || {vacmAccess, X} <- Vacms], diff --git a/lib/snmp/src/agent/snmpa.erl b/lib/snmp/src/agent/snmpa.erl index c400aaddf7..b45a47ec6b 100644 --- a/lib/snmp/src/agent/snmpa.erl +++ b/lib/snmp/src/agent/snmpa.erl @@ -83,8 +83,11 @@ -export([add_agent_caps/2, del_agent_caps/1, get_agent_caps/0]). %% Audit Trail Log functions --export([log_to_txt/2, log_to_txt/3, log_to_txt/4, +-export([log_to_txt/1, + log_to_txt/2, log_to_txt/3, log_to_txt/4, log_to_txt/5, log_to_txt/6, log_to_txt/7, + log_to_io/1, log_to_io/2, log_to_io/3, + log_to_io/4, log_to_io/5, log_to_io/6, log_info/0, change_log_size/1, get_log_type/0, get_log_type/1, @@ -780,6 +783,8 @@ get_agent_caps() -> %%% Audit Trail Log functions %%%----------------------------------------------------------------- +log_to_txt(LogDir) -> + log_to_txt(LogDir, []). log_to_txt(LogDir, Mibs) -> OutFile = "snmpa_log.txt", LogName = ?audit_trail_log_name, @@ -800,6 +805,23 @@ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop). +log_to_io(LogDir) -> + log_to_io(LogDir, []). +log_to_io(LogDir, Mibs) -> + LogName = ?audit_trail_log_name, + LogFile = ?audit_trail_log_file, + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName) -> + LogFile = ?audit_trail_log_file, + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName, LogFile) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start). +log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop). + + log_info() -> LogName = ?audit_trail_log_name, snmp_log:info(LogName). diff --git a/lib/snmp/src/agent/snmpa_agent.erl b/lib/snmp/src/agent/snmpa_agent.erl index 9cc986cf47..9d30e332f1 100644 --- a/lib/snmp/src/agent/snmpa_agent.erl +++ b/lib/snmp/src/agent/snmpa_agent.erl @@ -1281,7 +1281,7 @@ handle_call({backup, BackupDir}, From, #state{backup = undefined} = S) -> ?vtrace("backup server: ~p", [BackupServer]), {noreply, S#state{backup = {BackupServer, From}}}; -handle_call({backup, _BackupDir}, From, #state{backup = Backup} = S) -> +handle_call({backup, _BackupDir}, _From, #state{backup = Backup} = S) -> ?vinfo("backup already in progress: ~p", [Backup]), {reply, {error, backup_in_progress}, S}; diff --git a/lib/snmp/src/agent/snmpa_local_db.erl b/lib/snmp/src/agent/snmpa_local_db.erl index 1ec8dd3874..2c0cad807a 100644 --- a/lib/snmp/src/agent/snmpa_local_db.erl +++ b/lib/snmp/src/agent/snmpa_local_db.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2011. All Rights Reserved. +%% Copyright Ericsson AB 1996-2012. All Rights Reserved. 
%% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -515,7 +515,7 @@ handle_call({backup, BackupDir}, From, {reply, Error, State} end; -handle_call({backup, _BackupDir}, From, #state{backup = Backup} = S) -> +handle_call({backup, _BackupDir}, _From, #state{backup = Backup} = S) -> ?vinfo("backup already in progress: ~p", [Backup]), {reply, {error, backup_in_progress}, S}; diff --git a/lib/snmp/src/agent/snmpa_mib.erl b/lib/snmp/src/agent/snmpa_mib.erl index 574467d38f..575a018c0c 100644 --- a/lib/snmp/src/agent/snmpa_mib.erl +++ b/lib/snmp/src/agent/snmpa_mib.erl @@ -580,7 +580,7 @@ handle_call({backup, BackupDir}, From, {reply, Error, State} end; -handle_call({backup, _BackupDir}, From, #state{backup = Backup} = S) -> +handle_call({backup, _BackupDir}, _From, #state{backup = Backup} = S) -> ?vinfo("backup already in progress: ~p", [Backup]), {reply, {error, backup_in_progress}, S}; diff --git a/lib/snmp/src/app/snmp.app.src b/lib/snmp/src/app/snmp.app.src index a880a14696..b11c1ef934 100644 --- a/lib/snmp/src/app/snmp.app.src +++ b/lib/snmp/src/app/snmp.app.src @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2009. All Rights Reserved. +%% Copyright Ericsson AB 1996-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -91,6 +91,7 @@ snmpm_mpd, snmpm_net_if, snmpm_net_if_filter, + snmpm_net_if_mt, snmpm_network_interface, snmpm_network_interface_filter, snmpm_server, diff --git a/lib/snmp/src/app/snmp.appup.src b/lib/snmp/src/app/snmp.appup.src index c8e5eec6db..8360d88c94 100644 --- a/lib/snmp/src/app/snmp.appup.src +++ b/lib/snmp/src/app/snmp.appup.src @@ -22,66 +22,181 @@ %% ----- U p g r a d e ------------------------------------------------------- [ + {"4.21.7", + [ + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, + {update, snmpa_mib, soft, soft_purge, soft_purge, []}, + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} + ] + }, {"4.21.6", [ + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, 
soft_purge, [snmp_conf]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21.5", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21.4", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + 
[snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21.3", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21.2", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, 
soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, @@ -89,17 +204,35 @@ {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21.1", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, @@ -108,91 +241,46 @@ {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, {update, snmpa_agent, soft, soft_purge, soft_purge, []}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []} + {update, snmp_note_store, soft, soft_purge, soft_purge, []}, + + {add_module, snmpm_net_if_mt} ] }, {"4.21", [ - {load_module, snmpa, soft_purge, soft_purge, []}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + 
{load_module, snmpa, soft_purge, soft_purge, [snmp]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, {update, snmpa_agent, soft, soft_purge, soft_purge, []}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []} - ] - }, - {"4.20.1", - [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, - {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - - {load_module, snmpa_vacm, soft_purge, soft_purge, []}, - {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, - {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, - {load_module, snmpm, soft_purge, soft_purge, - [snmpm_server, snmpm_config, snmp_config]}, - {load_module, snmp_conf, soft_purge, soft_purge, []}, - {load_module, snmp_config, soft_purge, soft_purge, []}, - {load_module, snmpm_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config, snmpm_config]}, - {load_module, snmpa_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config]}, - {load_module, snmpa_conf, soft_purge, soft_purge, [snmp_config]}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []}, - {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, - {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mpd]}, - {update, snmpm_config, soft, soft_purge, soft_purge, [snmp_conf]}, - {update, snmpm_server, soft, soft_purge, soft_purge, - [snmpm_net_if, snmpm_mpd, snmpm_config]}, - {update, snmpm_net_if, soft, soft_purge, soft_purge, - [snmp_conf, snmpm_mpd, snmpm_config]} - ] - }, - {"4.20", - [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, - {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, + {update, snmp_note_store, soft, soft_purge, soft_purge, []}, - {load_module, snmpa_vacm, soft_purge, soft_purge, []}, - {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, - {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, - [snmpa_mib_lib, snmp_conf]}, - {load_module, snmpm, soft_purge, soft_purge, - [snmpm_server, snmpm_config, snmp_config]}, - {load_module, snmp_conf, soft_purge, soft_purge, []}, - {load_module, snmp_config, soft_purge, soft_purge, []}, - {load_module, snmpm_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config, snmpm_config]}, - {load_module, snmpa_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config]}, - {load_module, 
snmpa_conf, soft_purge, soft_purge, [snmp_config]}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []}, - {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, - {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mpd]}, - {update, snmpm_config, soft, soft_purge, soft_purge, [snmp_conf]}, - {update, snmpm_server, soft, soft_purge, soft_purge, - [snmpm_net_if, snmpm_mpd, snmpm_config]}, - {update, snmpm_net_if, soft, soft_purge, soft_purge, - [snmp_conf, snmpm_mpd, snmpm_config]} + {add_module, snmpm_net_if_mt} ] } ], @@ -200,66 +288,181 @@ %% ------D o w n g r a d e --------------------------------------------------- [ + {"4.21.7", + [ + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, + {update, snmpa_mib, soft, soft_purge, soft_purge, []}, + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} + ] + }, {"4.21.6", [ + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21.5", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, 
soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21.4", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21.3", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, 
[snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21.2", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, @@ -267,17 +470,35 @@ {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, []} + {update, snmpa_agent, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21.1", [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmpa_mib_lib]}, + {load_module, snmp_config, soft_purge, 
soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, @@ -286,90 +507,46 @@ {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, {update, snmpa_agent, soft, soft_purge, soft_purge, []}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []} + {update, snmp_note_store, soft, soft_purge, soft_purge, []}, + + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] }, {"4.21", [ - {load_module, snmpa, soft_purge, soft_purge, []}, + {load_module, snmp_config, soft_purge, soft_purge, []}, + {load_module, snmp_conf, soft_purge, soft_purge, []}, + {load_module, snmp_community_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_framework_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_notification_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_standard_mib, soft_purge, soft_purge, [snmp_conf]}, + {load_module, snmp_user_based_sm_mib, soft_purge, soft_purge, [snmp_conf]}, + + {load_module, snmp, soft_purge, soft_purge, [snmp_log]}, + {load_module, snmpm, soft_purge, soft_purge, [snmp]}, + {load_module, snmp_log, soft_purge, soft_purge, []}, + {load_module, snmp_verbosity, soft_purge, soft_purge, []}, + {load_module, snmpm_mpd, soft_purge, soft_purge, []}, + + {load_module, snmpa, soft_purge, soft_purge, [snmp]}, {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, + {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, + [snmp_conf]}, {load_module, snmpa_vacm, soft_purge, soft_purge, []}, {load_module, snmpa_mpd, soft_purge, soft_purge, []}, {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, []}, + {load_module, snmp_target_mib, soft_purge, soft_purge, + [snmp_conf, snmpa_mib_lib]}, {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, {update, snmpa_mib, soft, soft_purge, soft_purge, []}, {update, snmpa_agent, soft, soft_purge, soft_purge, []}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []} - ] - }, - 
{"4.20.1", - [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, - {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - - {load_module, snmpa_vacm, soft_purge, soft_purge, []}, - {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, - {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, - {load_module, snmpm, soft_purge, soft_purge, - [snmpm_server, snmpm_config, snmp_config]}, - {load_module, snmp_conf, soft_purge, soft_purge, []}, - {load_module, snmp_config, soft_purge, soft_purge, []}, - {load_module, snmpm_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config, snmpm_config]}, - {load_module, snmpa_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config]}, - {load_module, snmpa_conf, soft_purge, soft_purge, [snmp_config]}, {update, snmp_note_store, soft, soft_purge, soft_purge, []}, - {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, - {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mpd]}, - {update, snmpm_config, soft, soft_purge, soft_purge, [snmp_conf]}, - {update, snmpm_server, soft, soft_purge, soft_purge, - [snmpm_net_if, snmpm_mpd, snmpm_config]}, - {update, snmpm_net_if, soft, soft_purge, soft_purge, - [snmp_conf, snmpm_mpd, snmpm_config]} - ] - }, - {"4.20", - [ - {load_module, snmpa, soft_purge, soft_purge, []}, - {load_module, snmpa_mib_lib, soft_purge, soft_purge, []}, - {update, snmpa_supervisor, soft, soft_purge, soft_purge, []}, - {load_module, snmpa_vacm, soft_purge, soft_purge, []}, - {load_module, snmpa_set_lib, soft_purge, soft_purge, []}, - {load_module, snmpa_trap, soft_purge, soft_purge, []}, - {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge, []}, - {load_module, snmp_target_mib, soft_purge, soft_purge, [snmp_conf]}, - {load_module, snmpm, soft_purge, soft_purge, - [snmpm_server, snmpm_config, snmp_config]}, - {load_module, snmp_conf, soft_purge, soft_purge, []}, - {load_module, snmp_config, soft_purge, soft_purge, []}, - {load_module, snmpm_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config, snmpm_config]}, - {load_module, snmpa_mpd, soft_purge, soft_purge, - [snmp_conf, snmp_config]}, - {load_module, snmpa_conf, soft_purge, soft_purge, [snmp_config]}, - {update, snmp_note_store, soft, soft_purge, soft_purge, []}, - {load_module, snmp_generic_mnesia, soft_purge, soft_purge, []}, - {update, snmpa_local_db, soft, soft_purge, soft_purge, []}, - {update, snmpa_mib, soft, soft_purge, soft_purge, []}, - {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_mpd]}, - {update, snmpm_config, soft, soft_purge, soft_purge, [snmp_conf]}, - {update, snmpm_server, soft, soft_purge, soft_purge, - [snmpm_net_if, snmpm_mpd, snmpm_config]}, - {update, snmpm_net_if, soft, soft_purge, soft_purge, - [snmp_conf, snmpm_mpd, snmpm_config]} + {remove, {snmpm_net_if_mt, soft_purge, soft_purge}} ] } ] diff --git a/lib/snmp/src/app/snmp.erl b/lib/snmp/src/app/snmp.erl index 3e0f05e604..fc03757e3f 100644 --- a/lib/snmp/src/app/snmp.erl +++ b/lib/snmp/src/app/snmp.erl @@ -50,6 +50,7 @@ read_mib/1, log_to_txt/5, log_to_txt/6, log_to_txt/7, + log_to_io/4, log_to_io/5, log_to_io/6, change_log_size/2, octet_string_to_bits/1, bits_to_octet_string/1, @@ -843,6 +844,12 @@ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) 
-> log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> snmp_log:log_to_txt(LogName, LogFile, LogDir, Mibs, OutFile, Start, Stop). +log_to_io(LogDir, Mibs, LogName, LogFile) -> + snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs). +log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> + snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs, Start). +log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> + snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs, Start, Stop). change_log_size(LogName, NewSize) -> snmp_log:change_size(LogName, NewSize). diff --git a/lib/snmp/src/compile/snmpc_lib.erl b/lib/snmp/src/compile/snmpc_lib.erl index 38bb8f3ac6..6e833b6c44 100644 --- a/lib/snmp/src/compile/snmpc_lib.erl +++ b/lib/snmp/src/compile/snmpc_lib.erl @@ -704,23 +704,29 @@ check_trap_name(EnterpriseName, Line, MEs) -> %% This information is needed to be able to create default instrumentation %% functions for tables. %%---------------------------------------------------------------------- -make_table_info(Line, _TableName, {augments,SrcTableEntry}, ColumnMEs) -> +make_table_info(Line, TableName, {augments, SrcTableEntry}, ColumnMEs) -> ColMEs = lists:keysort(#me.oid, ColumnMEs), - %% Nbr_of_Cols = length(ColMEs), + Nbr_of_Cols = length(ColMEs), MEs = ColMEs ++ (get(cdata))#cdata.mes, - Aug = case lookup(SrcTableEntry,MEs) of + Aug = case lookup(SrcTableEntry, MEs) of false -> print_error("Cannot AUGMENT the non-existing table entry ~p", - [SrcTableEntry],Line), + [SrcTableEntry], Line), {augments, error}; - {value,ME} -> - {augments, {SrcTableEntry,translate_type(ME#me.asn1_type)}} + {value, ME} -> + {augments, {SrcTableEntry, translate_type(ME#me.asn1_type)}} end, - #table_info{index_types = Aug}; -make_table_info(Line, TableName, {indexes,[]}, _ColumnMEs) -> + FirstNonIdxCol = augments_first_non_index_column(ColMEs), + NoAccs = list_not_accessible(FirstNonIdxCol, ColMEs), + FirstAcc = first_accessible(TableName, ColMEs), + #table_info{nbr_of_cols = Nbr_of_Cols, + first_accessible = FirstAcc, + not_accessible = NoAccs, + index_types = Aug}; +make_table_info(Line, TableName, {indexes, []}, _ColumnMEs) -> print_error("Table ~w lacks indexes.", [TableName],Line), #table_info{}; -make_table_info(Line, TableName, {indexes,Indexes}, ColumnMEs) -> +make_table_info(Line, TableName, {indexes, Indexes}, ColumnMEs) -> ColMEs = lists:keysort(#me.oid, ColumnMEs), NonImpliedIndexes = lists:map(fun non_implied_name/1, Indexes), test_read_create_access(ColMEs, Line, dummy), @@ -860,11 +866,17 @@ get_asn1_type(ColumnName, MEs, Line) -> end. test_index_positions(Line, Indexes, ColMEs) -> - TLI = lists:filter(fun (IndexName) -> - is_table_local_index(IndexName,ColMEs) end, - Indexes), + IsTLI = fun(IndexName) -> is_table_local_index(IndexName, ColMEs) end, + TLI = lists:filter(IsTLI, Indexes), test_index_positions_impl(Line, TLI, ColMEs). +%% A table that augments another cannot contain any index, +%% so the first non-index column is always the first column. +augments_first_non_index_column([]) -> + none; +augments_first_non_index_column([#me{oid=Col}|_ColMEs]) -> + Col. + %% Returns the first non-index column | none test_index_positions_impl(_Line, [], []) -> none; test_index_positions_impl(_Line, [], [#me{oid=Col}|_ColMEs]) -> diff --git a/lib/snmp/src/manager/depend.mk b/lib/snmp/src/manager/depend.mk index 0e7e9e3df7..2e7783c8ed 100644 --- a/lib/snmp/src/manager/depend.mk +++ b/lib/snmp/src/manager/depend.mk @@ -2,7 +2,7 @@ # %CopyrightBegin% # -# Copyright Ericsson AB 2004-2009. 
All Rights Reserved. +# Copyright Ericsson AB 2004-2012. All Rights Reserved. # # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -49,6 +49,13 @@ $(EBIN)/snmpm_net_if.$(EMULATOR): \ snmpm_net_if.erl \ snmpm_network_interface.erl +$(EBIN)/snmpm_net_if_mt.$(EMULATOR): \ + ../../include/snmp_types.hrl \ + ../misc/snmp_debug.hrl \ + ../misc/snmp_verbosity.hrl \ + snmpm_net_if_mt.erl \ + snmpm_network_interface.erl + $(EBIN)/snmpm_server.$(EMULATOR): \ ../../include/snmp_types.hrl \ ../../include/STANDARD-MIB.hrl \ diff --git a/lib/snmp/src/manager/modules.mk b/lib/snmp/src/manager/modules.mk index 79f3dd65d9..d46545ea3f 100644 --- a/lib/snmp/src/manager/modules.mk +++ b/lib/snmp/src/manager/modules.mk @@ -2,7 +2,7 @@ # %CopyrightBegin% # -# Copyright Ericsson AB 2004-2009. All Rights Reserved. +# Copyright Ericsson AB 2004-2012. All Rights Reserved. # # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -31,6 +31,7 @@ MODULES = \ snmpm_mpd \ snmpm_misc_sup \ snmpm_net_if \ + snmpm_net_if_mt \ snmpm_net_if_filter \ snmpm_server \ snmpm_server_sup \ diff --git a/lib/snmp/src/manager/snmpm.erl b/lib/snmp/src/manager/snmpm.erl index 6d2ac8d747..89eaee9f80 100644 --- a/lib/snmp/src/manager/snmpm.erl +++ b/lib/snmp/src/manager/snmpm.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2004-2011. All Rights Reserved. +%% Copyright Ericsson AB 2004-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -75,8 +75,11 @@ %% %% Logging + log_to_txt/1, log_to_txt/2, log_to_txt/3, log_to_txt/4, log_to_txt/5, log_to_txt/6, log_to_txt/7, + log_to_io/1, log_to_io/2, log_to_io/3, + log_to_io/4, log_to_io/5, log_to_io/6, change_log_size/1, get_log_type/0, set_log_type/1, @@ -1371,6 +1374,9 @@ cancel_async_request(UserId, ReqId) -> %%%----------------------------------------------------------------- %%% Audit Trail Log functions (for backward compatibility) %%%----------------------------------------------------------------- + +log_to_txt(LogDir) -> + log_to_txt(LogDir, []). log_to_txt(LogDir, Mibs) -> OutFile = "snmpm_log.txt", LogName = ?audit_trail_log_name, @@ -1391,6 +1397,23 @@ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop). +log_to_io(LogDir) -> + log_to_io(LogDir, []). +log_to_io(LogDir, Mibs) -> + LogName = ?audit_trail_log_name, + LogFile = ?audit_trail_log_file, + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName) -> + LogFile = ?audit_trail_log_file, + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName, LogFile) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile). +log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start). +log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> + snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop). + + change_log_size(NewSize) -> LogName = ?audit_trail_log_name, snmp:change_log_size(LogName, NewSize). 
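The log_to_io functions added to snmpm above mirror the log_to_txt API, but print the converted Audit Trail Log on stdio instead of writing an output file. A minimal usage sketch, assuming a made-up log directory and an empty MIB list (neither value comes from the patch):

    %% Convert the manager's Audit Trail Log and print it on stdio, using
    %% the documented defaults for LogName ("snmpm_log") and LogFile ("snmpm.log").
    %% The directory "/var/tmp/snmp/mgr/log" is a placeholder example value.
    snmpm:log_to_io("/var/tmp/snmp/mgr/log", []).

    %% The file based conversion is still available alongside it:
    snmpm:log_to_txt("/var/tmp/snmp/mgr/log", [], "snmpm_log.txt").

Both calls return ok or {error, Reason}, as documented in snmpm.xml above.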
diff --git a/lib/snmp/src/manager/snmpm_config.erl b/lib/snmp/src/manager/snmpm_config.erl index c2e57abddb..5bbf9e5542 100644 --- a/lib/snmp/src/manager/snmpm_config.erl +++ b/lib/snmp/src/manager/snmpm_config.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2004-2011. All Rights Reserved. +%% Copyright Ericsson AB 2004-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in diff --git a/lib/snmp/src/manager/snmpm_mpd.erl b/lib/snmp/src/manager/snmpm_mpd.erl index 103c87d32b..883224143e 100644 --- a/lib/snmp/src/manager/snmpm_mpd.erl +++ b/lib/snmp/src/manager/snmpm_mpd.erl @@ -110,9 +110,9 @@ process_msg(Msg, Domain, Addr, Port, State, NoteStore, Logger) -> #message{version = 'version-2', vsn_hdr = Community, data = Data} when State#state.v2c =:= true -> HS = ?empty_msg_size + length(Community), - (catch process_v1_v2c_msg('version-2', NoteStore, Msg, - Domain, Addr, Port, - Community, Data, HS, Logger)); + process_v1_v2c_msg('version-2', NoteStore, Msg, + Domain, Addr, Port, + Community, Data, HS, Logger); %% Version 3 #message{version = 'version-3', vsn_hdr = H, data = Data} diff --git a/lib/snmp/src/manager/snmpm_net_if_mt.erl b/lib/snmp/src/manager/snmpm_net_if_mt.erl new file mode 100644 index 0000000000..3e87f6a7fb --- /dev/null +++ b/lib/snmp/src/manager/snmpm_net_if_mt.erl @@ -0,0 +1,1259 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2004-2012. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% + +-module(snmpm_net_if_mt). + +-behaviour(gen_server). +-behaviour(snmpm_network_interface). + + +%% Network Interface callback functions +-export([ + start_link/2, + stop/1, + send_pdu/6, % Backward compatibillity + send_pdu/7, % Backward compatibillity + send_pdu/8, + + inform_response/4, + + note_store/2, + + info/1, + verbosity/2, + %% system_info_updated/2, + get_log_type/1, set_log_type/2, + filter_reset/1 + ]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + code_change/3, terminate/2]). + +-define(SNMP_USE_V3, true). +-include("snmp_types.hrl"). +-include("snmpm_internal.hrl"). +-include("snmpm_atl.hrl"). +-include("snmp_debug.hrl"). + +%% -define(VMODULE,"NET_IF"). +-include("snmp_verbosity.hrl"). + +-record(state, + { + server, + note_store, + sock, + mpd_state, + log, + irb = auto, % auto | {user, integer()} + irgc, + filter + }). + + +-define(DEFAULT_FILTER_MODULE, snmpm_net_if_filter). +-define(DEFAULT_FILTER_OPTS, [{module, ?DEFAULT_FILTER_MODULE}]). + +-ifdef(snmp_debug). +-define(GS_START_LINK(Args), + gen_server:start_link(?MODULE, Args, [{debug,[trace]}])). +-else. +-define(GS_START_LINK(Args), + gen_server:start_link(?MODULE, Args, [])). +-endif. + + +-define(IRGC_TIMEOUT, timer:minutes(5)). + +-define(ATL_SEQNO_INITIAL, 1). +-define(ATL_SEQNO_MAX, 2147483647). 
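snmpm_net_if_mt is a drop-in alternative to the default snmpm_net_if and is selected through the manager's net_if configuration. A hedged sketch follows: the option names mirror what this module reads via snmpm_config:system_info/1 (recbuf, sndbuf, bind_to, no_reuse and friends), but the directories and buffer sizes are placeholder values, not taken from the change.

    %% Sketch only: start the SNMP manager with the multi-threaded net-if.
    start_manager() ->
        Opts = [{config, [{dir,    "/tmp/snmp/mgr/conf"},
                          {db_dir, "/tmp/snmp/mgr/db"}]},
                {net_if, [{module,  snmpm_net_if_mt},
                          {options, [{recbuf,   65535},
                                     {sndbuf,   65535},
                                     {bind_to,  false},
                                     {no_reuse, false}]}]}],
        snmpm:start(Opts).
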
+ + +%%%------------------------------------------------------------------- +%%% API +%%%------------------------------------------------------------------- +start_link(Server, NoteStore) -> + ?d("start_link -> entry with" + "~n Server: ~p" + "~n NoteStore: ~p", [Server, NoteStore]), + Args = [Server, NoteStore], + ?GS_START_LINK(Args). + +stop(Pid) -> + call(Pid, stop). + +send_pdu(Pid, Pdu, Vsn, MsgData, Addr, Port) -> + send_pdu(Pid, Pdu, Vsn, MsgData, Addr, Port, ?DEFAULT_EXTRA_INFO). + +send_pdu(Pid, Pdu, Vsn, MsgData, Addr, Port, ExtraInfo) -> + Domain = snmpm_config:default_transport_domain(), + send_pdu(Pid, Pdu, Vsn, MsgData, Domain, Addr, Port, ExtraInfo). + +send_pdu(Pid, Pdu, Vsn, MsgData, Domain, Addr, Port, ExtraInfo) + when is_record(Pdu, pdu) -> + ?d("send_pdu -> entry with" + "~n Pid: ~p" + "~n Pdu: ~p" + "~n Vsn: ~p" + "~n MsgData: ~p" + "~n Domain: ~p" + "~n Addr: ~p" + "~n Port: ~p", [Pid, Pdu, Vsn, MsgData, Domain, Addr, Port]), + cast(Pid, {send_pdu, Pdu, Vsn, MsgData, Domain, Addr, Port, ExtraInfo}). + +note_store(Pid, NoteStore) -> + call(Pid, {note_store, NoteStore}). + +inform_response(Pid, Ref, Addr, Port) -> + cast(Pid, {inform_response, Ref, Addr, Port}). + +info(Pid) -> + call(Pid, info). + +verbosity(Pid, V) -> + call(Pid, {verbosity, V}). + +%% system_info_updated(Pid, What) -> +%% call(Pid, {system_info_updated, What}). + +get_log_type(Pid) -> + call(Pid, get_log_type). + +set_log_type(Pid, NewType) -> + call(Pid, {set_log_type, NewType}). + +filter_reset(Pid) -> + cast(Pid, filter_reset). + + +%%%------------------------------------------------------------------- +%%% Callback functions from gen_server +%%%------------------------------------------------------------------- + +%%-------------------------------------------------------------------- +%% Func: init/1 +%% Returns: {ok, State} | +%% {ok, State, Timeout} | +%% ignore | +%% {stop, Reason} +%%-------------------------------------------------------------------- +init([Server, NoteStore]) -> + ?d("init -> entry with" + "~n Server: ~p" + "~n NoteStore: ~p", [Server, NoteStore]), + case (catch do_init(Server, NoteStore)) of + {error, Reason} -> + {stop, Reason}; + {ok, State} -> + {ok, State} + end. + +do_init(Server, NoteStore) -> + process_flag(trap_exit, true), + + %% -- Prio -- + {ok, Prio} = snmpm_config:system_info(prio), + process_flag(priority, Prio), + + %% -- Create inform request table -- + %% This should really be protected, but it also needs to + %% be writable for the worker processes, so... 
+ ets:new(snmpm_inform_request_table, + [set, public, named_table, {keypos, 1}]), + + %% -- Verbosity -- + {ok, Verbosity} = snmpm_config:system_info(net_if_verbosity), + put(sname, mnif), + put(verbosity, Verbosity), + ?vlog("starting", []), + + %% -- MPD -- + {ok, Vsns} = snmpm_config:system_info(versions), + MpdState = snmpm_mpd:init(Vsns), + ?vdebug("MpdState: ~w", [MpdState]), + + %% -- Module dependent options -- + {ok, Opts} = snmpm_config:system_info(net_if_options), + + %% -- Inform response behaviour -- + {ok, IRB} = snmpm_config:system_info(net_if_irb), + IrGcRef = irgc_start(IRB), + + %% -- Socket -- + SndBuf = get_opt(Opts, sndbuf, default), + RecBuf = get_opt(Opts, recbuf, default), + BindTo = get_opt(Opts, bind_to, false), + NoReuse = get_opt(Opts, no_reuse, false), + {ok, Port} = snmpm_config:system_info(port), + {ok, Sock} = do_open_port(Port, SndBuf, RecBuf, BindTo, NoReuse), + + %% Flow control -- + FilterOpts = get_opt(Opts, filter, []), + FilterMod = create_filter(FilterOpts), + ?vdebug("FilterMod: ~w", [FilterMod]), + + %% -- Audit trail log --- + {ok, ATL} = snmpm_config:system_info(audit_trail_log), + Log = do_init_log(ATL), + ?vdebug("Log: ~w", [Log]), + + %% -- Initiate counters --- + init_counters(), + + %% -- We are done --- + State = #state{server = Server, + note_store = NoteStore, + mpd_state = MpdState, + sock = Sock, + log = Log, + irb = IRB, + irgc = IrGcRef, + filter = FilterMod}, + ?vdebug("started", []), + {ok, State}. + + +%% Open port +do_open_port(Port, SendSz, RecvSz, BindTo, NoReuse) -> + ?vtrace("do_open_port -> entry with" + "~n Port: ~p" + "~n SendSz: ~p" + "~n RecvSz: ~p" + "~n BindTo: ~p" + "~n NoReuse: ~p", [Port, SendSz, RecvSz, BindTo, NoReuse]), + IpOpts1 = bind_to(BindTo), + IpOpts2 = no_reuse(NoReuse), + IpOpts3 = recbuf(RecvSz), + IpOpts4 = sndbuf(SendSz), + IpOpts = [binary | IpOpts1 ++ IpOpts2 ++ IpOpts3 ++ IpOpts4], + OpenRes = + case init:get_argument(snmpm_fd) of + {ok, [[FdStr]]} -> + Fd = list_to_integer(FdStr), + gen_udp:open(0, [{fd, Fd}|IpOpts]); + error -> + gen_udp:open(Port, IpOpts) + end, + case OpenRes of + {error, _} = Error -> + throw(Error); + OK -> + OK + end. + +bind_to(true) -> + case snmpm_config:system_info(address) of + {ok, Addr} when is_list(Addr) -> + [{ip, list_to_tuple(Addr)}]; + {ok, Addr} -> + [{ip, Addr}]; + _ -> + [] + end; +bind_to(_) -> + []. + +no_reuse(false) -> + [{reuseaddr, true}]; +no_reuse(_) -> + []. + +recbuf(default) -> + []; +recbuf(Sz) -> + [{recbuf, Sz}]. + +sndbuf(default) -> + []; +sndbuf(Sz) -> + [{sndbuf, Sz}]. + + +create_filter(Opts) when is_list(Opts) -> + case get_opt(Opts, module, ?DEFAULT_FILTER_MODULE) of + ?DEFAULT_FILTER_MODULE = Mod -> + Mod; + Module -> + snmpm_network_interface_filter:verify(Module), + Module + end; +create_filter(BadOpts) -> + throw({error, {bad_filter_opts, BadOpts}}). 
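create_filter/1 accepts a {module, Mod} option (passed as {filter, FilterOpts} among the net_if options) and verifies the module against the snmpm_network_interface_filter behaviour. The callbacks it must export are the ones invoked further down in this file: accept_recv/2, accept_send/2, accept_recv_pdu/3 and accept_send_pdu/3, where returning false drops the message or PDU and any other value lets it through. A minimal sketch of such a filter (module name and address policy are made up):

    -module(example_snmpm_filter).
    -behaviour(snmpm_network_interface_filter).
    -export([accept_recv/2, accept_send/2,
             accept_recv_pdu/3, accept_send_pdu/3]).

    %% Only accept incoming datagrams from the 10.0.0.0/8 network.
    accept_recv({10, _, _, _}, _Port) -> true;
    accept_recv(_Addr, _Port)         -> false.

    %% Never restrict outgoing traffic.
    accept_send(_Addr, _Port) -> true.

    %% Accept every decoded PDU in both directions.
    accept_recv_pdu(_Addr, _Port, _PduType) -> true.
    accept_send_pdu(_Addr, _Port, _PduType) -> true.

Such a module would then be selected with {filter, [{module, example_snmpm_filter}]} in the net_if options.
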
+ + +%% ---------------------------------------------------------------------- +%% Audit Trail Logger +%% ---------------------------------------------------------------------- + +%% Open log +do_init_log(false) -> + ?vtrace("do_init_log(false) -> entry", []), + undefined; +do_init_log(true) -> + ?vtrace("do_init_log(true) -> entry", []), + {ok, Type} = snmpm_config:system_info(audit_trail_log_type), + {ok, Dir} = snmpm_config:system_info(audit_trail_log_dir), + {ok, Size} = snmpm_config:system_info(audit_trail_log_size), + {ok, Repair} = snmpm_config:system_info(audit_trail_log_repair), + Name = ?audit_trail_log_name, + File = filename:absname(?audit_trail_log_file, Dir), + case snmpm_config:system_info(audit_trail_log_seqno) of + {ok, true} -> + Initial = ?ATL_SEQNO_INITIAL, + Max = ?ATL_SEQNO_MAX, + Module = snmpm_config, + Function = increment_counter, + Args = [atl_seqno, Initial, Max], + SeqNoGen = {Module, Function, Args}, + case snmp_log:create(Name, File, SeqNoGen, Size, Repair, true) of + {ok, Log} -> + ?vdebug("log created: ~w", [Log]), + {Name, Log, Type}; + {error, Reason} -> + throw({error, {failed_create_audit_log, Reason}}) + end; + _ -> + case snmp_log:create(Name, File, Size, Repair, true) of + {ok, Log} -> + ?vdebug("log created: ~w", [Log]), + {Name, Log, Type}; + {error, Reason} -> + throw({error, {failed_create_audit_log, Reason}}) + end + end. + + +%% ---------------------------------------------------------------------- + +%%-------------------------------------------------------------------- +%% Func: handle_call/3 +%% Returns: {reply, Reply, State} | +%% {reply, Reply, State, Timeout} | +%% {noreply, State} | +%% {noreply, State, Timeout} | +%% {stop, Reason, Reply, State} | (terminate/2 is called) +%% {stop, Reason, State} (terminate/2 is called) +%%-------------------------------------------------------------------- +handle_call({verbosity, Verbosity}, _From, State) -> + ?vlog("received verbosity request", []), + put(verbosity, Verbosity), + {reply, ok, State}; + +%% handle_call({system_info_updated, What}, _From, State) -> +%% ?vlog("received system_info_updated request with What = ~p", [What]), +%% {NewState, Reply} = handle_system_info_updated(State, What), +%% {reply, Reply, NewState}; + +handle_call(get_log_type, _From, State) -> + ?vlog("received get-log-type request", []), + Reply = (catch handle_get_log_type(State)), + {reply, Reply, State}; + +handle_call({set_log_type, NewType}, _From, State) -> + ?vlog("received set-log-type request with NewType = ~p", [NewType]), + {NewState, Reply} = (catch handle_set_log_type(State, NewType)), + {reply, Reply, NewState}; + +handle_call({note_store, Pid}, _From, State) -> + ?vlog("received new note_store: ~w", [Pid]), + {reply, ok, State#state{note_store = Pid}}; + +handle_call(stop, _From, State) -> + ?vlog("received stop request", []), + Reply = ok, + {stop, normal, Reply, State}; + +handle_call(info, _From, State) -> + ?vlog("received info request", []), + Reply = get_info(State), + {reply, Reply, State}; + +handle_call(Req, From, State) -> + warning_msg("received unknown request (from ~p): ~n~p", [Req, From]), + {reply, {error, {invalid_request, Req}}, State}. 
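The call/cast split above is the whole management API of the process: verbosity, log-type handling, note-store updates, info and stop are synchronous, while send_pdu, inform_response and filter_reset are casts. A short usage sketch against a running net-if process (the {ok, Type} result assumes the audit trail log is enabled):

    probe_net_if(Pid) ->
        ok   = snmpm_net_if_mt:verbosity(Pid, debug),
        Info = snmpm_net_if_mt:info(Pid),          % [{process_memory, _}, {port_info, _}]
        ok   = snmpm_net_if_mt:filter_reset(Pid),  % cast, so it always returns ok
        case snmpm_net_if_mt:get_log_type(Pid) of
            {ok, Type} -> {Info, Type};
            Other      -> {Info, Other}
        end.
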
+ + +%%-------------------------------------------------------------------- +%% Func: handle_cast/2 +%% Returns: {noreply, State} | +%% {noreply, State, Timeout} | +%% {stop, Reason, State} (terminate/2 is called) +%%-------------------------------------------------------------------- +handle_cast({send_pdu, Pdu, Vsn, MsgData, Domain, Addr, Port, ExtraInfo}, + State) -> + ?vlog("received send_pdu message with" + "~n Pdu: ~p" + "~n Vsn: ~p" + "~n MsgData: ~p" + "~n Domain: ~p" + "~n Addr: ~p" + "~n Port: ~p", [Pdu, Vsn, MsgData, Domain, Addr, Port]), + maybe_process_extra_info(ExtraInfo), + handle_send_pdu(Pdu, Vsn, MsgData, Domain, Addr, Port, State), + {noreply, State}; + +handle_cast({inform_response, Ref, Addr, Port}, State) -> + ?vlog("received inform_response message with" + "~n Ref: ~p" + "~n Addr: ~p" + "~n Port: ~p", [Ref, Addr, Port]), + handle_inform_response(Ref, Addr, Port, State), + {noreply, State}; + +handle_cast(filter_reset, State) -> + ?vlog("received filter_reset message", []), + reset_counters(), + {noreply, State}; + +handle_cast(Msg, State) -> + warning_msg("received unknown message: ~n~p", [Msg]), + {noreply, State}. + + +%%-------------------------------------------------------------------- +%% Func: handle_info/2 +%% Returns: {noreply, State} | +%% {noreply, State, Timeout} | +%% {stop, Reason, State} (terminate/2 is called) +%%-------------------------------------------------------------------- +handle_info({udp, Sock, Ip, Port, Bytes}, #state{sock = Sock} = State) -> + ?vlog("received ~w bytes from ~p:~p", [size(Bytes), Ip, Port]), + handle_udp(Ip, Port, Bytes, State), + {noreply, State}; + +handle_info(inform_response_gc, State) -> + ?vlog("received inform_response_gc message", []), + State2 = handle_inform_response_gc(State), + {noreply, State2}; + +handle_info({disk_log, _Node, Log, Info}, State) -> + ?vlog("received disk_log message: " + "~n Info: ~p", [Info]), + State2 = handle_disk_log(Log, Info, State), + {noreply, State2}; + +handle_info({'DOWN', _MRef, process, Pid, {net_if_worker, ExitStatus}}, + State) -> + ?vdebug("received DOWN message from net_if-worker: " + "~n ExitStatus: ~p", [ExitStatus]), + handle_worker_exit(Pid, ExitStatus), + {noreply, State}; + +handle_info(Info, State) -> + warning_msg("received unknown info: ~n~p", [Info]), + {noreply, State}. + + +%%-------------------------------------------------------------------- +%% Func: terminate/2 +%% Purpose: Shutdown the server +%% Returns: any (ignored by gen_server) +%%-------------------------------------------------------------------- +terminate(Reason, #state{log = Log, irgc = IrGcRef}) -> + ?vdebug("terminate: ~p", [Reason]), + irgc_stop(IrGcRef), + %% Close logs + do_close_log(Log), + ok. + + +do_close_log({Log, _Type}) -> + (catch snmp_log:sync(Log)), + (catch snmp_log:close(Log)), + ok; +do_close_log(_) -> + ok. + + +%%---------------------------------------------------------------------- +%% Func: code_change/3 +%% Purpose: Convert process state when code is changed +%% Returns: {ok, NewState} +%%---------------------------------------------------------------------- + +code_change(_Vsn, State, _Extra) -> + ?d("code_change -> entry with" + "~n Vsn: ~p" + "~n State: ~p" + "~n Extra: ~p", [_Vsn, State, _Extra]), + {ok, State}. 
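The {'DOWN', ...} clause above is the receiving end of the worker pattern the rest of the module is built on: every incoming datagram, outgoing PDU and inform response is handled in a short-lived, monitored process that reports its result through its exit reason. Reduced to its essentials (an illustration, not the module's own code):

    %% Run Fun in a monitored worker; the result comes back to the owner as
    %% {'DOWN', MRef, process, Pid, {net_if_worker, Result}}.
    run_in_worker(Fun) ->
        spawn_opt(fun() -> exit({net_if_worker, (catch Fun())}) end, [monitor]).
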
+ + +%%%------------------------------------------------------------------- +%%% Internal functions +%%%------------------------------------------------------------------- + +handle_udp(Addr, Port, Bytes, State) -> + Verbosity = get(verbosity), + spawn_opt(fun() -> + Log = worker_init(State, Verbosity), + Res = (catch maybe_handle_recv_msg( + Addr, Port, Bytes, + State#state{log = Log})), + worker_exit(udp, {Addr, Port}, Res) + end, + [monitor]). + + +maybe_handle_recv_msg(Addr, Port, Bytes, #state{filter = FilterMod} = State) -> + case (catch FilterMod:accept_recv(Addr, Port)) of + false -> + %% Drop the received packet + inc(netIfMsgInDrops), + ok; + _ -> + handle_recv_msg(Addr, Port, Bytes, State) + end. + + +handle_recv_msg(Addr, Port, Bytes, #state{server = Pid}) + when is_binary(Bytes) andalso (size(Bytes) =:= 0) -> + Pid ! {snmp_error, {empty_message, Addr, Port}, Addr, Port}, + ok; + +handle_recv_msg(Addr, Port, Bytes, + #state{server = Pid, + note_store = NoteStore, + mpd_state = MpdState, + sock = Sock, + log = Log} = State) -> + Domain = snmp_conf:which_domain(Addr), % What the ****... + Logger = logger(Log, read, Addr, Port), + case (catch snmpm_mpd:process_msg(Bytes, Domain, Addr, Port, + MpdState, NoteStore, Logger)) of + + {ok, Vsn, Pdu, MS, ACM} -> + maybe_handle_recv_pdu(Addr, Port, Vsn, Pdu, MS, ACM, + Logger, State); + + {discarded, Reason, Report} -> + ?vdebug("discarded: ~p", [Reason]), + ErrorInfo = {failed_processing_message, Reason}, + Pid ! {snmp_error, ErrorInfo, Addr, Port}, + maybe_udp_send(State#state.filter, Sock, Addr, Port, Report), + ok; + + {discarded, Reason} -> + ?vdebug("discarded: ~p", [Reason]), + ErrorInfo = {failed_processing_message, Reason}, + Pid ! {snmp_error, ErrorInfo, Addr, Port}, + ok; + + Error -> + error_msg("processing of received message failed: " + "~n ~p", [Error]), + ok + end. + + +maybe_handle_recv_pdu(Addr, Port, + Vsn, #pdu{type = Type} = Pdu, PduMS, ACM, + Logger, + #state{filter = FilterMod} = State) -> + case (catch FilterMod:accept_recv_pdu(Addr, Port, Type)) of + false -> + inc(netIfPduInDrops), + ok; + _ -> + handle_recv_pdu(Addr, Port, Vsn, Pdu, PduMS, ACM, Logger, State) + end; +maybe_handle_recv_pdu(Addr, Port, Vsn, Trap, PduMS, ACM, Logger, + #state{filter = FilterMod} = State) + when is_record(Trap, trappdu) -> + case (catch FilterMod:accept_recv_pdu(Addr, Port, trappdu)) of + false -> + inc(netIfPduInDrops), + ok; + _ -> + handle_recv_pdu(Addr, Port, Vsn, Trap, PduMS, ACM, Logger, State) + end; +maybe_handle_recv_pdu(Addr, Port, Vsn, Pdu, PduMS, ACM, Logger, State) -> + handle_recv_pdu(Addr, Port, Vsn, Pdu, PduMS, ACM, Logger, State). + + +handle_recv_pdu(Addr, Port, + Vsn, #pdu{type = 'inform-request'} = Pdu, _PduMS, ACM, + Logger, #state{server = Pid, irb = IRB} = State) -> + handle_inform_request(IRB, Pid, Vsn, Pdu, ACM, + Addr, Port, Logger, State); +handle_recv_pdu(Addr, Port, + _Vsn, #pdu{type = report} = Pdu, _PduMS, ok, + _Logger, + #state{server = Pid} = _State) -> + ?vtrace("received report - ok", []), + Pid ! {snmp_report, {ok, Pdu}, Addr, Port}, + ok; +handle_recv_pdu(Addr, Port, + _Vsn, #pdu{type = report} = Pdu, _PduMS, + {error, ReqId, Reason}, + _Logger, + #state{server = Pid} = _State) -> + ?vtrace("received report - error", []), + Pid ! {snmp_report, {error, ReqId, Reason, Pdu}, Addr, Port}, + ok; +handle_recv_pdu(Addr, Port, + _Vsn, #pdu{type = 'snmpv2-trap'} = Pdu, _PduMS, _ACM, + _Logger, + #state{server = Pid} = _State) -> + ?vtrace("received snmpv2-trap", []), + Pid ! 
{snmp_trap, Pdu, Addr, Port}, + ok; +handle_recv_pdu(Addr, Port, + _Vsn, Trap, _PduMS, _ACM, + _Logger, + #state{server = Pid} = _State) when is_record(Trap, trappdu) -> + ?vtrace("received trappdu", []), + Pid ! {snmp_trap, Trap, Addr, Port}, + ok; +handle_recv_pdu(Addr, Port, + _Vsn, Pdu, _PduMS, _ACM, + _Logger, + #state{server = Pid} = _State) when is_record(Pdu, pdu) -> + ?vtrace("received pdu", []), + Pid ! {snmp_pdu, Pdu, Addr, Port}, + ok; +handle_recv_pdu(_Addr, _Port, _Vsn, Pdu, _PduMS, ACM, _Logger, _State) -> + ?vlog("received unexpected pdu: " + "~n Pdu: ~p" + "~n ACM: ~p", [Pdu, ACM]), + ok. + + +handle_inform_request(auto, Pid, Vsn, Pdu, ACM, Addr, Port, Logger, State) -> + ?vtrace("received inform-request (true)", []), + Pid ! {snmp_inform, ignore, Pdu, Addr, Port}, + RePdu = make_response_pdu(Pdu), + maybe_send_inform_response(RePdu, Vsn, ACM, Addr, Port, Logger, State); +handle_inform_request({user, To}, Pid, Vsn, #pdu{request_id = ReqId} = Pdu, + ACM, Addr, Port, _Logger, _State) -> + ?vtrace("received inform-request (false)", []), + + Pid ! {snmp_inform, ReqId, Pdu, Addr, Port}, + + %% Before we go any further, we need to check that we have not + %% already received this message (possible resend). + + Key = {ReqId, Addr, Port}, + case ets:lookup(snmpm_inform_request_table, Key) of + [_] -> + %% OK, we already know about this. We assume this + %% is a resend. Either the agent is really eager or + %% the user has not answered yet. Bad user! + ok; + [] -> + RePdu = make_response_pdu(Pdu), + Expire = t() + To, + Rec = {Key, Expire, {Vsn, ACM, RePdu}}, + ets:insert(snmpm_inform_request_table, Rec) + end, + ok. + +handle_inform_response(Ref, Addr, Port, State) -> + Verbosity = get(verbosity), + spawn_opt(fun() -> + Log = worker_init(State, Verbosity), + Res = (catch do_handle_inform_response( + Ref, + Addr, Port, + State#state{log = Log})), + worker_exit(inform_reponse, {Addr, Port}, Res) + end, + [monitor]). + + + +do_handle_inform_response(Ref, Addr, Port, State) -> + Key = {Ref, Addr, Port}, + case ets:lookup(snmpm_inform_request_table, Key) of + [{Key, _, {Vsn, ACM, RePdu}}] -> + Logger = logger(State#state.log, read, Addr, Port), + ets:delete(snmpm_inform_request_table, Key), + maybe_send_inform_response(RePdu, Vsn, ACM, Addr, Port, + Logger, State); + [] -> + %% Already acknowledged, or the user was to slow to reply... + ok + end, + ok. + +maybe_send_inform_response(RePdu, Vsn, ACM, Addr, Port, Logger, + #state{server = Pid, + sock = Sock, + filter = FilterMod}) -> + case (catch FilterMod:accept_send_pdu(Addr, Port, pdu_type_of(RePdu))) of + false -> + inc(netIfPduOutDrops), + ok; + _ -> + case snmpm_mpd:generate_response_msg(Vsn, RePdu, ACM, Logger) of + {ok, Msg} -> + maybe_udp_send(FilterMod, Sock, Addr, Port, Msg); + {discarded, Reason} -> + ?vlog("failed generating response message:" + "~n Reason: ~p", [Reason]), + ReqId = RePdu#pdu.request_id, + ErrorInfo = {failed_generating_response, {RePdu, Reason}}, + Pid ! {snmp_error, ReqId, ErrorInfo, Addr, Port}, + ok + end + end. + +handle_inform_response_gc(#state{irb = IRB} = State) -> + ets:safe_fixtable(snmpm_inform_request_table, true), + do_irgc(ets:first(snmpm_inform_request_table), t()), + ets:safe_fixtable(snmpm_inform_request_table, false), + State#state{irgc = irgc_start(IRB)}. + +%% We are deleting at the same time as we are traversing the table!!! 
+do_irgc('$end_of_table', _) -> + ok; +do_irgc(Key, Now) -> + Next = ets:next(snmpm_inform_request_table, Key), + case ets:lookup(snmpm_inform_request_table, Key) of + [{Key, BestBefore, _}] when BestBefore < Now -> + ets:delete(snmpm_inform_request_table, Key); + _ -> + ok + end, + do_irgc(Next, Now). + +irgc_start(auto) -> + undefined; +irgc_start(_) -> + erlang:send_after(?IRGC_TIMEOUT, self(), inform_response_gc). + +irgc_stop(undefined) -> + ok; +irgc_stop(Ref) -> + (catch erlang:cancel_timer(Ref)). + + +handle_send_pdu(Pdu, Vsn, MsgData, Domain, Addr, Port, State) -> + Verbosity = get(verbosity), + spawn_opt(fun() -> + Log = worker_init(State, Verbosity), + Res = (catch maybe_handle_send_pdu( + Pdu, Vsn, MsgData, + Domain, Addr, Port, + State#state{log = Log})), + worker_exit(send_pdu, {Domain, Addr, Port}, Res) + end, + [monitor]). + +maybe_handle_send_pdu(Pdu, Vsn, MsgData, Domain, Addr, Port, + #state{filter = FilterMod} = State) -> + case (catch FilterMod:accept_send_pdu(Addr, Port, pdu_type_of(Pdu))) of + false -> + inc(netIfPduOutDrops), + ok; + _ -> + do_handle_send_pdu(Pdu, Vsn, MsgData, Domain, Addr, Port, State) + end. + +do_handle_send_pdu(Pdu, Vsn, MsgData, _Domain, Addr, Port, + #state{server = Pid, + note_store = NoteStore, + sock = Sock, + log = Log, + filter = FilterMod}) -> + Logger = logger(Log, write, Addr, Port), + case (catch snmpm_mpd:generate_msg(Vsn, NoteStore, + Pdu, MsgData, Logger)) of + {ok, Msg} -> + ?vtrace("do_handle_send_pdu -> message generated", []), + maybe_udp_send(FilterMod, Sock, Addr, Port, Msg); + {discarded, Reason} -> + ?vlog("PDU not sent: " + "~n PDU: ~p" + "~n Reason: ~p", [Pdu, Reason]), + Pid ! {snmp_error, Pdu, Reason}, + ok + end. + + +maybe_udp_send(FilterMod, Sock, Addr, Port, Msg) -> + case (catch FilterMod:accept_send(Addr, Port)) of + false -> + inc(netIfMsgOutDrops), + ok; + _ -> + udp_send(Sock, Addr, Port, Msg) + end. + + +udp_send(Sock, Addr, Port, Msg) -> + case (catch gen_udp:send(Sock, Addr, Port, Msg)) of + ok -> + ?vdebug("sent ~w bytes to ~w:~w [~w]", + [sz(Msg), Addr, Port, Sock]), + ok; + {error, Reason} -> + error_msg("failed sending message to ~p:~p: " + "~n ~p",[Addr, Port, Reason]), + ok; + Error -> + error_msg("failed sending message to ~p:~p: " + "~n ~p",[Addr, Port, Error]), + ok + end. + +sz(B) when is_binary(B) -> + size(B); +sz(L) when is_list(L) -> + length(L); +sz(_) -> + undefined. + + +handle_disk_log(_Log, {wrap, NoLostItems}, State) -> + ?vlog("Audit Trail Log - wrapped: ~w previously logged items where lost", + [NoLostItems]), + State; +handle_disk_log(_Log, {truncated, NoLostItems}, State) -> + ?vlog("Audit Trail Log - truncated: ~w items where lost when truncating", + [NoLostItems]), + State; +handle_disk_log(_Log, full, State) -> + error_msg("Failed to write to Audit Trail Log (full)", []), + State; +handle_disk_log(_Log, {error_status, ok}, State) -> + State; +handle_disk_log(_Log, {error_status, {error, Reason}}, State) -> + error_msg("Error status received from Audit Trail Log: " + "~n~p", [Reason]), + State; +handle_disk_log(_Log, _Info, State) -> + State. 
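When the inform-response behaviour is {user, Timeout}, handle_inform_request/9 parks the prepared response in snmpm_inform_request_table as {{ReqId, Addr, Port}, Expire, {Vsn, ACM, RePdu}} and waits for an acknowledgement; do_irgc/2 above discards entries whose Expire time has passed. The acknowledgement itself is just a call to the inform_response/4 API with the same key. A sketch, assuming NetIf is this process and ReqId/Addr/Port come from the {snmp_inform, ReqId, Pdu, Addr, Port} message delivered to the server:

    ack_inform(NetIf, ReqId, Addr, Port) ->
        %% Looks the request up in snmpm_inform_request_table, sends the
        %% stored response PDU and deletes the entry
        %% (see do_handle_inform_response/4 above).
        snmpm_net_if_mt:inform_response(NetIf, ReqId, Addr, Port).
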
+ + +%% mk_discovery_msg('version-3', Pdu, _VsnHdr, UserName) -> +%% ScopedPDU = #scopedPdu{contextEngineID = "", +%% contextName = "", +%% data = Pdu}, +%% Bytes = snmp_pdus:enc_scoped_pdu(ScopedPDU), +%% MsgID = get(msg_id), +%% put(msg_id,MsgID+1), +%% UsmSecParams = +%% #usmSecurityParameters{msgAuthoritativeEngineID = "", +%% msgAuthoritativeEngineBoots = 0, +%% msgAuthoritativeEngineTime = 0, +%% msgUserName = UserName, +%% msgPrivacyParameters = "", +%% msgAuthenticationParameters = ""}, +%% SecBytes = snmp_pdus:enc_usm_security_parameters(UsmSecParams), +%% PduType = Pdu#pdu.type, +%% Hdr = #v3_hdr{msgID = MsgID, +%% msgMaxSize = 1000, +%% msgFlags = snmp_misc:mk_msg_flags(PduType, 0), +%% msgSecurityModel = ?SEC_USM, +%% msgSecurityParameters = SecBytes}, +%% Msg = #message{version = 'version-3', vsn_hdr = Hdr, data = Bytes}, +%% case (catch snmp_pdus:enc_message_only(Msg)) of +%% {'EXIT', Reason} -> +%% error("Encoding error. Pdu: ~w. Reason: ~w",[Pdu, Reason]), +%% error; +%% L when list(L) -> +%% {Msg, L} +%% end; +%% mk_discovery_msg(Version, Pdu, {Com, _, _, _, _}, UserName) -> +%% Msg = #message{version = Version, vsn_hdr = Com, data = Pdu}, +%% case catch snmp_pdus:enc_message(Msg) of +%% {'EXIT', Reason} -> +%% error("Encoding error. Pdu: ~w. Reason: ~w",[Pdu, Reason]), +%% error; +%% L when list(L) -> +%% {Msg, L} +%% end. + + +%% mk_msg('version-3', Pdu, {Context, User, EngineID, CtxEngineId, SecLevel}, +%% MsgData) -> +%% %% Code copied from snmp_mpd.erl +%% {MsgId, SecName, SecData} = +%% if +%% tuple(MsgData), Pdu#pdu.type == 'get-response' -> +%% MsgData; +%% true -> +%% Md = get(msg_id), +%% put(msg_id, Md + 1), +%% {Md, User, []} +%% end, +%% ScopedPDU = #scopedPdu{contextEngineID = CtxEngineId, +%% contextName = Context, +%% data = Pdu}, +%% ScopedPDUBytes = snmp_pdus:enc_scoped_pdu(ScopedPDU), + +%% PduType = Pdu#pdu.type, +%% V3Hdr = #v3_hdr{msgID = MsgId, +%% msgMaxSize = 1000, +%% msgFlags = snmp_misc:mk_msg_flags(PduType, SecLevel), +%% msgSecurityModel = ?SEC_USM}, +%% Message = #message{version = 'version-3', vsn_hdr = V3Hdr, +%% data = ScopedPDUBytes}, +%% SecEngineID = case PduType of +%% 'get-response' -> snmp_framework_mib:get_engine_id(); +%% _ -> EngineID +%% end, +%% case catch snmp_usm:generate_outgoing_msg(Message, SecEngineID, +%% SecName, SecData, SecLevel) of +%% {'EXIT', Reason} -> +%% error("Encoding error. Pdu: ~w. Reason: ~w",[Pdu, Reason]), +%% error; +%% {error, Reason} -> +%% error("Encoding error. Pdu: ~w. Reason: ~w",[Pdu, Reason]), +%% error; +%% Packet -> +%% Packet +%% end; +%% mk_msg(Version, Pdu, {Com, _User, _EngineID, _Ctx, _SecLevel}, _SecData) -> +%% Msg = #message{version = Version, vsn_hdr = Com, data = Pdu}, +%% case catch snmp_pdus:enc_message(Msg) of +%% {'EXIT', Reason} -> +%% error("Encoding error. Pdu: ~w. Reason: ~w",[Pdu, Reason]), +%% error; +%% B when list(B) -> +%% B +%% end. + + +%% handle_system_info_updated(#state{log = {Log, _OldType}} = State, +%% audit_trail_log_type = _What) -> +%% %% Just to make sure, check that ATL is actually enabled +%% case snmpm_config:system_info(audit_trail_log) of +%% {ok, true} -> +%% {ok, Type} = snmpm_config:system_info(audit_trail_log_type), +%% NewState = State#state{log = {Log, Type}}, +%% {NewState, ok}; +%% _ -> +%% {State, {error, {adt_not_enabled}}} +%% end; +%% handle_system_info_updated(_State, _What) -> +%% ok. 
+ +handle_get_log_type(#state{log = {_Log, Value}} = State) -> + %% Just to make sure, check that ATL is actually enabled + case snmpm_config:system_info(audit_trail_log) of + {ok, true} -> + Type = + case {lists:member(read, Value), lists:member(write, Value)} of + {true, true} -> + read_write; + {true, false} -> + read; + {false, true} -> + write; + {false, false} -> + throw({State, {error, {bad_atl_type, Value}}}) + end, + {ok, Type}; + _ -> + {error, not_enabled} + end; +handle_get_log_type(_State) -> + {error, not_enabled}. + +handle_set_log_type(#state{log = {Log, OldValue}} = State, NewType) -> + %% Just to make sure, check that ATL is actually enabled + case snmpm_config:system_info(audit_trail_log) of + {ok, true} -> + NewValue = + case NewType of + read -> + [read]; + write -> + [write]; + read_write -> + [read,write]; + _ -> + throw({State, {error, {bad_atl_type, NewType}}}) + end, + NewState = State#state{log = {Log, NewValue}}, + OldType = + case {lists:member(read, OldValue), + lists:member(write, OldValue)} of + {true, true} -> + read_write; + {true, false} -> + read; + {false, true} -> + write; + {false, false} -> + throw({State, {error, {bad_atl_type, OldValue}}}) + end, + {NewState, {ok, OldType}}; + _ -> + {State, {error, not_enabled}} + end; +handle_set_log_type(State, _NewType) -> + {State, {error, not_enabled}}. + + +%% ------------------------------------------------------------------- + +worker_init(#state{log = undefined = Log}, Verbosity) -> + worker_init2(Log, Verbosity); +worker_init(#state{log = {Name, Log, Type}}, Verbosity) -> + case snmp_log:open(Name, Log) of + {ok, NewLog} -> + worker_init2({Name, NewLog, Type}, Verbosity); + {error, Reason} -> + warning_msg("NetIf worker ~p failed opening ATL: " + "~n ~p", [self(), Reason]), + NewLog = undefined, + worker_init2({Name, NewLog, Type}, Verbosity) + end; +worker_init(State, Verbosity) -> + ?vinfo("worker_init -> entry with invalid data: " + "~n State: ~p" + "~n Verbosity: ~p", [State, Verbosity]), + exit({worker_init, State, Verbosity}). + +worker_init2(Log, Verbosity) -> + put(sname, mnifw), + put(verbosity, Verbosity), + Log. + + +worker_exit(Tag, Info, Result) -> + exit({net_if_worker, {Tag, Info, Result}}). + +handle_worker_exit(_, {_, _, ok}) -> + ok; +handle_worker_exit(Pid, {udp, {Addr, Port}, ExitStatus}) -> + warning_msg("Worker process (~p) terminated " + "while processing (incomming) message from ~w:~w: " + "~n~p", [Pid, Addr, Port, ExitStatus]), + ok; +handle_worker_exit(Pid, {send_pdu, {Domain, Addr, Port}, ExitStatus}) -> + warning_msg("Worker process (~p) terminated " + "while processing (outgoing) pdu for [~w] ~w:~w: " + "~n~p", [Pid, Domain, Addr, Port, ExitStatus]), + ok; +handle_worker_exit(Pid, {inform_response, {Addr, Port}, ExitStatus}) -> + warning_msg("Worker process (~p) terminated " + "while processing (outgoing) inform response for ~w:~w: " + "~n~p", [Pid, Addr, Port, ExitStatus]), + ok; +handle_worker_exit(_, _) -> + ok. + + +%% ------------------------------------------------------------------- + +make_response_pdu(#pdu{request_id = ReqId, varbinds = Vbs}) -> + #pdu{type = 'get-response', + request_id = ReqId, + error_status = noError, + error_index = 0, + varbinds = Vbs}. + + +%% ---------------------------------------------------------------- + +pdu_type_of(#pdu{type = Type}) -> + Type; +pdu_type_of(TrapPdu) when is_record(TrapPdu, trappdu) -> + trap. 
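make_response_pdu/1 acknowledges an inform by echoing its request-id and varbinds in a 'get-response' with noError/0. A tiny illustration, using the #pdu{} record from snmp_types.hrl:

    %% Requires -include_lib("snmp/include/snmp_types.hrl") in a stand-alone module.
    response_example() ->
        Inform = #pdu{type         = 'inform-request',
                      request_id   = 42,
                      error_status = noError,
                      error_index  = 0,
                      varbinds     = []},
        #pdu{type         = 'get-response',
             request_id   = 42,
             error_status = noError,
             error_index  = 0,
             varbinds     = []} = make_response_pdu(Inform),
        ok.
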
+ + +%% ------------------------------------------------------------------- + +%% At this point this function is used during testing +maybe_process_extra_info(?DEFAULT_EXTRA_INFO) -> + ok; +maybe_process_extra_info({?SNMPM_EXTRA_INFO_TAG, Fun}) + when is_function(Fun, 0) -> + (catch Fun()), + ok; +maybe_process_extra_info(_ExtraInfo) -> + ok. + + +%% ------------------------------------------------------------------- + +t() -> + {A,B,C} = erlang:now(), + A*1000000000+B*1000+(C div 1000). + + +%% ------------------------------------------------------------------- + +logger(undefined, _Type, _Addr, _Port) -> + fun(_) -> + ok + end; +logger({_Name, Log, Types}, Type, Addr, Port) -> + case lists:member(Type, Types) of + true -> + fun(Msg) -> + snmp_log:log(Log, Msg, Addr, Port) + end; + false -> + fun(_) -> + ok + end + end. + + +%% ------------------------------------------------------------------- + +%% info_msg(F, A) -> +%% ?snmpm_info("NET-IF server: " ++ F, A). + +warning_msg(F, A) -> + ?snmpm_warning("NET-IF server: " ++ F, A). + +error_msg(F, A) -> + ?snmpm_error("NET-IF server: " ++ F, A). + + + +%%%------------------------------------------------------------------- + +% get_opt(Key, Opts) -> +% ?vtrace("get option ~w", [Key]), +% snmp_misc:get_option(Key, Opts). + +get_opt(Opts, Key, Def) -> + ?vtrace("get option ~w with default ~p", [Key, Def]), + snmp_misc:get_option(Key, Opts, Def). + + +%% ------------------------------------------------------------------- + +get_info(#state{sock = Id}) -> + ProcSize = proc_mem(self()), + PortInfo = get_port_info(Id), + [{process_memory, ProcSize}, {port_info, PortInfo}]. + +proc_mem(P) when is_pid(P) -> + case (catch erlang:process_info(P, memory)) of + {memory, Sz} when is_integer(Sz) -> + Sz; + _ -> + undefined + end. +%% proc_mem(_) -> +%% undefined. + + +get_port_info(Id) -> + PortInfo = + case (catch erlang:port_info(Id)) of + PI when is_list(PI) -> + [{port_info, PI}]; + _ -> + [] + end, + PortStatus = + case (catch prim_inet:getstatus(Id)) of + {ok, PS} -> + [{port_status, PS}]; + _ -> + [] + end, + PortAct = + case (catch inet:getopts(Id, [active])) of + {ok, PA} -> + [{port_act, PA}]; + _ -> + [] + end, + PortStats = + case (catch inet:getstat(Id)) of + {ok, Stat} -> + [{port_stats, Stat}]; + _ -> + [] + end, + IfList = + case (catch inet:getif(Id)) of + {ok, IFs} -> + [{interfaces, IFs}]; + _ -> + [] + end, + BufSz = + case (catch inet:getopts(Id, [recbuf, sndbuf, buffer])) of + {ok, Sz} -> + [{buffer_size, Sz}]; + _ -> + [] + end, + [{socket, Id}] ++ + IfList ++ + PortStats ++ + PortInfo ++ + PortStatus ++ + PortAct ++ + BufSz. + + +%%----------------------------------------------------------------- +%% Counter functions +%%----------------------------------------------------------------- +init_counters() -> + F = fun(Counter) -> maybe_create_counter(Counter) end, + lists:map(F, counters()). + +reset_counters() -> + F = fun(Counter) -> snmpm_config:reset_stats_counter(Counter) end, + lists:map(F, counters()). + +maybe_create_counter(Counter) -> + snmpm_config:maybe_cre_stats_counter(Counter, 0). + +counters() -> + [ + netIfMsgOutDrops, + netIfMsgInDrops, + netIfPduOutDrops, + netIfPduInDrops + ]. + +inc(Name) -> inc(Name, 1). +inc(Name, N) -> snmpm_config:incr_stats_counter(Name, N). + +%% get_counters() -> +%% Counters = counters(), +%% get_counters(Counters, []). 
+ +%% get_counters([], Acc) -> +%% lists:reverse(Acc); +%% get_counters([Counter|Counters], Acc) -> +%% case snmpm_config:get_stats_counter(Counter) of +%% {ok, CounterVal} -> +%% get_counters(Counters, [{Counter, CounterVal}|Acc]); +%% _ -> +%% get_counters(Counters, Acc) +%% end. + + +%% ---------------------------------------------------------------- + +call(Pid, Req) -> + call(Pid, Req, infinity). + +call(Pid, Req, Timeout) -> + gen_server:call(Pid, Req, Timeout). + +cast(Pid, Msg) -> + gen_server:cast(Pid, Msg). + diff --git a/lib/snmp/src/misc/snmp_conf.erl b/lib/snmp/src/misc/snmp_conf.erl index 7249def24e..e1e7fab57b 100644 --- a/lib/snmp/src/misc/snmp_conf.erl +++ b/lib/snmp/src/misc/snmp_conf.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2011. All Rights Reserved. +%% Copyright Ericsson AB 1996-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -71,6 +71,18 @@ %%----------------------------------------------------------------- +%% read_files(Dir, Files) -> Configs +%% Dir - string() - Full path to the config dir. +%% Files - [{Gen, Filter, Check, FileName}] +%% Gen - function/2 - In case of failure when reading the config file, +%% this function is called to either generate a +%% default file or issue the error. +%% Filter - function/1 - Filters all the config entries read from the file +%% Check - function/1 - Check each entry as they are read from the file. +%% FileName - string() - Name of the config file. +%% Configs - [config_entry()] +%% config_entry() - term() + read_files(Dir, Files) when is_list(Dir) andalso is_list(Files) -> read_files(Dir, Files, []). @@ -90,7 +102,7 @@ read_files(Dir, [{Gen, Filter, Check, FileName}|Files], Res) {error, R} -> ?vlog("failed reading file info for ~s: " "~n ~p", [FileName, R]), - Gen(Dir), + Gen(Dir, R), read_files(Dir, Files, [Filter([])|Res]) end. @@ -99,6 +111,7 @@ read_files(Dir, [{Gen, Filter, Check, FileName}|Files], Res) read(File, Check) when is_function(Check) -> ?vdebug("read -> entry with" "~n File: ~p", [File]), + Fd = open_file(File), case loop(Fd, [], Check, 1, File) of diff --git a/lib/snmp/src/misc/snmp_config.erl b/lib/snmp/src/misc/snmp_config.erl index 6ab20e3e48..1da2c339c7 100644 --- a/lib/snmp/src/misc/snmp_config.erl +++ b/lib/snmp/src/misc/snmp_config.erl @@ -90,6 +90,17 @@ ]). +-export_type([void/0, + verify_config_entry_function/0, + verify_config_function/0, + write_config_function/0]). + + +%%---------------------------------------------------------------------- + +-type void() :: term(). % Any value - ignored + + %%---------------------------------------------------------------------- %% Handy SNMP configuration %%---------------------------------------------------------------------- @@ -2356,53 +2367,109 @@ write_sys_config_file_manager_atl_opt(Fid, {seqno, SeqNo}) -> header() -> - {Y,Mo,D} = date(), - {H,Mi,S} = time(), + {Y, Mo, D} = date(), + {H, Mi, S} = time(), io_lib:format("%% This file was generated by " - "snmp_config (version-~s) ~w-~2.2.0w-~2.2.0w " + "~w (version-~s) ~w-~2.2.0w-~2.2.0w " "~2.2.0w:~2.2.0w:~2.2.0w\n", - [?version,Y,Mo,D,H,Mi,S]). + [?MODULE, ?version, Y, Mo, D, H, Mi, S]). + +%% *If* these functions are successfull, they successfully return anything +%% (which is ignored), but they fail with either a throw or an exit or +%% something similar. 
+ +%% Verification of one config entry read from a file +-type(verify_config_entry_function() :: + fun((Entry :: term()) -> ok | {error, Reason :: term()})). + +%% Verification of config to be written +-type(verify_config_function() :: + fun(() -> void())). + +%% Write config to file (as defined by Fd) +-type(write_config_function() :: + fun((Fd :: file:io_device()) -> void())). + +-spec write_config_file(Dir :: string(), + FileName :: string(), + Verify :: verify_config_function(), + Write :: write_config_function()) -> + ok | {error, Reason :: term()}. write_config_file(Dir, FileName, Verify, Write) when (is_list(Dir) andalso is_list(FileName) andalso is_function(Verify) andalso is_function(Write)) -> - (catch do_write_config_file(Dir, FileName, Verify, Write)). + try + begin + do_write_config_file(Dir, FileName, Verify, Write) + end + catch + throw:Error -> + Error; + T:E -> + {error, {failed_write, Dir, FileName, T, E}} + end. + do_write_config_file(Dir, FileName, Verify, Write) -> Verify(), case file:open(filename:join(Dir, FileName), [write]) of {ok, Fd} -> - (catch Write(Fd)), - file:close(Fd), - ok; + file_write_and_close(Write, Fd, Dir, FileName); Error -> Error end. - append_config_file(Dir, FileName, Verify, Write) when (is_list(Dir) andalso is_list(FileName) andalso is_function(Verify) andalso is_function(Write)) -> - (catch do_append_config_file(Dir, FileName, Verify, Write)). + try + begin + do_append_config_file(Dir, FileName, Verify, Write) + end + catch + throw:Error -> + Error; + T:E -> + {error, {failed_append, Dir, FileName, T, E}} + end. do_append_config_file(Dir, FileName, Verify, Write) -> Verify(), case file:open(filename:join(Dir, FileName), [read, write]) of {ok, Fd} -> file:position(Fd, eof), - (catch Write(Fd)), - file:close(Fd), - ok; + file_write_and_close(Write, Fd, Dir, FileName); Error -> Error end. +file_write_and_close(Write, Fd, Dir, FileName) -> + ok = Write(Fd), + case file:sync(Fd) of + ok -> + case file:close(Fd) of + ok -> + ok; + {error, Reason} -> + {error, {failed_closing, Dir, FileName, Reason}} + end; + {error, Reason} -> + {error, {failed_syncing, Dir, FileName, Reason}} + end. + + +-spec read_config_file(Dir :: string(), + FileName :: string(), + Verify :: verify_config_entry_function()) -> + {ok, Config :: list()} | {error, Reason :: term()}. + read_config_file(Dir, FileName, Verify) when is_list(Dir) andalso is_list(FileName) andalso is_function(Verify) -> (catch do_read_config_file(Dir, FileName, Verify)). diff --git a/lib/snmp/src/misc/snmp_log.erl b/lib/snmp/src/misc/snmp_log.erl index 2c781810ef..a8c5df0b64 100644 --- a/lib/snmp/src/misc/snmp_log.erl +++ b/lib/snmp/src/misc/snmp_log.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2011. All Rights Reserved. +%% Copyright Ericsson AB 1997-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -21,7 +21,7 @@ -export([ - create/4, create/5, create/6, + create/4, create/5, create/6, open/1, open/2, change_size/2, close/1, sync/1, info/1, log/4, log_to_txt/5, log_to_txt/6, log_to_txt/7, @@ -109,6 +109,24 @@ create(Name, File, SeqNoGen, Size, Repair, Notify) -> {error, {bad_args, Name, File, SeqNoGen, Size, Repair, Notify}}. +%% -- open --- + +%% Open an already existing ( = open ) log + +open(Name) -> + open(Name, #snmp_log{seqno = disabled}). 
+open(Name, #snmp_log{seqno = SeqNoGen} = _OldLog) -> + %% We include mode in the opts just to be on the safe side + case disk_log:open([{name, Name}, {mode, read_write}]) of + {ok, Log} -> + %% SeqNo must be proprly initiated also + {ok, #snmp_log{id = Log, seqno = SeqNoGen}}; + {repaired, Log, _RecBytes, _BadBytes} -> + {ok, #snmp_log{id = Log, seqno = SeqNoGen}}; + ERROR -> + ERROR + end. + %% -- close --- @@ -394,6 +412,14 @@ log_to_io(Log, FileName, Dir, Mibs, Start) -> log_to_io(Log, FileName, Dir, Mibs, Start, Stop) when is_list(Mibs) -> + ?vtrace("log_to_io -> entry with" + "~n Log: ~p" + "~n FileName: ~p" + "~n Dir: ~p" + "~n Mibs: ~p" + "~n Start: ~p" + "~n Stop: ~p", + [Log, FileName, Dir, Mibs, Start, Stop]), File = filename:join(Dir, FileName), Converter = fun(L) -> do_log_to_io(L, Mibs, Start, Stop) @@ -401,28 +427,10 @@ log_to_io(Log, FileName, Dir, Mibs, Start, Stop) log_convert(Log, File, Converter). -%% -- log_to_plain --- - -%% log_to_plain(Log, FileName, Dir) -> -%% log_to_plain(Log, FileName, Dir, null, null). - -%% log_to_plain(Log, FileName, Dir, Start) -> -%% log_to_plain(Log, FileName, Dir, Start, null). - -%% log_to_plain(Log, FileName, Dir, Start, Stop) -%% when is_list(Mibs) -> -%% File = filename:join(Dir, FileName), -%% Converter = fun(L) -> -%% do_log_to_plain(L, Start, Stop) -%% end, -%% log_convert(Log, File, Converter). - - %% -------------------------------------------------------------------- %% Internal functions %% -------------------------------------------------------------------- - %% -- log_convert --- log_convert(#snmp_log{id = Log}, File, Converter) -> @@ -431,6 +439,26 @@ log_convert(Log, File, Converter) -> do_log_convert(Log, File, Converter). do_log_convert(Log, File, Converter) -> + %% ?vtrace("do_log_converter -> entry with" + %% "~n Log: ~p" + %% "~n File: ~p" + %% "~n disk_log:info(Log): ~p", [Log, File, disk_log:info(Log)]), + {Pid, Ref} = + erlang:spawn_monitor( + fun() -> + Result = do_log_convert2(Log, File, Converter), + exit(Result) + end), + receive + {'DOWN', Ref, process, Pid, Result} -> + %% ?vtrace("do_log_converter -> received result" + %% "~n Result: ~p" + %% "~n disk_log:info(Log): ~p", + %% [Result, disk_log:info(Log)]), + Result + end. + +do_log_convert2(Log, File, Converter) -> %% First check if the caller process has already opened the %% log, because if we close an already open log we will cause %% a runtime error. @@ -439,29 +467,18 @@ do_log_convert(Log, File, Converter) -> Converter(Log); false -> %% Not yet member of the ruling party, apply for membership... - %% If a log is opened as read_write it is not possible to - %% open it as read_only. So, to get around this we open - %% it under a different name... - Log2 = convert_name(Log), - case log_open(Log2, File) of + case log_open(Log, File) of {ok, _} -> - Res = Converter(Log2), - disk_log:close(Log2), + Res = Converter(Log), + disk_log:close(Log), Res; {error, {name_already_open, _}} -> - Converter(Log2); + Converter(Log); {error, Reason} -> {error, {Log, Reason}} end end. -convert_name(Name) when is_list(Name) -> - Name ++ "_tmp"; -convert_name(Name) when is_atom(Name) -> - list_to_atom(atom_to_list(Name) ++ "_tmp"); -convert_name(Name) -> - lists:flatten(io_lib:format("~w_tmp", [Name])). - %% -- do_log_to_text --- @@ -705,21 +722,21 @@ tsf_ge(_Local,Universal,{universal_time,DateTime}) -> tsf_ge(Local,_Universal,DateTime) -> tsf_ge(Local,DateTime). 
-tsf_ge(TimeStamp,DateTime) -> +tsf_ge(TimeStamp, DateTime) -> T1 = calendar:datetime_to_gregorian_seconds(TimeStamp), T2 = calendar:datetime_to_gregorian_seconds(DateTime), T1 >= T2. -tsf_le(_Local,_Universal,null) -> +tsf_le(_Local, _Universal, null) -> true; -tsf_le(Local,_Universal,{local_time,DateTime}) -> - tsf_le(Local,DateTime); -tsf_le(_Local,Universal,{universal_time,DateTime}) -> - tsf_le(Universal,DateTime); -tsf_le(Local,_Universal,DateTime) -> +tsf_le(Local, _Universal, {local_time, DateTime}) -> + tsf_le(Local, DateTime); +tsf_le(_Local, Universal, {universal_time, DateTime}) -> + tsf_le(Universal, DateTime); +tsf_le(Local, _Universal, DateTime) -> tsf_le(Local,DateTime). -tsf_le(TimeStamp,DateTime) -> +tsf_le(TimeStamp, DateTime) -> T1 = calendar:datetime_to_gregorian_seconds(TimeStamp), T2 = calendar:datetime_to_gregorian_seconds(DateTime), T1 =< T2. @@ -864,11 +881,8 @@ do_std_log_open(Name, File, Size, Repair, Notify) -> log_open(Name, File) -> - Opts = [{name, Name}, - {file, File}, - {type, ?LOG_TYPE}, - {format, ?LOG_FORMAT}, - {mode, read_only}], + Opts = [{name, Name}, + {file, File}], case disk_log:open(Opts) of {error, {badarg, size}} -> {error, no_such_log}; diff --git a/lib/snmp/src/misc/snmp_verbosity.erl b/lib/snmp/src/misc/snmp_verbosity.erl index 85037ba2ae..df5986b7bc 100644 --- a/lib/snmp/src/misc/snmp_verbosity.erl +++ b/lib/snmp/src/misc/snmp_verbosity.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2009. All Rights Reserved. +%% Copyright Ericsson AB 2000-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -76,7 +76,7 @@ format_timestamp({_N1, _N2, N3} = Now) -> {YYYY,MM,DD} = Date, {Hour,Min,Sec} = Time, FormatDate = - io_lib:format("~.4w:~.2.0w:~.2.0w ~.2.0w:~.2.0w:~.2.0w 4~w", + io_lib:format("~.4w:~.2.0w:~.2.0w ~.2.0w:~.2.0w:~.2.0w ~w", [YYYY,MM,DD,Hour,Min,Sec,round(N3/1000)]), lists:flatten(FormatDate). 
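The timestamp filters in snmp_log above (tsf_ge/tsf_le) accept null, a plain calendar:datetime(), or a tagged {local_time, DateTime} / {universal_time, DateTime} tuple, so a log conversion can be restricted to a time window. A sketch using the log_to_io/6 API added earlier in this change (the dates are arbitrary examples):

    print_log_window(LogDir, Mibs, LogName, LogFile) ->
        Start = {local_time,     {{2012, 2, 1}, {0, 0, 0}}},
        Stop  = {universal_time, {{2012, 2, 2}, {0, 0, 0}}},
        snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop).
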
@@ -144,6 +144,8 @@ image_of_sname(mse) -> "M-SERVER"; image_of_sname(msew) -> io_lib:format("M-SERVER-worker(~p)", [self()]); image_of_sname(mns) -> "M-NOTE-STORE"; image_of_sname(mnif) -> "M-NET-IF"; +image_of_sname(mnifl) -> "M-NET-IF-LOGGER"; +image_of_sname(mnifw) -> io_lib:format("M-NET-IF-worker(~p)", [self()]); image_of_sname(mconf) -> "M-CONF"; image_of_sname(mgr) -> "MGR"; diff --git a/lib/snmp/test/Makefile b/lib/snmp/test/Makefile index 78ffb1c255..8c1f53e51a 100644 --- a/lib/snmp/test/Makefile +++ b/lib/snmp/test/Makefile @@ -224,6 +224,9 @@ $(SNMP_BIN_TARGET_DIR)/Klas4.bin: $(SNMP_BIN_TARGET_DIR)/Klas3.bin $(SNMP_BIN_TARGET_DIR)/SA-MIB.bin: $(SNMP_BIN_TARGET_DIR)/OLD-SNMPEA-MIB.bin +$(SNMP_BIN_TARGET_DIR)/Test3.bin: $(SNMP_BIN_TARGET_DIR)/Test2.bin + + # ---------------------------------------------------- # Release Target # ---------------------------------------------------- diff --git a/lib/snmp/test/exp/.gitignore b/lib/snmp/test/exp/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/lib/snmp/test/exp/.gitignore diff --git a/lib/snmp/test/snmp_agent_bl_test.erl b/lib/snmp/test/exp/snmp_agent_bl_test.erl index b17489a755..b17489a755 100644 --- a/lib/snmp/test/snmp_agent_bl_test.erl +++ b/lib/snmp/test/exp/snmp_agent_bl_test.erl diff --git a/lib/snmp/test/snmp_agent_ms_test.erl b/lib/snmp/test/exp/snmp_agent_ms_test.erl index 1f34f1c8d1..1f34f1c8d1 100644 --- a/lib/snmp/test/snmp_agent_ms_test.erl +++ b/lib/snmp/test/exp/snmp_agent_ms_test.erl diff --git a/lib/snmp/test/snmp_agent_mt_test.erl b/lib/snmp/test/exp/snmp_agent_mt_test.erl index 4f125c0017..4f125c0017 100644 --- a/lib/snmp/test/snmp_agent_mt_test.erl +++ b/lib/snmp/test/exp/snmp_agent_mt_test.erl diff --git a/lib/snmp/test/snmp_agent_v1_test.erl b/lib/snmp/test/exp/snmp_agent_v1_test.erl index 737bb25cc3..737bb25cc3 100644 --- a/lib/snmp/test/snmp_agent_v1_test.erl +++ b/lib/snmp/test/exp/snmp_agent_v1_test.erl diff --git a/lib/snmp/test/snmp_agent_v2_test.erl b/lib/snmp/test/exp/snmp_agent_v2_test.erl index dc94c18ad9..dc94c18ad9 100644 --- a/lib/snmp/test/snmp_agent_v2_test.erl +++ b/lib/snmp/test/exp/snmp_agent_v2_test.erl diff --git a/lib/snmp/test/snmp_agent_v3_test.erl b/lib/snmp/test/exp/snmp_agent_v3_test.erl index 266be72878..266be72878 100644 --- a/lib/snmp/test/snmp_agent_v3_test.erl +++ b/lib/snmp/test/exp/snmp_agent_v3_test.erl diff --git a/lib/snmp/test/modules.mk b/lib/snmp/test/modules.mk index eacc749b53..7b0bdc5b8f 100644 --- a/lib/snmp/test/modules.mk +++ b/lib/snmp/test/modules.mk @@ -76,7 +76,8 @@ MIB_FILES = \ TestTrap.mib \ TestTrapv2.mib \ Test1.mib \ - Test2.mib + Test2.mib \ + Test3.mib SPECS = snmp.spec snmp.spec.vxworks diff --git a/lib/snmp/test/snmp_SUITE.erl b/lib/snmp/test/snmp_SUITE.erl index b6d72da2fa..22b9c64588 100644 --- a/lib/snmp/test/snmp_SUITE.erl +++ b/lib/snmp/test/snmp_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2011. All Rights Reserved. +%% Copyright Ericsson AB 1997-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -19,6 +19,8 @@ -module(snmp_SUITE). +-include("snmp_test_lib.hrl"). + -export([all/0, suite/0, groups/0, @@ -39,6 +41,21 @@ end_per_testcase(_Case, Config) when is_list(Config) -> Config. +init_per_suite(Config) when is_list(Config) -> + + ?DBG("init_per_suite -> entry with" + "~n Config: ~p", [Config]), + + Config. 
+ +end_per_suite(Config) when is_list(Config) -> + + ?DBG("end_per_suite -> entry with" + "~n Config: ~p", [Config]), + + Config. + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Top test case @@ -53,7 +70,8 @@ all() -> {group, manager}]. groups() -> - [{app, [], [{group, app_test}, + [ + {app, [], [{group, app_test}, {group, appup_test}]}, {compiler, [], [{group, compiler_test}]}, {misc, [], [{group, conf_test}, @@ -66,6 +84,7 @@ groups() -> {manager, [], [{group, manager_config_test}, {group, manager_user_test}, {group, manager_test}]}, + {app_test, [], [{snmp_app_test, all}]}, {appup_test, [], [{snmp_appup_test, all}]}, {compiler_test, [], [{snmp_compiler_test, all}]}, @@ -78,17 +97,25 @@ groups() -> {agent_test, [], [{snmp_agent_test, all}]}, {manager_config_test, [], [{snmp_manager_config_test, all}]}, {manager_user_test, [], [{snmp_manager_user_test, all}]}, - {manager_test, [], [{snmp_manager_test, all}]}]. + {manager_test, [], [{snmp_manager_test, all}]} + ]. -init_per_suite(Config) -> - Config. -end_per_suite(_Config) -> - ok. +init_per_group(GroupName, Config0) -> -init_per_group(_GroupName, Config) -> - Config. + ?DBG("init_per_group -> entry with" + "~n GroupName: ~p" + "~n Config0: ~p", [GroupName, Config0]), + + %% Group name is not really the suite name + %% (but it is a good enough approximation), + %% but it does not matter since we only need + %% it to be unique. + snmp_test_lib:init_suite_top_dir(GroupName, Config0). + end_per_group(_GroupName, Config) -> - Config. + lists:keydelete(snmp_suite_top_dir, 1, Config). + + diff --git a/lib/snmp/test/snmp_agent_test.erl b/lib/snmp/test/snmp_agent_test.erl index e968bc65b1..8ee5dd534d 100644 --- a/lib/snmp/test/snmp_agent_test.erl +++ b/lib/snmp/test/snmp_agent_test.erl @@ -224,52 +224,82 @@ groups() -> }, {tickets2, [], [otp8395, otp9884]}, {otp_4394, [], [otp_4394_test]}, - {otp_7157, [], [otp_7157_test] - } + {otp_7157, [], [otp_7157_test]} ]. -init_per_group(all_tcs, Config) -> - init_all(Config); -init_per_group(otp_7157, Config) -> - init_otp_7157(Config); -init_per_group(otp_4394, Config) -> - init_otp_4394(Config); -init_per_group(v2_inform, Config) -> - init_v2_inform(Config); -init_per_group(multiple_reqs_2, Config) -> - init_mul(Config); -init_per_group(multiple_reqs, Config) -> - init_mul(Config); -init_per_group(test_multi_threaded, Config) -> - init_mt(Config); -init_per_group(test_v3, Config) -> - init_v3(Config); -init_per_group(test_v1_v2, Config) -> - init_v1_v2(Config); -init_per_group(test_v2, Config) -> - init_v2(Config); -init_per_group(test_v1, Config) -> - init_v1(Config); -init_per_group(misc, Config) -> - init_misc(Config); -init_per_group(mib_storage_varm_mnesia, Config) -> - init_varm_mib_storage_mnesia(Config); -init_per_group(mib_storage_varm_dets, Config) -> - init_varm_mib_storage_dets(Config); -init_per_group(mib_storage_size_check_mnesia, Config) -> - init_size_check_msm(Config); -init_per_group(mib_storage_size_check_dets, Config) -> - init_size_check_msd(Config); -init_per_group(mib_storage_size_check_ets, Config) -> - init_size_check_mse(Config); -init_per_group(mib_storage_mnesia, Config) -> - init_mib_storage_mnesia(Config); -init_per_group(mib_storage_dets, Config) -> - init_mib_storage_dets(Config); -init_per_group(mib_storage_ets, Config) -> - init_mib_storage_ets(Config); -init_per_group(_GroupName, Config) -> - Config. 
+ +init_per_suite(Config0) when is_list(Config0) -> + + ?DBG("init_per_suite -> entry with" + "~n Config0: ~p", [Config0]), + + Config1 = snmp_test_lib:init_suite_top_dir(?MODULE, Config0), + Config2 = snmp_test_lib:fix_data_dir(Config1), + + %% Mib-dirs + MibDir = snmp_test_lib:lookup(data_dir, Config2), + StdMibDir = filename:join([code:priv_dir(snmp), "mibs"]), + + Config3 = [{mib_dir, MibDir}, {std_mib_dir, StdMibDir} | Config2], + + ?DBG("init_per_suite -> end with" + "~n Config3: ~p", [Config3]), + + Config3. + +end_per_suite(Config) when is_list(Config) -> + + ?DBG("end_per_suite -> entry with" + "~n Config: ~p", [Config]), + + Config. + + +init_per_group(all_tcs = GroupName, Config) -> + init_all(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(otp_7157 = GroupName, Config) -> + init_otp_7157(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(otp_4394 = GroupName, Config) -> + init_otp_4394(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(v2_inform = GroupName, Config) -> + init_v2_inform(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(multiple_reqs_2 = GroupName, Config) -> + init_mul(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(multiple_reqs = GroupName, Config) -> + init_mul(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(test_multi_threaded = GroupName, Config) -> + init_mt(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(test_v3 = GroupName, Config) -> + init_v3(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(test_v1_v2 = GroupName, Config) -> + init_v1_v2(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(test_v2 = GroupName, Config) -> + init_v2(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(test_v1 = GroupName, Config) -> + init_v1(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(misc = GroupName, Config) -> + init_misc(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(mib_storage_varm_mnesia = GroupName, Config) -> + init_varm_mib_storage_mnesia(snmp_test_lib:init_group_top_dir(GroupName, + Config)); +init_per_group(mib_storage_varm_dets = GroupName, Config) -> + init_varm_mib_storage_dets(snmp_test_lib:init_group_top_dir(GroupName, + Config)); +init_per_group(mib_storage_size_check_mnesia = GroupName, Config) -> + init_size_check_msm(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(mib_storage_size_check_dets = GroupName, Config) -> + init_size_check_msd(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(mib_storage_size_check_ets = GroupName, Config) -> + init_size_check_mse(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(mib_storage_mnesia = GroupName, Config) -> + init_mib_storage_mnesia(snmp_test_lib:init_group_top_dir(GroupName, + Config)); +init_per_group(mib_storage_dets = GroupName, Config) -> + init_mib_storage_dets(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(mib_storage_ets = GroupName, Config) -> + init_mib_storage_ets(snmp_test_lib:init_group_top_dir(GroupName, Config)); +init_per_group(GroupName, Config) -> + snmp_test_lib:init_group_top_dir(GroupName, Config). 
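All group initializers now funnel through snmp_test_lib:init_group_top_dir/2, so every group gets its own working directory in the config before the group-specific init runs, and test cases pick up the directories prepared in init_per_suite/1. A sketch of what a case typically does (the case name is made up):

    some_case(Config) when is_list(Config) ->
        MibDir    = snmp_test_lib:lookup(mib_dir, Config),
        StdMibDir = snmp_test_lib:lookup(std_mib_dir, Config),
        {MibDir, StdMibDir}.
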
end_per_group(all_tcs, Config) -> finish_all(Config); @@ -320,38 +350,36 @@ init_per_testcase(otp8395 = Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [Case, Config]), - Config2 = init_per_testcase2(Case, init_per_suite(Config)), - otp8395({init, Config2}); + otp8395({init, init_per_testcase2(Case, Config)}); init_per_testcase(otp9884 = Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [Case, Config]), - Config2 = init_per_testcase2(Case, init_per_suite(Config)), - otp9884({init, Config2}); + otp9884({init, init_per_testcase2(Case, Config)}); init_per_testcase(otp_7157_test = _Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [_Case, Config]), Dog = ?WD_START(?MINS(1)), - [{watchdog, Dog}|Config]; + [{watchdog, Dog} | Config ]; init_per_testcase(v2_inform_i = _Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [_Case, Config]), Dog = ?WD_START(?MINS(10)), - [{watchdog, Dog}|Config]; + [{watchdog, Dog} | Config ]; init_per_testcase(v3_inform_i = _Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [_Case, Config]), Dog = ?WD_START(?MINS(10)), - [{watchdog, Dog}|Config]; + [{watchdog, Dog} | Config ]; init_per_testcase(_Case, Config) when is_list(Config) -> ?DBG("init_per_testcase -> entry with" "~n Case: ~p" "~n Config: ~p", [_Case, Config]), Dog = ?WD_START(?MINS(6)), - [{watchdog, Dog}|Config]. + [{watchdog, Dog}| Config ]. end_per_testcase(otp8395, Config) when is_list(Config) -> otp8395({fin, Config}); @@ -366,65 +394,13 @@ end_per_testcase(_Case, Config) when is_list(Config) -> Config. -init_per_suite(Config) -> - ?DBG("init_per_suite -> entry with" - "~n Config: ~p", [Config]), - - %% Suite root dir for test suite - PrivDir = ?config(priv_dir, Config), - - %% Create top-directory for this sub-suite - SuiteTopDir = filename:join([PrivDir, ?MODULE]), - case file:make_dir(SuiteTopDir) of - ok -> - ok; - {error, eexist} -> - %% This can happen since this is not really a - %% suite-init function. - ok; - {error, Reason} -> - ?FAIL({failed_creating_suite_top_dir, SuiteTopDir, Reason}) - end, - - - %% -- - %% Fix config (data-dir is not correct): - %% - - Config1 = fix_data_dir(Config), - %% Config1 = Config, - - %% Mib-dirs - MibDir = ?config(data_dir, Config1), - StdMibDir = filename:join([code:priv_dir(snmp), "mibs"]), - - Config2 = [{suite_top_dir, SuiteTopDir}, - {mib_dir, MibDir}, - {std_mib_dir, StdMibDir} | Config1], - - ?DBG("init_per_suite -> done when" - "~n Config2: ~p", [Config2]), - Config2. - -%% end_per_suite(Config) -> -end_per_suite(Config) -> - Config. - -fix_data_dir(Config) -> - DataDir0 = ?config(data_dir, Config), - DataDir1 = filename:split(filename:absname(DataDir0)), - [_|DataDir2] = lists:reverse(DataDir1), - DataDir = filename:join(lists:reverse(DataDir2) ++ [?snmp_test_data]), - Config1 = lists:keydelete(data_dir, 1, Config), - [{data_dir, DataDir} | Config1]. 
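
The hand-rolled per-suite directory handling (priv_dir plus a manually created case directory) is gone; snmp_test_lib:init_testcase_top_dir/2 now hands each case its own tree under priv_dir. A sketch of the intended path composition, where the exact directory names are an assumption based on the keys used in this diff rather than on snmp_test_lib itself:

    %% Sketch only; directory names are assumed, not read from snmp_test_lib.
    case_dirs(PrivDir, Suite, Group, Case) ->
        CaseTopDir  = filename:join([PrivDir, Suite, Group, Case]),
        AgentTopDir = filename:join(CaseTopDir, "agent"),
        {CaseTopDir, AgentTopDir}.
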
+init_per_testcase2(Case, Config) -> + ?DBG("end_per_testcase2 -> entry with" + "~n Case: ~p" + "~n Config: ~p", [Case, Config]), -init_per_testcase2(Case, Config) -> - SuiteToDir = ?config(suite_top_dir, Config), - - %% Create top-directory for this test-case - CaseTopDir = filename:join([SuiteToDir, Case]), - ok = file:make_dir(CaseTopDir), + CaseTopDir = snmp_test_lib:init_testcase_top_dir(Case, Config), %% Create agent top-dir(s) AgentTopDir = filename:join([CaseTopDir, agent]), @@ -623,19 +599,19 @@ mib_storage_mnesia_cases() -> msm_mib_of]. mse_size_check_cases() -> -[mse_size_check]. + [mse_size_check]. msd_size_check_cases() -> -[msd_size_check]. + [msd_size_check]. msm_size_check_cases() -> -[msm_size_check]. + [msm_size_check]. varm_mib_storage_dets_cases() -> -[msd_varm_mib_start]. + [msd_varm_mib_start]. varm_mib_storage_mnesia_cases() -> -[msm_varm_mib_start]. + [msm_varm_mib_start]. init_mib_storage_ets(Config) when is_list(Config) -> ?LOG("init_mib_storage_ets -> entry", []), @@ -644,36 +620,37 @@ init_mib_storage_ets(Config) when is_list(Config) -> init_mib_storage_dets(Config) when is_list(Config) -> ?LOG("init_mib_storage_ets -> entry", []), - ?line AgentDir = ?GCONF(agent_dir, Config), - MibStorage = {snmp_mib_storage,{dets,AgentDir}}, + ?line AgentDbDir = ?GCONF(agent_db_dir, Config), + MibStorage = {snmp_mib_storage, {dets, AgentDbDir}}, init_ms(Config, [MibStorage]). init_mib_storage_mnesia(Config) when is_list(Config) -> ?LOG("init_mib_storage_ets -> entry", []), - MibStorage = {snmp_mib_storage,{mnesia,[]}}, + MibStorage = {snmp_mib_storage, {mnesia,[]}}, init_ms(Config, [MibStorage]). init_ms(Config, Opts) when is_list(Config) -> ?LOG("init_mib_storage_ets -> entry", []), - ?line SaNode = ?GCONF(snmp_sa, Config), + ?line SaNode = ?GCONF(snmp_sa, Config), ?line create_tables(SaNode), - ?line AgentDir = ?GCONF(agent_dir, Config), - ?line MgrDir = ?GCONF(mgr_dir, Config), - ?line Ip = ?GCONF(ip, Config), - ?line config([v1], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + ?line AgentConfDir = ?GCONF(agent_conf_dir, Config), + ?line MgrDir = ?GCONF(mgr_dir, Config), + ?line Ip = ?GCONF(ip, Config), + ?line config([v1], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), MasterAgentVerbosity = {snmp_master_agent_verbosity, trace}, MibsVerbosity = {snmp_mibserver_verbosity, trace}, SymStoreVerbosity = {snmp_symbolic_store_verbosity, trace}, - Opts1 = [MasterAgentVerbosity,MibsVerbosity,SymStoreVerbosity|Opts], - [{vsn, v1} | start_v1_agent(Config,Opts1)]. + Opts1 = [MasterAgentVerbosity, MibsVerbosity, SymStoreVerbosity | Opts], + [{vsn, v1} | start_v1_agent(Config, Opts1)]. init_size_check_mse(Config) when is_list(Config) -> MibStorage = {snmp_mib_storage, ets}, init_size_check_ms(Config, [MibStorage]). init_size_check_msd(Config) when is_list(Config) -> - AgentDir = ?GCONF(agent_dir, Config), - MibStorage = {snmp_mib_storage, {dets, AgentDir}}, + AgentDbDir = ?GCONF(agent_db_dir, Config), + MibStorage = {snmp_mib_storage, {dets, AgentDbDir}}, init_size_check_ms(Config, [MibStorage]). 
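
The mib-storage initializers above now point dets storage at the dedicated database directory instead of the old common agent directory. Collected here for readability, the three snmp_mib_storage values exercised by these cases are:

    {snmp_mib_storage, ets}                   %% ets-based storage
    {snmp_mib_storage, {dets, AgentDbDir}}    %% dets files under the agent db dir
    {snmp_mib_storage, {mnesia, []}}          %% mnesia-based storage
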
init_size_check_msm(Config) when is_list(Config) -> @@ -695,22 +672,24 @@ init_size_check_ms(Config, Opts) when is_list(Config) -> ?SKIP({failed_starting_crypto, Reason}) end, create_tables(SaNode), - AgentDir = ?GCONF(agent_dir, Config), - MgrDir = ?GCONF(mgr_dir, Config), - Ip = ?GCONF(ip, Config), - ?line ok = - config([v3], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + AgentConfDir = ?GCONF(agent_conf_dir, Config), + MgrDir = ?GCONF(mgr_dir, Config), + Ip = ?GCONF(ip, Config), + ?line ok = config([v3], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, v3} | start_v3_agent(Config, Opts)]. init_varm_mib_storage_dets(Config) when is_list(Config) -> ?LOG("init_varm_mib_storage_dets -> entry", []), - ?line SaNode = ?GCONF(snmp_sa, Config), + ?line SaNode = ?GCONF(snmp_sa, Config), ?line create_tables(SaNode), - ?line AgentDir = ?GCONF(agent_dir, Config), - ?line MgrDir = ?GCONF(mgr_dir, Config), - ?line Ip = ?GCONF(ip, Config), - ?line config([v1], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), - MibStorage = {snmp_mib_storage,{dets,AgentDir}}, + ?line AgentDbDir = ?GCONF(agent_db_dir, Config), + ?line AgentConfDir = ?GCONF(agent_conf_dir, Config), + ?line MgrDir = ?GCONF(mgr_dir, Config), + ?line Ip = ?GCONF(ip, Config), + ?line config([v1], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), + MibStorage = {snmp_mib_storage, {dets, AgentDbDir}}, MasterAgentVerbosity = {snmp_master_agent_verbosity, trace}, MibsVerbosity = {snmp_mibserver_verbosity, trace}, SymStoreVerbosity = {snmp_symbolic_store_verbosity, trace}, @@ -719,12 +698,13 @@ init_varm_mib_storage_dets(Config) when is_list(Config) -> init_varm_mib_storage_mnesia(Config) when is_list(Config) -> ?LOG("init_varm_mib_storage_mnesia -> entry", []), - ?line SaNode = ?GCONF(snmp_sa, Config), + ?line SaNode = ?GCONF(snmp_sa, Config), ?line create_tables(SaNode), - ?line AgentDir = ?GCONF(agent_dir, Config), - ?line MgrDir = ?GCONF(mgr_dir, Config), - ?line Ip = ?GCONF(ip, Config), - ?line config([v1], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + ?line AgentConfDir = ?GCONF(agent_conf_dir, Config), + ?line MgrDir = ?GCONF(mgr_dir, Config), + ?line Ip = ?GCONF(ip, Config), + ?line config([v1], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), MibStorage = {snmp_mib_storage,{mnesia,[]}}, MasterAgentVerbosity = {snmp_master_agent_verbosity, trace}, MibsVerbosity = {snmp_mibserver_verbosity, trace}, @@ -1189,10 +1169,11 @@ v1_cases() -> init_v1(Config) when is_list(Config) -> ?line SaNode = ?config(snmp_sa, Config), ?line create_tables(SaNode), - ?line AgentDir = ?config(agent_dir, Config), + ?line AgentConfDir = ?config(agent_conf_dir, Config), ?line MgrDir = ?config(mgr_dir, Config), ?line Ip = ?config(ip, Config), - ?line config([v1], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + ?line config([v1], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, v1} | start_v1_agent(Config)]. finish_v1(Config) when is_list(Config) -> @@ -1232,10 +1213,11 @@ v2_cases() -> init_v2(Config) when is_list(Config) -> SaNode = ?config(snmp_sa, Config), create_tables(SaNode), - AgentDir = ?config(agent_dir, Config), + AgentConfDir = ?config(agent_conf_dir, Config), MgrDir = ?config(mgr_dir, Config), Ip = ?config(ip, Config), - config([v2], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + config([v2], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, v2} | start_v2_agent(Config)]. 
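
All of these init_* functions funnel through config/5 in snmp_agent_test_lib (updated further down), which writes the standard agent configuration files into the conf directory and then patches the usm, community, vacm, target and notify entries. The central call, with the arguments used in this diff, is:

    snmp_config:write_agent_snmp_files(AgentConfDir, Vsns,
                                       ManagerIp, ?TRAP_UDP,
                                       AgentIp, 4000, "test")
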
finish_v2(Config) when is_list(Config) -> @@ -1251,10 +1233,11 @@ v1_v2_cases() -> init_v1_v2(Config) when is_list(Config) -> SaNode = ?config(snmp_sa, Config), create_tables(SaNode), - AgentDir = ?config(agent_dir, Config), + AgentConfDir = ?config(agent_conf_dir, Config), MgrDir = ?config(mgr_dir, Config), Ip = ?config(ip, Config), - config([v1,v2], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + config([v1,v2], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, bilingual} | start_bilingual_agent(Config)]. finish_v1_v2(Config) when is_list(Config) -> @@ -1315,10 +1298,10 @@ init_v3(Config) when is_list(Config) -> end, SaNode = ?config(snmp_sa, Config), create_tables(SaNode), - AgentDir = ?config(agent_dir, Config), + AgentConfDir = ?config(agent_conf_dir, Config), MgrDir = ?config(mgr_dir, Config), Ip = ?config(ip, Config), - ?line ok = config([v3], MgrDir, AgentDir, + ?line ok = config([v3], MgrDir, AgentConfDir, tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, v3} | start_v3_agent(Config)]. @@ -1338,11 +1321,11 @@ mt_cases() -> init_mt(Config) when is_list(Config) -> SaNode = ?config(snmp_sa, Config), create_tables(SaNode), - AgentDir = ?config(agent_dir, Config), + AgentConfDir = ?config(agent_conf_dir, Config), MgrDir = ?config(mgr_dir, Config), Ip = ?config(ip, Config), - ?line ok = - config([v2], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + ?line ok = config([v2], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), [{vsn, v2} | start_multi_threaded_agent(Config)]. finish_mt(Config) when is_list(Config) -> @@ -1421,7 +1404,13 @@ simple(Config) when is_list(Config) -> ?P(simple), init_case(Config), - try_test(simple_standard_test). + try_test(simple_standard_test), + + p("Display log"), + display_log(Config), + + p("done"), + ok. simple_2(X) -> ?P(simple_2), simple(X). @@ -1601,7 +1590,7 @@ change_target_addr_config(Config) when is_list(Config) -> try_test(ma_trap1, [MA]), ?LOG("change_target_addr_config -> set silence verbosity for local_db",[]), - ?line snmpa:verbosity(local_db,silence), + ?line snmpa:verbosity(local_db, silence), %% Start new dummy listener ?LOG("change_target_addr_config -> start dummy manager",[]), @@ -1609,9 +1598,9 @@ change_target_addr_config(Config) when is_list(Config) -> %% Reconfigure ?LOG("change_target_addr_config -> reconfigure",[]), - AgentDir = ?config(agent_dir, Config), - ?line rewrite_target_addr_conf(AgentDir, NewPort), - ?line snmp_target_mib:reconfigure(AgentDir), + AgentConfDir = ?config(agent_conf_dir, Config), + ?line rewrite_target_addr_conf(AgentConfDir, NewPort), + ?line snmp_target_mib:reconfigure(AgentConfDir), %% Send the trap again ?LOG("change_target_addr_config -> send trap again",[]), @@ -1624,7 +1613,7 @@ change_target_addr_config(Config) when is_list(Config) -> ?line ok = dummy_manager_stop(Pid), ?LOG("change_target_addr_config -> reset target address config",[]), - ?line reset_target_addr_conf(AgentDir), + ?line reset_target_addr_conf(AgentConfDir), ?LOG("change_target_addr_config -> unload TestTrap",[]), ?line unload_master("TestTrap"). @@ -2067,7 +2056,7 @@ v3_inform(_X) -> {req, [], {conf, init_v3_inform, [v3_inform_i], finish_v3_inform}}. init_v2_inform(Config) when is_list(Config) -> - _Dir = ?config(agent_dir, Config), + _Dir = ?config(agent_conf_dir, Config), %% snmp_internal_mib:configure(Dir), Config. @@ -2075,7 +2064,7 @@ init_v3_inform(X) -> init_v2_inform(X). 
finish_v2_inform(Config) when is_list(Config) -> - _Dir = ?config(agent_dir, Config), + _Dir = ?config(agent_conf_dir, Config), %% snmp_internal_mib:configure(Dir), Config. @@ -2377,9 +2366,9 @@ v3_md5_auth(Config) when is_list(Config) -> ?P1("Testing MD5 authentication...takes a few seconds..."), - AgentDir = ?config(agent_dir, Config), - ?line rewrite_target_params_conf(AgentDir, "authMD5", authNoPriv), - ?line snmp_target_mib:reconfigure(AgentDir), + AgentConfDir = ?config(agent_conf_dir, Config), + ?line rewrite_target_params_conf(AgentConfDir, "authMD5", authNoPriv), + ?line snmp_target_mib:reconfigure(AgentConfDir), MA = whereis(snmp_master_agent), @@ -2388,14 +2377,14 @@ v3_md5_auth(Config) when is_list(Config) -> ?line load_master("TestTrapv2"), try_test(v3_sync, [[{v2_proc, []}, - {ma_v2_trap1, [MA]}, - {v3_inform_sync, [MA]}]], - [{sec_level, authNoPriv}, {user, "authMD5"}]), + {ma_v2_trap1, [MA]}, + {v3_inform_sync, [MA]}]], + [{sec_level, authNoPriv}, {user, "authMD5"}]), ?line unload_master("TestTrapv2"), ?line unload_master("TestTrap"), ?line unload_master("Test2"), - ?line reset_target_params_conf(AgentDir). + ?line reset_target_params_conf(AgentConfDir). v3_sha_auth(suite) -> []; v3_sha_auth(Config) when is_list(Config) -> @@ -2404,9 +2393,9 @@ v3_sha_auth(Config) when is_list(Config) -> ?P1("Testing SHA authentication...takes a few seconds..."), - AgentDir = ?config(agent_dir, Config), - ?line rewrite_target_params_conf(AgentDir, "authSHA", authNoPriv), - ?line snmp_target_mib:reconfigure(AgentDir), + AgentConfDir = ?config(agent_conf_dir, Config), + ?line rewrite_target_params_conf(AgentConfDir, "authSHA", authNoPriv), + ?line snmp_target_mib:reconfigure(AgentConfDir), MA = whereis(snmp_master_agent), @@ -2415,14 +2404,14 @@ v3_sha_auth(Config) when is_list(Config) -> ?line load_master("TestTrapv2"), try_test(v3_sync, [[{v2_proc, []}, - {ma_v2_trap1, [MA]}, - {v3_inform_sync, [MA]}]], - [{sec_level, authNoPriv}, {user, "authSHA"}]), + {ma_v2_trap1, [MA]}, + {v3_inform_sync, [MA]}]], + [{sec_level, authNoPriv}, {user, "authSHA"}]), ?line unload_master("TestTrapv2"), ?line unload_master("TestTrap"), ?line unload_master("Test2"), - ?line reset_target_params_conf(AgentDir). + ?line reset_target_params_conf(AgentConfDir). v3_des_priv(suite) -> []; v3_des_priv(Config) when is_list(Config) -> @@ -2431,9 +2420,9 @@ v3_des_priv(Config) when is_list(Config) -> ?P1("Testing DES encryption...takes a few seconds..."), - AgentDir = ?config(agent_dir, Config), - ?line rewrite_target_params_conf(AgentDir, "privDES", authPriv), - ?line snmp_target_mib:reconfigure(AgentDir), + AgentConfDir = ?config(agent_conf_dir, Config), + ?line rewrite_target_params_conf(AgentConfDir, "privDES", authPriv), + ?line snmp_target_mib:reconfigure(AgentConfDir), MA = whereis(snmp_master_agent), @@ -2444,14 +2433,14 @@ v3_des_priv(Config) when is_list(Config) -> ?line load_master("TestTrapv2"), try_test(v3_sync, [[{v2_proc, []}, - {ma_v2_trap1, [MA]}, - {v3_inform_sync, [MA]}]], - [{sec_level, authPriv}, {user, "privDES"}]), + {ma_v2_trap1, [MA]}, + {v3_inform_sync, [MA]}]], + [{sec_level, authPriv}, {user, "privDES"}]), ?line unload_master("TestTrapv2"), ?line unload_master("TestTrap"), ?line unload_master("Test2"), - ?line reset_target_params_conf(AgentDir). + ?line reset_target_params_conf(AgentConfDir). %% -define(usmStatsNotInTimeWindows_instance, [1,3,6,1,6,3,15,1,1,2,0]). 
@@ -4795,7 +4784,7 @@ snmp_user_based_sm_mib_3(Config) when is_list(Config) -> ?P(snmp_user_based_sm_mib_3), init_case(Config), - _AgentDir = ?config(agent_dir, Config), + _AgentDir = ?config(agent_conf_dir, Config), ?line load_master_std("SNMP-USER-BASED-SM-MIB"), %% The newUser used here already has VACM access. @@ -5727,28 +5716,28 @@ otp_3725_test(MaNode) -> init_otp_4394(Config) when is_list(Config) -> ?DBG("init_otp_4394 -> entry with" "~n Config: ~p", [Config]), - ?line AgentDir = ?config(agent_dir, Config), - ?line MgrDir = ?config(mgr_dir, Config), - ?line Ip = ?config(ip, Config), - ?line otp_4394_config(AgentDir, MgrDir, Ip), + ?line AgentConfDir = ?config(agent_conf_dir, Config), + ?line MgrDir = ?config(mgr_dir, Config), + ?line Ip = ?config(ip, Config), + ?line otp_4394_config(AgentConfDir, MgrDir, Ip), MasterAgentVerbosity = {master_agent_verbosity, trace}, NetIfVerbosity = {net_if_verbosity, trace}, - Opts = [MasterAgentVerbosity,NetIfVerbosity], - [{vsn, v1} | start_v1_agent(Config,Opts)]. + Opts = [MasterAgentVerbosity, NetIfVerbosity], + [{vsn, v1} | start_v1_agent(Config, Opts)]. -otp_4394_config(AgentDir, MgrDir, Ip0) -> +otp_4394_config(AgentConfDir, MgrDir, Ip0) -> ?DBG("otp_4394_config -> entry with" - "~n AgentDir: ~p" - "~n MgrDir: ~p" - "~n Ip0: ~p", [AgentDir, MgrDir, Ip0]), + "~n AgentConfDir: ~p" + "~n MgrDir: ~p" + "~n Ip0: ~p", [AgentConfDir, MgrDir, Ip0]), Vsn = [v1], Ip = tuple_to_list(Ip0), - ?line snmp_config:write_agent_snmp_files(AgentDir, Vsn, Ip, + ?line snmp_config:write_agent_snmp_files(AgentConfDir, Vsn, Ip, ?TRAP_UDP, Ip, 4000, "OTP-4394 test"), - ?line case update_usm(Vsn, AgentDir) of + ?line case update_usm(Vsn, AgentConfDir) of true -> - ?line copy_file(filename:join(AgentDir, "usm.conf"), + ?line copy_file(filename:join(AgentConfDir, "usm.conf"), filename:join(MgrDir, "usm.conf")), ?line update_usm_mgr(Vsn, MgrDir); false -> @@ -5756,8 +5745,8 @@ otp_4394_config(AgentDir, MgrDir, Ip0) -> end, C1 = {"a", "all-rights", "initial", "", "pc"}, C2 = {"c", "secret", "secret_name", "", "secret_tag"}, - ?line write_community_conf(AgentDir, [C1, C2]), - ?line update_vacm(Vsn, AgentDir), + ?line write_community_conf(AgentConfDir, [C1, C2]), + ?line update_vacm(Vsn, AgentConfDir), Ta1 = {"shelob v1", [134,138,177,177], 5000, 1500, 3, %% Anv�nd Ip och modda "pc1", @@ -5771,9 +5760,9 @@ otp_4394_config(AgentDir, MgrDir, Ip0) -> "target_v1", "", %% [255,255,255,255,0,0], [], 2048}, - ?line write_target_addr_conf(AgentDir, [Ta1, Ta2]), - ?line write_target_params_conf(AgentDir, Vsn), - ?line write_notify_conf(AgentDir), + ?line write_target_addr_conf(AgentConfDir, [Ta1, Ta2]), + ?line write_target_params_conf(AgentConfDir, Vsn), + ?line write_notify_conf(AgentConfDir), ok. @@ -5826,10 +5815,11 @@ init_otp_7157(Config) when is_list(Config) -> ?DBG("init_otp_7157 -> entry with" "~n Config: ~p", [Config]), - ?line AgentDir = ?config(agent_dir, Config), + ?line AgentConfDir = ?config(agent_conf_dir, Config), ?line MgrDir = ?config(mgr_dir, Config), ?line Ip = ?config(ip, Config), - ?line config([v2], MgrDir, AgentDir, tuple_to_list(Ip), tuple_to_list(Ip)), + ?line config([v2], MgrDir, AgentConfDir, + tuple_to_list(Ip), tuple_to_list(Ip)), MasterAgentVerbosity = {master_agent_verbosity, trace}, NetIfVerbosity = {net_if_verbosity, trace}, Opts = [MasterAgentVerbosity, NetIfVerbosity], @@ -6562,8 +6552,8 @@ write_community_conf(Dir, Conf) -> write_target_addr_conf(Dir, Conf) -> snmp_agent_test_lib:write_target_addr_conf(Dir, Conf). 
-write_target_addr_conf(Dir, ManagerIp, UDP, Vsns) -> - snmp_agent_test_lib:write_target_addr_conf(Dir, ManagerIp, UDP, Vsns). +%% write_target_addr_conf(Dir, ManagerIp, UDP, Vsns) -> +%% snmp_agent_test_lib:write_target_addr_conf(Dir, ManagerIp, UDP, Vsns). rewrite_target_addr_conf(Dir, NewPort) -> snmp_agent_test_lib:rewrite_target_addr_conf(Dir, NewPort). @@ -6596,9 +6586,42 @@ copy_file(From, To) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +display_log(Config) -> + case lists:keysearch(agent_log_dir, 1, Config) of + {value, {_, Dir}} -> + case lists:keysearch(snmp_master, 1, Config) of + {value, {_, Node}} -> + LogDir = Dir, + Mibs = [], + OutFile = filename:join(LogDir, "snmpa_log.txt"), + p("~n" + "=========================" + " < Audit Trail Log > " + "=========================" + "~n"), + rcall(Node, snmpa, log_to_txt, [LogDir, Mibs, OutFile]), + rcall(Node, snmpa, log_to_io, [LogDir, Mibs]), + p("~n" + "=========================" + " < / Audit Trail Log > " + "=========================" + "~n", []); + false -> + p("display_log -> no agent node found"), + ok + end; + false -> + p("display_log -> no agent log dir found: " + "~n ~p", [Config]), + ok + end. + + +%% ------ + display_memory_usage() -> Info = snmpa:info(snmp_master_agent), - AMU = display_agent_memory_uasge(Info), + AMU = display_agent_memory_usage(Info), NIMU = display_net_if_memory_usage(Info), NSMU = display_note_store_memory_usage(Info), SSMU = display_symbolic_store_memory_usage(Info), @@ -6608,7 +6631,7 @@ display_memory_usage() -> AMU ++ NIMU ++ NSMU ++ SSMU ++ LDBMU ++ MSMU, []), ok. -display_agent_memory_uasge(Info) -> +display_agent_memory_usage(Info) -> AgentInfo = lists_key1search(agent, Info), ProcMem = lists_key1search([process_memory,master_agent], AgentInfo), @@ -6717,3 +6740,27 @@ lists_key1search(Key, List) when is_atom(Key) -> regs() -> lists:sort(registered()). + +%% ------ + +rcall(Node, Mod, Func, Args) -> + case rpc:call(Node, Mod, Func, Args) of + {badrpc, nodedown} -> + ?FAIL({rpc_failure, Node}); + Else -> + Else + end. + + +%% ------ + +p(F) -> + p(F, []). + +p(F, A) -> + io:format("*** [~s] ***" + "~n" ++ F ++ "~n", [formated_timestamp()|A]). + +formated_timestamp() -> + snmp_test_lib:formated_timestamp(). + diff --git a/lib/snmp/test/snmp_agent_test_lib.erl b/lib/snmp/test/snmp_agent_test_lib.erl index 084b3ee8da..cd9a87eab8 100644 --- a/lib/snmp/test/snmp_agent_test_lib.erl +++ b/lib/snmp/test/snmp_agent_test_lib.erl @@ -119,20 +119,10 @@ %%% didn't undo (since it failed). 
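
The display_log/1 helper added above dumps the agent's Audit Trail Log on the agent node via rpc. The same conversion can be run interactively against any agent log directory; a minimal sketch, with a placeholder directory:

    LogDir  = "/tmp/agent/log",                     %% placeholder
    OutFile = filename:join(LogDir, "snmpa_log.txt"),
    snmpa:log_to_txt(LogDir, [], OutFile),          %% [] = no extra MIBs
    snmpa:log_to_io(LogDir, []).
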
%%%----------------------------------------------------------------- -init_all(Config0) when is_list(Config0) -> - ?LOG("init_all -> entry with" - "~n Config0: ~p",[Config0]), - - %% -- - %% Fix config: - %% +init_all(Config) when is_list(Config) -> - DataDir0 = ?config(data_dir, Config0), - DataDir1 = filename:split(filename:absname(DataDir0)), - [_|DataDir2] = lists:reverse(DataDir1), - DataDir3 = filename:join(lists:reverse(DataDir2) ++ [?snmp_test_data]), - Config1 = lists:keydelete(data_dir, 1, Config0), - Config = [{data_dir, DataDir3 ++ "/"}|Config1], + ?LOG("init_all -> entry with" + "~n Config: ~p",[Config]), %% -- %% Start nodes @@ -143,34 +133,43 @@ init_all(Config0) when is_list(Config0) -> %% -- - %% Create necessary files + %% Create necessary files ( and dirs ) %% - PrivDir = ?config(priv_dir, Config), - ?DBG("init_all -> PrivDir ~p", [PrivDir]), + SuiteTopDir = ?config(snmp_suite_top_dir, Config), + ?DBG("init_all -> SuiteTopDir ~p", [SuiteTopDir]), - TopDir = filename:join(PrivDir, snmp_agent_test), - case file:make_dir(TopDir) of - ok -> - ok; - {error, eexist} -> - ok; - Error -> - ?FAIL({failed_creating_subsuite_top_dir, Error}) - end, + AgentDir = filename:join(SuiteTopDir, "agent/"), + ?line ok = file:make_dir(AgentDir), + ?DBG("init_all -> AgentDir ~p", [AgentDir]), - DataDir = ?config(data_dir, Config), - ?DBG("init_all -> DataDir ~p", [DataDir]), + AgentDbDir = filename:join(AgentDir, "db/"), + ?line ok = file:make_dir(AgentDbDir), + ?DBG("init_all -> AgentDbDir ~p", [AgentDbDir]), - ?line ok = file:make_dir(MgrDir = filename:join(TopDir, "mgr_dir/")), - ?DBG("init_all -> MgrDir ~p", [MgrDir]), + AgentLogDir = filename:join(AgentDir, "log/"), + ?line ok = file:make_dir(AgentLogDir), + ?DBG("init_all -> AgentLogDir ~p", [AgentLogDir]), - ?line ok = file:make_dir(AgentDir = filename:join(TopDir, "agent_dir/")), - ?DBG("init_all -> AgentDir ~p", [AgentDir]), + AgentConfDir = filename:join(AgentDir, "conf/"), + ?line ok = file:make_dir(AgentConfDir), + ?DBG("init_all -> AgentConfDir ~p", [AgentConfDir]), - ?line ok = file:make_dir(SaDir = filename:join(TopDir, "sa_dir/")), + MgrDir = filename:join(SuiteTopDir, "mgr/"), + ?line ok = file:make_dir(MgrDir), + ?DBG("init_all -> MgrDir ~p", [MgrDir]), + + SaDir = filename:join(SuiteTopDir, "sa/"), + ?line ok = file:make_dir(SaDir), ?DBG("init_all -> SaDir ~p", [SaDir]), + SaDbDir = filename:join(SaDir, "db/"), + ?line ok = file:make_dir(SaDbDir), + ?DBG("init_all -> SaDbDir ~p", [SaDbDir]), + + %% MibDir = ?config(mib_dir, Config), + %% ?DBG("init_all -> MibDir ~p", [DataDir]), + %% -- %% Start and initiate mnesia @@ -184,11 +183,11 @@ init_all(Config0) when is_list(Config0) -> ?DBG("init_all -> application mnesia: set_env dir",[]), ?line application_controller:set_env(mnesia, dir, - filename:join(TopDir, "Mnesia1")), + filename:join(AgentDbDir, "Mnesia1")), ?DBG("init_all -> application mnesia: set_env dir on node ~p",[SaNode]), - ?line rpc:call(SaNode, application_controller, set_env, - [mnesia, dir, filename:join(TopDir, "Mnesia2")]), + ?line rpc:call(SaNode, application_controller, set_env, + [mnesia, dir, filename:join(SaDir, "Mnesia2")]), ?DBG("init_all -> create mnesia schema",[]), ?line ok = mnesia:create_schema([SaNode, node()]), @@ -199,13 +198,18 @@ init_all(Config0) when is_list(Config0) -> ?DBG("init_all -> start application mnesia on ~p",[SaNode]), ?line ok = rpc:call(SaNode, application, start, [mnesia]), Ip = ?LOCALHOST(), - [{snmp_sa, SaNode}, - {snmp_mgr, MgrNode}, - {agent_dir, AgentDir ++ "/"}, - 
{mgr_dir, MgrDir ++ "/"}, - {sa_dir, SaDir ++ "/"}, - {mib_dir, DataDir}, - {ip, Ip} | + [{snmp_sa, SaNode}, + {snmp_mgr, MgrNode}, + {snmp_master, node()}, + {agent_dir, AgentDir ++ "/"}, + {agent_db_dir, AgentDbDir ++ "/"}, + {agent_log_dir, AgentLogDir ++ "/"}, + {agent_conf_dir, AgentConfDir ++ "/"}, + {sa_dir, SaDir ++ "/"}, + {sa_db_dir, SaDbDir ++ "/"}, + {mgr_dir, MgrDir ++ "/"}, + %% {mib_dir, DataDir}, + {ip, Ip} | Config]. @@ -220,11 +224,14 @@ finish_all(Config) when is_list(Config) -> %% --- This one *must* be run first in each case --- init_case(Config) when is_list(Config) -> + ?DBG("init_case -> entry with" - "~n Config: ~p", [Config]), - SaNode = ?config(snmp_sa, Config), - MgrNode = ?config(snmp_mgr, Config), - MasterNode = node(), + "~n Config: ~p", [Config]), + + SaNode = ?config(snmp_sa, Config), + MgrNode = ?config(snmp_mgr, Config), + MasterNode = ?config(snmp_master, Config), + %% MasterNode = node(), SaHost = ?HOSTNAME(SaNode), MgrHost = ?HOSTNAME(MgrNode), @@ -411,7 +418,8 @@ start_v3_agent(Config, Opts) when is_list(Config) andalso is_list(Opts) -> start_bilingual_agent(Config) when is_list(Config) -> start_agent(Config, [v1,v2]). -start_bilingual_agent(Config, Opts) when is_list(Config) andalso is_list(Opts) -> +start_bilingual_agent(Config, Opts) + when is_list(Config) andalso is_list(Opts) -> start_agent(Config, [v1,v2], Opts). start_mt_agent(Config) when is_list(Config) -> @@ -423,57 +431,33 @@ start_mt_agent(Config, Opts) when is_list(Config) andalso is_list(Opts) -> start_agent(Config, Vsns) -> start_agent(Config, Vsns, []). start_agent(Config, Vsns, Opts) -> + ?LOG("start_agent -> entry (~p) with" "~n Config: ~p" "~n Vsns: ~p" "~n Opts: ~p", [node(), Config, Vsns, Opts]), - ?line AgentDir = ?config(agent_dir, Config), - ?line SaNode = ?config(snmp_sa, Config), - -%% AgentConfig = -%% [{agent_type, master}, -%% %% {multi_threaded, MultiT}, -%% %% {priority, Prio}, -%% %% {error_report_mod, ErrorReportMod}, -%% {versions, Vsns}, -%% {db_dir, AgentDir}, -%% %% {db_init_error, DbInitError}, -%% %% {set_mechanism, SetModule}, -%% %% {authentication_service, AuthModule}, -%% {audit_trail_log, [{type, read_write}, -%% {dir, AgentDir}, -%% {size, {10240, 10}}, -%% {repair, true}]}, -%% {config, [{verbosity, info}, -%% {dir, AgentDir}, -%% {force_load, false}]}, -%% {mibs, Mibs}, -%% %% {mib_storage, MibStorage}, -%% {local_db, []}, -%% {mib_server, []}, -%% {symbolic_store, []}, -%% {note_store, []}, -%% {net_if, []}, -%% %% {supervisor, SupOpts} -%% ], - + ?line AgentLogDir = ?config(agent_log_dir, Config), + ?line AgentConfDir = ?config(agent_conf_dir, Config), + ?line AgentDbDir = ?config(agent_db_dir, Config), + ?line SaNode = ?config(snmp_sa, Config), + app_env_init(vsn_init(Vsns) ++ - [{audit_trail_log, read_write_log}, - {audit_trail_log_dir, AgentDir}, - {audit_trail_log_size, {10240, 10}}, - {force_config_reload, false}, - {snmp_agent_type, master}, - {snmp_config_dir, AgentDir}, - {snmp_db_dir, AgentDir}, - {snmp_local_db_auto_repair, true}, - {snmp_local_db_verbosity, log}, - {snmp_master_agent_verbosity, trace}, - {snmp_supervisor_verbosity, trace}, - {snmp_mibserver_verbosity, log}, + [{audit_trail_log, read_write_log}, + {audit_trail_log_dir, AgentLogDir}, + {audit_trail_log_size, {10240, 10}}, + {force_config_reload, false}, + {snmp_agent_type, master}, + {snmp_config_dir, AgentConfDir}, + {snmp_db_dir, AgentDbDir}, + {snmp_local_db_auto_repair, true}, + {snmp_local_db_verbosity, log}, + {snmp_master_agent_verbosity, trace}, + 
{snmp_supervisor_verbosity, trace}, + {snmp_mibserver_verbosity, log}, {snmp_symbolic_store_verbosity, log}, - {snmp_note_store_verbosity, log}, - {snmp_net_if_verbosity, trace}], + {snmp_note_store_verbosity, log}, + {snmp_net_if_verbosity, trace}], Opts), @@ -1237,30 +1221,44 @@ stop_node(Node) -> %%% Configuration %%%----------------------------------------------------------------- -config(Vsns, MgrDir, AgentDir, MIp, AIp) -> - ?line snmp_config:write_agent_snmp_files(AgentDir, Vsns, MIp, - ?TRAP_UDP, AIp, 4000, +config(Vsns, MgrDir, AgentConfDir, MIp, AIp) -> + ?LOG("config -> entry with" + "~n Vsns: ~p" + "~n MgrDir: ~p" + "~n AgentConfDir: ~p" + "~n MIp: ~p" + "~n AIp: ~p", + [Vsns, MgrDir, AgentConfDir, MIp, AIp]), + ?line snmp_config:write_agent_snmp_files(AgentConfDir, Vsns, + MIp, ?TRAP_UDP, AIp, 4000, "test"), - ?line case update_usm(Vsns, AgentDir) of + ?line case update_usm(Vsns, AgentConfDir) of true -> - ?line copy_file(filename:join(AgentDir, "usm.conf"), + ?line copy_file(filename:join(AgentConfDir, "usm.conf"), filename:join(MgrDir, "usm.conf")), ?line update_usm_mgr(Vsns, MgrDir); false -> ?line ok end, - ?line update_community(Vsns, AgentDir), - ?line update_vacm(Vsns, AgentDir), - ?line write_target_addr_conf(AgentDir, MIp, ?TRAP_UDP, Vsns), - ?line write_target_params_conf(AgentDir, Vsns), - ?line write_notify_conf(AgentDir), + ?line update_community(Vsns, AgentConfDir), + ?line update_vacm(Vsns, AgentConfDir), + ?line write_target_addr_conf(AgentConfDir, MIp, ?TRAP_UDP, Vsns), + ?line write_target_params_conf(AgentConfDir, Vsns), + ?line write_notify_conf(AgentConfDir), ok. delete_files(Config) -> - Dir = ?config(agent_dir, Config), - {ok, List} = file:list_dir(Dir), + AgentDir = ?config(agent_dir, Config), + delete_files(AgentDir, [db, conf]). + +delete_files(_AgentFiles, []) -> + ok; +delete_files(AgentDir, [DirName|DirNames]) -> + Dir = filename:join(AgentDir, DirName), + {ok, Files} = file:list_dir(Dir), lists:foreach(fun(FName) -> file:delete(filename:join(Dir, FName)) end, - List). + Files), + delete_files(AgentDir, DirNames). update_usm(Vsns, Dir) -> case lists:member(v3, Vsns) of @@ -1418,8 +1416,8 @@ rewrite_target_addr_conf2(_NewPort,O) -> O. reset_target_addr_conf(Dir) -> - ?line ok = file:rename(filename:join(Dir,"target_addr.old"), - filename:join(Dir,"target_addr.conf")). + ?line ok = file:rename(filename:join(Dir, "target_addr.old"), + filename:join(Dir, "target_addr.conf")). write_target_params_conf(Dir, Vsns) -> F = fun(v1) -> {"target_v1", v1, v1, "all-rights", noAuthNoPriv}; @@ -1458,7 +1456,6 @@ copy_file(From, To) -> ok = file:write_file(To, Bin). - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% display_memory_usage() -> diff --git a/lib/snmp/test/snmp_app_test.erl b/lib/snmp/test/snmp_app_test.erl index bc62c8d530..0a320dbbce 100644 --- a/lib/snmp/test/snmp_app_test.erl +++ b/lib/snmp/test/snmp_app_test.erl @@ -23,8 +23,9 @@ -module(snmp_app_test). -export([ - all/0,groups/0,init_per_group/2,end_per_group/2, init_per_suite/1, - end_per_suite/1, + all/0, groups/0, + init_per_group/2, end_per_group/2, + init_per_suite/1, end_per_suite/1, init_per_testcase/2, end_per_testcase/2, fields/1, @@ -52,33 +53,47 @@ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% all() -> -Cases = [fields, modules, exportall, app_depend, - undef_funcs, {group, start_and_stop}], - Cases. + Cases = + [ + fields, + modules, + exportall, + app_depend, + undef_funcs, + {group, start_and_stop} + ], + Cases. 
groups() -> [{start_and_stop, [], - [start_and_stop_empty, start_and_stop_with_agent, - start_and_stop_with_manager, - start_and_stop_with_agent_and_manager, - start_epmty_and_then_agent_and_manager_and_stop, - start_with_agent_and_then_manager_and_stop, - start_with_manager_and_then_agent_and_stop]}]. + [start_and_stop_empty, + start_and_stop_with_agent, + start_and_stop_with_manager, + start_and_stop_with_agent_and_manager, + start_epmty_and_then_agent_and_manager_and_stop, + start_with_agent_and_then_manager_and_stop, + start_with_manager_and_then_agent_and_stop]}]. init_per_group(_GroupName, Config) -> - Config. + Config. end_per_group(_GroupName, Config) -> - Config. + Config. init_per_suite(Config) when is_list(Config) -> ?DISPLAY_SUITE_INFO(), + + %% Note that part of this stuff (the suite top dir creation) + %% may already be done (if we run the entire snmp suite). + PrivDir = ?config(priv_dir, Config), TopDir = filename:join(PrivDir, app), case file:make_dir(TopDir) of ok -> ok; + {error, eexist} -> + ok; Error -> fail({failed_creating_subsuite_top_dir, Error}) end, diff --git a/lib/snmp/test/snmp_compiler_test.erl b/lib/snmp/test/snmp_compiler_test.erl index 0a147130b0..257fc47952 100644 --- a/lib/snmp/test/snmp_compiler_test.erl +++ b/lib/snmp/test/snmp_compiler_test.erl @@ -40,6 +40,7 @@ all/0, groups/0, init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2, + init_per_suite/1, end_per_suite/1, description/1, oid_conflicts/1, @@ -48,6 +49,7 @@ agent_capabilities/1, module_compliance/1, warnings_as_errors/1, + augments_extra_info/1, otp_6150/1, otp_8574/1, @@ -58,6 +60,7 @@ %%---------------------------------------------------------------------- %% Internal exports %%---------------------------------------------------------------------- + -export([ ]). @@ -74,19 +77,41 @@ %% External functions %%====================================================================== -init_per_testcase(_Case, Config) when is_list(Config) -> - Dir = ?config(priv_dir, Config), - DataDir = ?config(data_dir, Config), - [_|RL] = lists:reverse(filename:split(DataDir)), - MibDir = join(lists:reverse(["snmp_test_data"|RL])), - CompDir = join(Dir, "comp_dir/"), - ?line ok = file:make_dir(CompDir), - [{comp_dir, CompDir}, {mib_dir, MibDir} | Config]. +init_per_suite(Config0) when is_list(Config0) -> + + ?DBG("init_per_suite -> entry with" + "~n Config0: ~p", [Config0]), + + Config1 = snmp_test_lib:init_suite_top_dir(?MODULE, Config0), + Config2 = snmp_test_lib:fix_data_dir(Config1), + + %% Mib-dirs + %% data_dir is trashed by the test-server / common-test + %% so there is no point in fixing it... + MibDir = snmp_test_lib:lookup(data_dir, Config2), + StdMibDir = filename:join([code:priv_dir(snmp), "mibs"]), + + [{mib_dir, MibDir}, {std_mib_dir, StdMibDir} | Config2]. + +end_per_suite(Config) when is_list(Config) -> + + ?DBG("end_per_suite -> entry with" + "~n Config: ~p", [Config]), + + Config. + + +init_per_testcase(Case, Config) when is_list(Config) -> + + ?DBG("init_per_testcase -> entry with" + "~n Config: ~p", [Config]), + + CaseTopDir = snmp_test_lib:init_testcase_top_dir(Case, Config), + + [{case_top_dir, CaseTopDir} | Config]. end_per_testcase(_Case, Config) when is_list(Config) -> - CompDir = ?config(comp_dir, Config), - ?line ok = ?DEL_DIR(CompDir), - lists:keydelete(comp_dir, 1, Config). + Config. 
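
Both this suite and snmp_test_lib:init_suite_top_dir/2 (at the end of this diff) now tolerate an already existing top directory, since the directory may have been created by an earlier run of the full suite. The pattern amounts to a small "ensure dir" helper; the helper name below is illustrative, not part of the change:

    ensure_dir(Dir) ->
        case file:make_dir(Dir) of
            ok              -> ok;
            {error, eexist} -> ok;   %% already created by an earlier suite/group
            Error           -> Error
        end.
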
%%====================================================================== @@ -102,6 +127,7 @@ all() -> agent_capabilities, module_compliance, warnings_as_errors, + augments_extra_info, {group, tickets} ]. @@ -126,7 +152,7 @@ description(Config) when is_list(Config) -> put(tname,desc), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), Filename = join(Dir,"test"), MibSrcName = Filename ++ ".mib", MibBinName = Filename ++ ".bin", @@ -161,7 +187,7 @@ oid_conflicts(Config) when is_list(Config) -> put(tname,oid_conflicts), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), Mib = join(Dir,"TESTv2.mib"), ?line ok = write_oid_conflict_mib(Mib), ?line {error,compilation_failed} = @@ -278,7 +304,7 @@ warnings_as_errors(suite) -> warnings_as_errors(Config) when is_list(Config) -> put(tname,warnings_as_errors), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), MibDir = ?config(mib_dir, Config), MibFile = join(MibDir, "OTP8574-MIB.mib"), OutFile = join(Dir, "OTP8574-MIB.bin"), @@ -303,7 +329,7 @@ otp_6150(Config) when is_list(Config) -> put(tname,otp_6150), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), MibDir = ?config(mib_dir, Config), MibFile = join(MibDir, "ERICSSON-TOP-MIB.mib"), ?line {ok, Mib} = snmpc:compile(MibFile, [{outdir, Dir}, {verbosity, trace}]), @@ -319,7 +345,7 @@ otp_8574(Config) when is_list(Config) -> put(tname,otp_8574), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), MibDir = ?config(mib_dir, Config), MibFile = join(MibDir, "OTP8574-MIB.mib"), @@ -352,7 +378,7 @@ otp_8595(Config) when is_list(Config) -> put(tname,otp_8595), p("starting with Config: ~p~n", [Config]), - Dir = ?config(comp_dir, Config), + Dir = ?config(case_top_dir, Config), MibDir = ?config(mib_dir, Config), MibFile = join(MibDir, "OTP8595-MIB.mib"), ?line {ok, Mib} = @@ -364,6 +390,47 @@ otp_8595(Config) when is_list(Config) -> %%====================================================================== + +augments_extra_info(suite) -> + []; +augments_extra_info(Config) when is_list(Config) -> + put(tname, augments_extra_info), + p("starting with Config: ~p~n", [Config]), + + Dir = ?config(case_top_dir, Config), + MibDir = ?config(mib_dir, Config), + Test2File = join(MibDir, "Test2.mib"), + Test3File = join(MibDir, "Test3.mib"), + ?line {ok, Test2BinFile} = + snmpc:compile(Test2File, [{outdir, Dir}, + {verbosity, silence}, + {group_check, false}]), + io:format("Test2BinFile: ~n~p~n", [Test2BinFile]), + ?line {ok, Test3BinFile} = + snmpc:compile(Test3File, [{i, [MibDir]}, + {outdir, Dir}, + {verbosity, silence}, + {group_check, true}]), + io:format("Test3BinFile: ~n~p~n", [Test3BinFile]), + {ok, Test3Mib} = snmp_misc:read_mib(Test3BinFile), + io:format("Test3Mib: ~n~p~n", [Test3Mib]), + %% There is only one table in this mib + #mib{table_infos = [{TableName, TI}]} = Test3Mib, + io:format("TableName: ~p" + "~n Table Info: ~p" + "~n", [TableName, TI]), + #table_info{nbr_of_cols = 4, + defvals = DefVals, + not_accessible = [2,4], + index_types = {augments, {tEntry, undefined}}, + first_accessible = 1} = TI, + io:format("Table info: ~p" + "~n DefVals: ~p" + "~n", [TableName, DefVals]), + ok. 
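
The new augments_extra_info case compiles Test2 and then Test3 (whose table AUGMENTS a Test2 table, see the new Test3.mib below) and checks the table_info recorded for the augmenting table. Isolated, and with placeholder paths, the compiler calls are (only options already used above):

    {ok, _Test2Bin} = snmpc:compile("Test2.mib",
                                    [{outdir, OutDir},
                                     {verbosity, silence},
                                     {group_check, false}]),
    {ok, Test3Bin}  = snmpc:compile("Test3.mib",
                                    [{i, [MibDir]},        %% import search path
                                     {outdir, OutDir},
                                     {verbosity, silence},
                                     {group_check, true}]),
    {ok, _Mib}      = snmp_misc:read_mib(Test3Bin).
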
+ + +%%====================================================================== %% Internal functions %%====================================================================== @@ -517,11 +584,11 @@ DESCRIPTION \"" ++ Desc ++ "\" ::= { test 1 } END", - Message = file:write_file(Filename ,Binary), + Message = file:write_file(Filename, Binary), case Message of ok -> ok; {error, Reason} -> - exit({failed_writing_mib,Reason}) + exit({failed_writing_mib, Reason}) end. @@ -546,8 +613,8 @@ check_desc(Desc1, Desc2) -> exit({'description not equal', Desc1, Desc2}). -join(Comp) -> - filename:join(Comp). +%% join(Comp) -> +%% filename:join(Comp). join(A,B) -> filename:join(A,B). diff --git a/lib/snmp/test/snmp_log_test.erl b/lib/snmp/test/snmp_log_test.erl index b692017407..8478825f59 100644 --- a/lib/snmp/test/snmp_log_test.erl +++ b/lib/snmp/test/snmp_log_test.erl @@ -42,7 +42,11 @@ -export([ init_per_testcase/2, end_per_testcase/2, - all/0,groups/0,init_per_group/2,end_per_group/2, + all/0, + groups/0, + init_per_group/2, + end_per_group/2, + open_and_close/1, open_write_and_close1/1, @@ -109,16 +113,21 @@ end_per_testcase(_Case, Config) when is_list(Config) -> %%====================================================================== %% ?SKIP(not_yet_implemented). all() -> -[open_and_close, {group, open_write_and_close}, - {group, log_to_io}, {group, log_to_txt}]. + [ + open_and_close, + {group, open_write_and_close}, + {group, log_to_io}, + {group, log_to_txt}]. groups() -> - [{open_write_and_close, [], - [open_write_and_close1, open_write_and_close2, - open_write_and_close3, open_write_and_close4]}, - {log_to_io, [], [log_to_io1, log_to_io2]}, - {log_to_txt, [], - [log_to_txt1, log_to_txt2, log_to_txt3]}]. + [ + {open_write_and_close, [], + [open_write_and_close1, open_write_and_close2, + open_write_and_close3, open_write_and_close4]}, + {log_to_io, [], [log_to_io1, log_to_io2]}, + {log_to_txt, [], + [log_to_txt1, log_to_txt2, log_to_txt3]} + ]. init_per_group(_GroupName, Config) -> Config. diff --git a/lib/snmp/test/snmp_manager_test.erl b/lib/snmp/test/snmp_manager_test.erl index 75c9f7b277..c374a2f0a6 100644 --- a/lib/snmp/test/snmp_manager_test.erl +++ b/lib/snmp/test/snmp_manager_test.erl @@ -49,6 +49,8 @@ init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2, + init_per_suite/1, end_per_suite/1, + simple_start_and_stop1/1, simple_start_and_stop2/1, @@ -146,36 +148,43 @@ %% External functions %%====================================================================== +init_per_suite(Config0) when is_list(Config0) -> + + ?DBG("init_per_suite -> entry with" + "~n Config0: ~p", [Config0]), + + Config1 = snmp_test_lib:init_suite_top_dir(?MODULE, Config0), + Config2 = snmp_test_lib:fix_data_dir(Config1), + + %% Mib-dirs + %% data_dir is trashed by the test-server / common-test + %% so there is no point in fixing it... + MibDir = snmp_test_lib:lookup(data_dir, Config2), + StdMibDir = filename:join([code:priv_dir(snmp), "mibs"]), + + [{mib_dir, MibDir}, {std_mib_dir, StdMibDir} | Config2]. + +end_per_suite(Config) when is_list(Config) -> + + ?DBG("end_per_suite -> entry with" + "~n Config: ~p", [Config]), + + Config. + + init_per_testcase(Case, Config) when is_list(Config) -> io:format(user, "~n~n*** INIT ~w:~w ***~n~n", [?MODULE,Case]), init_per_testcase2(Case, Config). 
init_per_testcase2(Case, Config) -> - ?DBG("init [~w] Nodes [1]: ~p", [Case, erlang:nodes()]), - - %% Fix a correct data dir (points to the wrong location): - DataDir0 = ?config(data_dir, Config), - DataDir1 = filename:split(filename:absname(DataDir0)), - [_|DataDir2] = lists:reverse(DataDir1), - DataDir = filename:join(lists:reverse(DataDir2) ++ [?snmp_test_data]), + ?DBG("init_per_testcase2 -> ~p", [erlang:nodes()]), - PrivDir = ?config(priv_dir, Config), + CaseTopDir = snmp_test_lib:init_testcase_top_dir(Case, Config), - TopDir = filename:join(PrivDir, ?MODULE), - case file:make_dir(TopDir) of - ok -> - ok; - {error, eexist} -> - ok; - Error -> - ?FAIL({failed_creating_subsuite_top_dir, Error}) - end, - - CaseTopDir = filename:join(TopDir, Case), - ?line ok = file:make_dir(CaseTopDir), - %% -- Manager dirs -- - MgrTopDir = filename:join(CaseTopDir, "manager/"), + MgrTopDir = filename:join(CaseTopDir, "manager/"), + ?DBG("init_per_testcase2 -> try create manager top dir: ~n~p", + [MgrTopDir]), ?line ok = file:make_dir(MgrTopDir), MgrConfDir = filename:join(MgrTopDir, "conf/"), @@ -200,10 +209,9 @@ init_per_testcase2(Case, Config) -> AgLogDir = filename:join(AgTopDir, "log/"), ?line ok = file:make_dir(AgLogDir), - Conf = [{snmp_data_dir, DataDir}, - {watchdog, ?WD_START(?MINS(5))}, + Conf = [{watchdog, ?WD_START(?MINS(5))}, {ip, ?LOCALHOST()}, - {top_dir, TopDir}, + {case_top_dir, CaseTopDir}, {agent_dir, AgTopDir}, {agent_conf_dir, AgConfDir}, {agent_db_dir, AgDbDir}, @@ -388,9 +396,11 @@ all() -> {group, user_tests}, {group, agent_tests}, {group, request_tests}, + {group, request_tests_mt}, {group, event_tests}, + {group, event_tests_mt}, discovery, - {group, tickets} + {group, tickets} ]. groups() -> @@ -431,6 +441,15 @@ groups() -> {group, misc_request_tests} ] }, + {request_tests_mt, [], + [ + {group, get_tests}, + {group, get_next_tests}, + {group, set_tests}, + {group, bulk_tests}, + {group, misc_request_tests} + ] + }, {get_tests, [], [ simple_sync_get1, @@ -477,18 +496,30 @@ groups() -> misc_async2 ] }, - {event_tests, [], - [ - trap1%% , - %% trap2, - %% inform1, - %% inform2, - %% inform3, - %% inform4, - %% inform_swarm, - %% report - ] - }, + {event_tests, [], + [ + trap1, + trap2, + inform1, + inform2, + inform3, + inform4, + inform_swarm, + report + ] + }, + {event_tests_mt, [], + [ + trap1, + trap2, + inform1, + inform2, + inform3, + inform4, + inform_swarm, + report + ] + }, {tickets, [], [ {group, otp8015}, @@ -507,11 +538,21 @@ groups() -> } ]. -init_per_group(_GroupName, Config) -> - Config. +init_per_group(request_tests_mt = GroupName, Config) -> + snmp_test_lib:init_group_top_dir( + GroupName, + [{manager_net_if_module, snmpm_net_if_mt} | Config]); +init_per_group(event_tests_mt = GroupName, Config) -> + snmp_test_lib:init_group_top_dir( + GroupName, + [{manager_net_if_module, snmpm_net_if_mt} | Config]); +init_per_group(GroupName, Config) -> + snmp_test_lib:init_group_top_dir(GroupName, Config). + end_per_group(_GroupName, Config) -> - Config. + %% Do we really need to do this? + lists:keydelete(snmp_group_top_dir, 1, Config). %%====================================================================== @@ -1571,6 +1612,11 @@ simple_sync_get1(Config) when is_list(Config) -> ?line ok = mgr_user_load_mib(Node, std_mib()), Oids2 = [[sysObjectID, 0], [sysDescr, 0], [sysUpTime, 0]], ?line ok = do_simple_sync_get(Node, Addr, Port, Oids2), + + p("Display log"), + display_log(Config), + + p("done"), ok. 
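
The new request_tests_mt and event_tests_mt groups rerun the same cases with the multi-threaded manager net_if: init_per_group injects {manager_net_if_module, snmpm_net_if_mt}, and start_manager/4 (see the hunk further down) turns that into the net_if part of the manager environment, roughly:

    {net_if, [{module,    snmpm_net_if_mt},
              {verbosity, NetIfVerbosity}]}

Without the override, the net_if entry keeps only the verbosity option, so the default net_if module is used.
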
do_simple_sync_get(Node, Addr, Port, Oids) -> @@ -1578,7 +1624,7 @@ do_simple_sync_get(Node, Addr, Port, Oids) -> ?DBG("~n Reply: ~p" "~n Rem: ~w", [Reply, Rem]), - + %% verify that the operation actually worked: %% The order should be the same, so no need to seach ?line ok = case Reply of @@ -1612,7 +1658,9 @@ simple_sync_get2(suite) -> []; simple_sync_get2(Config) when is_list(Config) -> process_flag(trap_exit, true), put(tname, ssg2), - do_simple_sync_get2(Config). + do_simple_sync_get2(Config), + display_log(Config), + ok. do_simple_sync_get2(Config) -> Get = fun(Node, TargetName, Oids) -> @@ -1677,7 +1725,9 @@ simple_sync_get3(suite) -> []; simple_sync_get3(Config) when is_list(Config) -> process_flag(trap_exit, true), put(tname, ssg3), - do_simple_sync_get3(Config). + do_simple_sync_get3(Config), + display_log(Config), + ok. do_simple_sync_get3(Config) -> Self = self(), @@ -1703,8 +1753,8 @@ do_simple_sync_get3(Config) -> %%====================================================================== -simple_async_get1(doc) -> ["Simple (async) get-request - " - "Old style (Addr & Port)"]; +simple_async_get1(doc) -> + ["Simple (async) get-request - Old style (Addr & Port)"]; simple_async_get1(suite) -> []; simple_async_get1(Config) when is_list(Config) -> process_flag(trap_exit, true), @@ -1766,6 +1816,7 @@ simple_async_get1(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. async_g_exec1(Node, Addr, Port, Oids) -> @@ -1816,7 +1867,9 @@ simple_async_get2(Config) when is_list(Config) -> TargetName = ?config(manager_agent_target_name, Config), Get = fun(Oids) -> async_g_exec2(MgrNode, TargetName, Oids) end, PostVerify = fun(Res) -> Res end, - do_simple_async_sync_get2(Config, MgrNode, AgentNode, Get, PostVerify). + do_simple_async_sync_get2(Config, MgrNode, AgentNode, Get, PostVerify), + display_log(Config), + ok. do_simple_async_sync_get2(Config, MgrNode, AgentNode, Get, PostVerify) -> ?line ok = mgr_user_load_mib(MgrNode, std_mib()), @@ -1905,7 +1958,9 @@ simple_async_get3(Config) when is_list(Config) -> PostVerify = fun(ok) -> receive Msg -> ok end; (Error) -> Error end, - do_simple_async_sync_get2(Config, MgrNode, AgentNode, Get, PostVerify). + do_simple_async_sync_get2(Config, MgrNode, AgentNode, Get, PostVerify), + display_log(Config), + ok. async_g_exec3(Node, TargetName, Oids, SendOpts) -> mgr_user_async_get2(Node, TargetName, Oids, SendOpts). @@ -1999,6 +2054,8 @@ simple_sync_get_next1(Config) when is_list(Config) -> end, ?line ok = do_simple_get_next(8, MgrNode, Addr, Port, Oids08, VF08), + + display_log(Config), ok. @@ -2062,7 +2119,10 @@ simple_sync_get_next2(Config) when is_list(Config) -> mgr_user_sync_get_next(Node, TargetName, Oids) end, PostVerify = fun(Res) -> Res end, - do_simple_sync_get_next2(Config, GetNext, PostVerify). + do_simple_sync_get_next2(Config, GetNext, PostVerify), + display_log(Config), + ok. + do_simple_sync_get_next2(Config, GetNext, PostVerify) when is_function(GetNext, 3) andalso is_function(PostVerify, 1) -> @@ -2193,7 +2253,9 @@ simple_sync_get_next3(Config) when is_list(Config) -> PostVerify = fun(ok) -> receive Msg -> ok end; (Error) -> Error end, - do_simple_sync_get_next2(Config, GetNext, PostVerify). + do_simple_sync_get_next2(Config, GetNext, PostVerify), + display_log(Config), + ok. 
%%====================================================================== @@ -2286,6 +2348,7 @@ simple_async_get_next1(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. @@ -2315,7 +2378,9 @@ simple_async_get_next2(Config) when is_list(Config) -> async_gn_exec2(MgrNode, TargetName, Oids) end, PostVerify = fun(Res) -> Res end, - do_simple_async_get_next2(MgrNode, AgentNode, GetNext, PostVerify). + do_simple_async_get_next2(MgrNode, AgentNode, GetNext, PostVerify), + display_log(Config), + ok. do_simple_async_get_next2(MgrNode, AgentNode, GetNext, PostVerify) when is_function(GetNext, 1) andalso is_function(PostVerify, 1) -> @@ -2437,7 +2502,9 @@ simple_async_get_next3(Config) when is_list(Config) -> (Error) -> Error end, - do_simple_async_get_next2(MgrNode, AgentNode, GetNext, PostVerify). + do_simple_async_get_next2(MgrNode, AgentNode, GetNext, PostVerify), + display_log(Config), + ok. async_gn_exec3(Node, TargetName, Oids, SendOpts) -> mgr_user_async_get_next2(Node, TargetName, Oids, SendOpts). @@ -2475,6 +2542,8 @@ simple_sync_set1(Config) when is_list(Config) -> {[sysLocation, 0], Val22} ], ?line ok = do_simple_set1(Node, Addr, Port, VAVs2), + + display_log(Config), ok. do_simple_set1(Node, Addr, Port, VAVs) -> @@ -2527,7 +2596,9 @@ simple_sync_set2(Config) when is_list(Config) -> end, PostVerify = fun() -> ok end, - do_simple_sync_set2(Config, Set, PostVerify). + do_simple_sync_set2(Config, Set, PostVerify), + display_log(Config), + ok. do_simple_sync_set2(Config, Set, PostVerify) when is_function(Set, 3) andalso is_function(PostVerify, 0) -> @@ -2604,7 +2675,9 @@ simple_sync_set3(Config) when is_list(Config) -> end, PostVerify = fun() -> receive Msg -> ok end end, - do_simple_sync_set2(Config, Set, PostVerify). + do_simple_sync_set2(Config, Set, PostVerify), + display_log(Config), + ok. %%====================================================================== @@ -2663,6 +2736,7 @@ simple_async_set1(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. @@ -2724,7 +2798,9 @@ simple_async_set2(Config) when is_list(Config) -> end, PostVerify = fun(Res) -> Res end, - do_simple_async_set2(MgrNode, AgentNode, Set, PostVerify). + do_simple_async_set2(MgrNode, AgentNode, Set, PostVerify), + display_log(Config), + ok. do_simple_async_set2(MgrNode, AgentNode, Set, PostVerify) -> Requests = @@ -2806,7 +2882,9 @@ simple_async_set3(Config) when is_list(Config) -> (Res) -> Res end, - do_simple_async_set2(MgrNode, AgentNode, Set, PostVerify). + do_simple_async_set2(MgrNode, AgentNode, Set, PostVerify), + display_log(Config), + ok. async_s_exec3(Node, TargetName, VAVs, SendOpts) -> mgr_user_async_set2(Node, TargetName, VAVs, SendOpts). @@ -2932,6 +3010,7 @@ simple_sync_get_bulk1(Config) when is_list(Config) -> 0, 2, [[TCnt2, 1]], VF11), + display_log(Config), ok. fl(L) -> @@ -3004,7 +3083,9 @@ simple_sync_get_bulk2(Config) when is_list(Config) -> end, PostVerify = fun(Res) -> Res end, - do_simple_sync_get_bulk2(Config, MgrNode, AgentNode, GetBulk, PostVerify). + do_simple_sync_get_bulk2(Config, MgrNode, AgentNode, GetBulk, PostVerify), + display_log(Config), + ok. 
do_simple_sync_get_bulk2(Config, MgrNode, AgentNode, GetBulk, PostVerify) -> %% -- 1 -- @@ -3169,7 +3250,9 @@ simple_sync_get_bulk3(Config) when is_list(Config) -> (Res) -> Res end, - do_simple_sync_get_bulk2(Config, MgrNode, AgentNode, GetBulk, PostVerify). + do_simple_sync_get_bulk2(Config, MgrNode, AgentNode, GetBulk, PostVerify), + display_log(Config), + ok. %%====================================================================== @@ -3308,6 +3391,7 @@ simple_async_get_bulk1(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. @@ -3340,7 +3424,9 @@ simple_async_get_bulk2(Config) when is_list(Config) -> end, PostVerify = fun(Res) -> Res end, - do_simple_async_get_bulk2(MgrNode, AgentNode, GetBulk, PostVerify). + do_simple_async_get_bulk2(MgrNode, AgentNode, GetBulk, PostVerify), + display_log(Config), + ok. do_simple_async_get_bulk2(MgrNode, AgentNode, GetBulk, PostVerify) -> %% We re-use the verification functions from the ssgb test-case @@ -3505,7 +3591,9 @@ simple_async_get_bulk3(Config) when is_list(Config) -> (Res) -> Res end, - do_simple_async_get_bulk2(MgrNode, AgentNode, GetBulk, PostVerify). + do_simple_async_get_bulk2(MgrNode, AgentNode, GetBulk, PostVerify), + display_log(Config), + ok. async_gb_exec3(Node, TargetName, {NR, MR, Oids}, SendOpts) -> mgr_user_async_get_bulk2(Node, TargetName, NR, MR, Oids, SendOpts). @@ -3697,6 +3785,7 @@ misc_async1(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. @@ -3885,6 +3974,7 @@ misc_async2(Config) when is_list(Config) -> p("manager info when ending test: ~n~p", [mgr_info(MgrNode)]), p("agent info when ending test: ~n~p", [agent_info(AgentNode)]), + display_log(Config), ok. @@ -4099,7 +4189,9 @@ trap1(Config) when is_list(Config) -> {5, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. %%====================================================================== @@ -4290,7 +4382,9 @@ trap2(Config) when is_list(Config) -> {7, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. %%====================================================================== @@ -4415,7 +4509,9 @@ inform1(Config) when is_list(Config) -> {6, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. 
%%====================================================================== @@ -4423,7 +4519,7 @@ inform1(Config) when is_list(Config) -> inform2(suite) -> []; inform2(Config) when is_list(Config) -> process_flag(trap_exit, true), - put(tname,i2), + put(tname, i2), p("starting with Config: ~p~n", [Config]), MgrNode = ?config(manager_node, Config), @@ -4577,7 +4673,7 @@ inform2(Config) when is_list(Config) -> [ {1, "Manager and agent info at start of test", Cmd1}, {2, "Send notifcation [no receiver] from agent", Cmd2}, - {3, "await inform-sent acknowledge from agent", Cmd3}, + {3, "Await inform-sent acknowledge from agent", Cmd3}, {4, "Await first inform to manager - do not reply", Cmd4}, {5, "Await second inform to manager - reply", Cmd5}, {6, "await inform-acknowledge from agent", Cmd6}, @@ -4585,7 +4681,9 @@ inform2(Config) when is_list(Config) -> {8, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. %%====================================================================== @@ -4719,7 +4817,9 @@ inform3(Config) when is_list(Config) -> {9, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. %%====================================================================== @@ -4835,7 +4935,9 @@ inform4(Config) when is_list(Config) -> {6, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. %%====================================================================== @@ -4923,7 +5025,10 @@ inform_swarm(Config) when is_list(Config) -> {5, "Manager and agent info after test completion", Cmd1} ], - command_handler(Commands). + command_handler(Commands), + display_log(Config), + ok. + inform_swarm_collector(N) -> inform_swarm_collector(N, 0, 0, 0, 10000). 
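
The display_log(Config) calls added throughout these cases refer to a helper introduced further down; it dumps the manager's Audit Trail Log via snmpm on the manager node. The underlying calls can be reproduced interactively; a sketch with a placeholder directory:

    LogDir  = "/tmp/manager/log",                    %% placeholder
    OutFile = filename:join(LogDir, "snmpm_log.txt"),
    snmpm:log_to_txt(LogDir, [], OutFile),           %% [] = no extra MIBs
    snmpm:log_to_io(LogDir, []).
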
@@ -5227,8 +5332,8 @@ init_agent(Config) -> %% -- %% Retrieve some dir's %% - Dir = ?config(top_dir, Config), - DataDir = ?config(data_dir, Config), + Dir = ?config(agent_dir, Config), + MibDir = ?config(mib_dir, Config), %% -- %% Start node @@ -5259,7 +5364,7 @@ init_agent(Config) -> ?line ok = write_agent_config(Vsns, Config), Conf = [{agent_node, Node}, - {mib_dir, DataDir} | Config], + {mib_dir, MibDir} | Config], %% %% Start the agent @@ -5720,6 +5825,15 @@ start_manager(Node, Vsns, Conf0, Opts) -> AtlSeqNo = get_opt(manager_atl_seqno, Conf0, false), + NetIfConf = + case get_opt(manager_net_if_module, Conf0, no_module) of + no_module -> + [{verbosity, NetIfVerbosity}]; + NetIfModule -> + [{module, NetIfModule}, + {verbosity, NetIfVerbosity}] + end, + Env = [{versions, Vsns}, {inform_request_behaviour, IRB}, {audit_trail_log, [{type, read_write}, @@ -5732,7 +5846,7 @@ start_manager(Node, Vsns, Conf0, Opts) -> {verbosity, ConfigVerbosity}]}, {note_store, [{verbosity, NoteStoreVerbosity}]}, {server, [{verbosity, ServerVerbosity}]}, - {net_if, [{verbosity, NetIfVerbosity}]}], + {net_if, NetIfConf}], ?line ok = set_mgr_env(Node, Env), ?line ok = start_snmp(Node), @@ -6018,6 +6132,38 @@ write_conf_file(Dir, File, Str) -> %% ------ +display_log(Config) -> + case lists:keysearch(manager_log_dir, 1, Config) of + {value, {_, Dir}} -> + case lists:keysearch(manager_node, 1, Config) of + {value, {_, Node}} -> + LogDir = Dir, + Mibs = [], + OutFile = j(LogDir, "snmpm_log.txt"), + p("~n" + "=========================" + " < Audit Trail Log > " + "=========================" + "~n"), + rcall(Node, snmpm, log_to_txt, [LogDir, Mibs, OutFile]), + rcall(Node, snmpm, log_to_io, [LogDir, Mibs]), + p("~n" + "=========================" + " < / Audit Trail Log > " + "=========================" + "~n"); + false -> + p("display_log -> no manager node found"), + ok + end; + false -> + p("display_log -> no manager log dir found"), + ok + end. + + +%% ------ + test2_mib(Config) -> j(test_mib_dir(Config), "Test2.bin"). @@ -6034,7 +6180,7 @@ snmpv2_mib() -> j(mib_dir(), "SNMPv2-MIB.bin"). test_mib_dir(Config) -> - ?config(snmp_data_dir, Config). + ?config(mib_dir, Config). mib_dir() -> j(code:priv_dir(snmp), "mibs"). diff --git a/lib/snmp/test/snmp_test_data/Test3.mib b/lib/snmp/test/snmp_test_data/Test3.mib new file mode 100644 index 0000000000..7f76e4dba4 --- /dev/null +++ b/lib/snmp/test/snmp_test_data/Test3.mib @@ -0,0 +1,123 @@ +Test3 DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, snmpModules, mib-2 + FROM SNMPv2-SMI + MODULE-COMPLIANCE, OBJECT-GROUP + FROM SNMPv2-CONF + tEntry + FROM Test2; + +t3MIB MODULE-IDENTITY + LAST-UPDATED "1203090000Z" + ORGANIZATION "" + CONTACT-INFO + "" + DESCRIPTION + "Test mib, used to test processing of requests." + ::= { snmpModules 42 } + + +-- Administrative assignments **************************************** + +t3MIBObjects OBJECT IDENTIFIER ::= { t3MIB 1 } +t3MIBConformance OBJECT IDENTIFIER ::= { t3MIB 2 } + + +-- test4 OBJECT IDENTIFIER ::= { mib-2 18 } + +tAugTable OBJECT-TYPE + SYNTAX SEQUENCE OF TAugEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "This table AUGMENTS tTable of the Test2 MIB." + ::= { t3MIBObjects 1 } + +tAugEntry OBJECT-TYPE + SYNTAX TAugEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An entry (conceptual row) in the sysORTable." 
+ AUGMENTS { tEntry } + ::= { tAugTable 1 } + +TAugEntry ::= SEQUENCE { + tFoo1 OCTET STRING, + tFoo2 OCTET STRING, + tBar1 Integer32, + tBar2 Integer32 +} + +tFoo1 OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..255)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "A string." + DEFVAL { "foo 1" } + ::= { tAugEntry 1 } + +tFoo2 OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..255)) + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A string." + DEFVAL { "foo 2" } + ::= { tAugEntry 2 } + +tBar1 OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "An integer." + DEFVAL { 42 } + ::= { tAugEntry 3 } + +tBar2 OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An integer." + DEFVAL { 42 } + ::= { tAugEntry 4 } + + +-- Conformance Information ******************************************* + +t3MIBCompliances OBJECT IDENTIFIER + ::= { t3MIBConformance 1 } +t3MIBGroups OBJECT IDENTIFIER + ::= { t3MIBConformance 2 } + +-- Compliance statements + +t3MIBCompliance MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for SNMP engines which + implement the SNMP-COMMUNITY-MIB." + + MODULE -- this module + MANDATORY-GROUPS { t3Group } + + ::= { t3MIBCompliances 1 } + +t3Group OBJECT-GROUP + OBJECTS { + tFoo1, + tFoo2, + tBar1, + tBar2 + } + STATUS current + DESCRIPTION + "A group." + ::= { t3MIBGroups 1 } + + +END diff --git a/lib/snmp/test/snmp_test_lib.erl b/lib/snmp/test/snmp_test_lib.erl index e4d58a1253..26115a0c74 100644 --- a/lib/snmp/test/snmp_test_lib.erl +++ b/lib/snmp/test/snmp_test_lib.erl @@ -25,7 +25,10 @@ -export([hostname/0, hostname/1, localhost/0, os_type/0, sz/1, display_suite_info/1]). -export([non_pc_tc_maybe_skip/4, os_based_skip/1]). --export([replace_config/3, set_config/3, get_config/2, get_config/3]). +-export([fix_data_dir/1, + init_suite_top_dir/2, init_group_top_dir/2, init_testcase_top_dir/2, + lookup/2, + replace_config/3, set_config/3, get_config/2, get_config/3]). -export([fail/3, skip/3]). -export([millis/0, millis_diff/2, hours/1, minutes/1, seconds/1, sleep/1]). -export([flush_mqueue/0, trap_exit/0, trap_exit/1]). @@ -198,6 +201,97 @@ os_based_skip(_Crap) -> %% Test suite utility functions %% +fix_data_dir(Config) -> + DataDir0 = lookup(data_dir, Config), + DataDir1 = filename:split(filename:absname(DataDir0)), + [_|DataDir2] = lists:reverse(DataDir1), + DataDir = filename:join(lists:reverse(DataDir2) ++ [?snmp_test_data]), + Config1 = lists:keydelete(data_dir, 1, Config), + [{data_dir, DataDir ++ "/"} | Config1]. + + +init_suite_top_dir(Suite, Config0) -> + io:format("~w:init_suite_top_dir -> entry with" + "~n Suite: ~p" + "~n Config0: ~p" + "~n", [?MODULE, Suite, Config0]), + Dir = lookup(priv_dir, Config0), + SuiteTopDir = filename:join(Dir, Suite), + case file:make_dir(SuiteTopDir) of + ok -> + ok; + {error, eexist} -> + ok; + {error, Reason} -> + fail({failed_creating_suite_top_dir, SuiteTopDir, Reason}, + ?MODULE, ?LINE) + end, + + %% This is just in case... + Config1 = lists:keydelete(snmp_group_top_dir, 1, Config0), + Config2 = lists:keydelete(snmp_suite_top_dir, 1, Config1), + [{snmp_suite_top_dir, SuiteTopDir} | Config2]. 
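The helpers added above (fix_data_dir/1 plus the suite/group/testcase top-dir functions) are meant to give each suite, group and test case its own directory under priv_dir. A minimal sketch of how a suite could wire them into its common_test callbacks; the callback bodies and the case_top_dir key are illustrative assumptions, not copied from any particular suite:

    %% Sketch, assuming the snmp_test_lib helpers shown above.
    init_per_suite(Config0) ->
        Config1 = snmp_test_lib:fix_data_dir(Config0),
        %% creates priv_dir/<Suite> and records it as snmp_suite_top_dir
        snmp_test_lib:init_suite_top_dir(?MODULE, Config1).

    init_per_group(GroupName, Config) ->
        %% creates a sub-dir under the suite (or parent group) top dir
        snmp_test_lib:init_group_top_dir(GroupName, Config).

    init_per_testcase(Case, Config) ->
        %% returns the per-case directory; stored under an assumed key
        CaseTopDir = snmp_test_lib:init_testcase_top_dir(Case, Config),
        [{case_top_dir, CaseTopDir} | Config].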
+ + +init_group_top_dir(GroupName, Config) -> + io:format("~w:init_group_top_dir -> entry with" + "~n GroupName: ~p" + "~n Config: ~p" + "~n", [?MODULE, GroupName, Config]), + case lists:keysearch(snmp_group_top_dir, 1, Config) of + {value, {_Key, Dir}} -> + %% This is a sub-group, so create our dir within Dir + GroupTopDir = filename:join(Dir, GroupName), + case file:make_dir(GroupTopDir) of + ok -> + ok; + {error, Reason} -> + fail({failed_creating_group_top_dir, GroupTopDir, Reason}, + ?MODULE, ?LINE) + end, + [{snmp_group_top_dir, GroupTopDir} | Config]; + + _ -> + case lists:keysearch(snmp_suite_top_dir, 1, Config) of + {value, {_Key, Dir}} -> + GroupTopDir = filename:join(Dir, GroupName), + case file:make_dir(GroupTopDir) of + ok -> + ok; + {error, Reason} -> + fail({failed_creating_group_top_dir, + GroupTopDir, Reason}, + ?MODULE, ?LINE) + end, + [{snmp_group_top_dir, GroupTopDir} | Config]; + _ -> + fail(could_not_find_suite_top_dir, ?MODULE, ?LINE) + end + end. + + +init_testcase_top_dir(Case, Config) -> + io:format("~w:init_testcase_top_dir -> entry with" + "~n Case: ~p" + "~n Config: ~p" + "~n", [?MODULE, Case, Config]), + case lists:keysearch(snmp_group_top_dir, 1, Config) of + {value, {_Key, Dir}} -> + CaseTopDir = filename:join(Dir, Case), + ok = file:make_dir(CaseTopDir), + CaseTopDir; + false -> + case lists:keysearch(snmp_suite_top_dir, 1, Config) of + {value, {_Key, Dir}} -> + CaseTopDir = filename:join(Dir, Case), + ok = file:make_dir(CaseTopDir), + CaseTopDir; + false -> + fail(failed_creating_case_top_dir, ?MODULE, ?LINE) + end + end. + + replace_config(Key, Config, NewValue) -> lists:keyreplace(Key, 1, Config, {Key, NewValue}). @@ -220,6 +314,9 @@ get_config(Key,C,Default) -> Default end. +lookup(Key, Config) -> + {value, {Key, Value}} = lists:keysearch(Key, 1, Config), + Value. fail(Reason, Mod, Line) -> exit({suite_failed, Reason, Mod, Line}). diff --git a/lib/snmp/test/test-mibs/RMON-MIB.mib b/lib/snmp/test/test-mibs/RMON-MIB.mib index 0824dbde1d..fa5bc82f4a 100644 --- a/lib/snmp/test/test-mibs/RMON-MIB.mib +++ b/lib/snmp/test/test-mibs/RMON-MIB.mib @@ -1,3826 +1,3958 @@ - RMON-MIB DEFINITIONS ::= BEGIN - - IMPORTS - Counter FROM RFC1155-SMI - mib-2,DisplayString FROM RFC1213-MIB - OBJECT-TYPE FROM RFC-1212 - TRAP-TYPE FROM RFC-1215; - - -- Remote Network Monitoring MIB - - rmon OBJECT IDENTIFIER ::= { mib-2 16 } - - - -- textual conventions - - OwnerString ::= DisplayString - -- This data type is used to model an administratively - -- assigned name of the owner of a resource. This - -- information is taken from the NVT ASCII character - -- set. It is suggested that this name contain one or - - - -- more of the following: IP address, management station - -- name, network manager's name, location, or phone - -- number. - -- In some cases the agent itself will be the owner of - -- an entry. In these cases, this string shall be set - -- to a string starting with 'monitor'. - -- - -- SNMP access control is articulated entirely in terms - -- of the contents of MIB views; access to a particular - -- SNMP object instance depends only upon its presence - -- or absence in a particular MIB view and never upon - -- its value or the value of related object instances. - -- Thus, objects of this type afford resolution of - -- resource contention only among cooperating managers; - -- they realize no access control function with respect - -- to uncooperative parties. 
- -- - -- By convention, objects with this syntax are declared as - -- having - -- - -- SIZE (0..127) - - EntryStatus ::= INTEGER - { valid(1), - createRequest(2), - underCreation(3), - invalid(4) - } - -- The status of a table entry. - -- - -- Setting this object to the value invalid(4) has the - -- effect of invalidating the corresponding entry. - -- That is, it effectively disassociates the mapping - -- identified with said entry. - -- It is an implementation-specific matter as to whether - -- the agent removes an invalidated entry from the table. - -- Accordingly, management stations must be prepared to - -- receive tabular information from agents that - -- corresponds to entries currently not in use. Proper - -- interpretation of such entries requires examination - -- of the relevant EntryStatus object. - -- - -- An existing instance of this object cannot be set to - -- createRequest(2). This object may only be set to - -- createRequest(2) when this instance is created. When - -- this object is created, the agent may wish to create - -- supplemental object instances with default values - -- to complete a conceptual row in this table. Because - - - -- the creation of these default objects is entirely at - -- the option of the agent, the manager must not assume - -- that any will be created, but may make use of any that - -- are created. Immediately after completing the create - -- operation, the agent must set this object to - -- underCreation(3). - -- - -- When in the underCreation(3) state, an entry is - -- allowed to exist in a possibly incomplete, possibly - -- inconsistent state, usually to allow it to be - -- modified in mutiple PDUs. When in this state, an - -- entry is not fully active. Entries shall exist in - -- the underCreation(3) state until the management - -- station is finished configuring the entry and sets - -- this object to valid(1) or aborts, setting this - -- object to invalid(4). If the agent determines that - -- an entry has been in the underCreation(3) state for - -- an abnormally long time, it may decide that the - -- management station has crashed. If the agent makes - -- this decision, it may set this object to invalid(4) - -- to reclaim the entry. A prudent agent will - -- understand that the management station may need to - -- wait for human input and will allow for that - -- possibility in its determination of this abnormally - -- long period. - -- - -- An entry in the valid(1) state is fully configured and - -- consistent and fully represents the configuration or - -- operation such a row is intended to represent. For - -- example, it could be a statistical function that is - -- configured and active, or a filter that is available - -- in the list of filters processed by the packet capture - -- process. - -- - -- A manager is restricted to changing the state of an - -- entry in the following ways: - -- - -- create under - -- To: valid Request Creation invalid - -- From: - -- valid OK NO OK OK - -- createRequest N/A N/A N/A N/A - -- underCreation OK NO OK OK - -- invalid NO NO NO OK - -- nonExistent NO OK NO OK - -- - -- In the table above, it is not applicable to move the - -- state from the createRequest state to any other - - - -- state because the manager will never find the - -- variable in that state. The nonExistent state is - -- not a value of the enumeration, rather it means that - -- the entryStatus variable does not exist at all. 
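The EntryStatus transition table quoted above is compact and easy to misread. As a cross-check, the same manager-visible rules can be encoded as an Erlang predicate (a sketch for illustration only; 'nonExistent' stands for the case where no instance exists yet):

    -type entry_status() :: valid | createRequest | underCreation
                          | invalid | nonExistent.

    %% allowed_transition(From, To): may a manager set the entry to To
    %% when it is currently in state From? (Encodes the table above.)
    -spec allowed_transition(entry_status(), entry_status()) -> boolean().
    allowed_transition(valid,         To) -> lists:member(To, [valid, underCreation, invalid]);
    allowed_transition(underCreation, To) -> lists:member(To, [valid, underCreation, invalid]);
    allowed_transition(invalid,       To) -> To =:= invalid;
    allowed_transition(nonExistent,   To) -> lists:member(To, [createRequest, invalid]);
    allowed_transition(createRequest, _)  -> false.  %% a manager never observes this state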
- -- - -- An agent may allow an entryStatus variable to change - -- state in additional ways, so long as the semantics - -- of the states are followed. This allowance is made - -- to ease the implementation of the agent and is made - -- despite the fact that managers should never - -- excercise these additional state transitions. - - - statistics OBJECT IDENTIFIER ::= { rmon 1 } - history OBJECT IDENTIFIER ::= { rmon 2 } - alarm OBJECT IDENTIFIER ::= { rmon 3 } - hosts OBJECT IDENTIFIER ::= { rmon 4 } - hostTopN OBJECT IDENTIFIER ::= { rmon 5 } - matrix OBJECT IDENTIFIER ::= { rmon 6 } - filter OBJECT IDENTIFIER ::= { rmon 7 } - capture OBJECT IDENTIFIER ::= { rmon 8 } - event OBJECT IDENTIFIER ::= { rmon 9 } - - - -- The Ethernet Statistics Group - -- - -- Implementation of the Ethernet Statistics group is - -- optional. - -- - -- The ethernet statistics group contains statistics - -- measured by the probe for each monitored interface on - -- this device. These statistics take the form of free - -- running counters that start from zero when a valid entry - -- is created. - -- - -- This group currently has statistics defined only for - -- Ethernet interfaces. Each etherStatsEntry contains - -- statistics for one Ethernet interface. The probe must - -- create one etherStats entry for each monitored Ethernet - -- interface on the device. - - etherStatsTable OBJECT-TYPE - SYNTAX SEQUENCE OF EtherStatsEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of Ethernet statistics entries." - ::= { statistics 1 } - - - etherStatsEntry OBJECT-TYPE - SYNTAX EtherStatsEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A collection of statistics kept for a particular - Ethernet interface. As an example, an instance of the - etherStatsPkts object might be named etherStatsPkts.1" - INDEX { etherStatsIndex } - ::= { etherStatsTable 1 } - - EtherStatsEntry ::= SEQUENCE { - etherStatsIndex INTEGER (1..65535), - etherStatsDataSource OBJECT IDENTIFIER, - etherStatsDropEvents Counter, - etherStatsOctets Counter, - etherStatsPkts Counter, - etherStatsBroadcastPkts Counter, - etherStatsMulticastPkts Counter, - etherStatsCRCAlignErrors Counter, - etherStatsUndersizePkts Counter, - etherStatsOversizePkts Counter, - etherStatsFragments Counter, - etherStatsJabbers Counter, - etherStatsCollisions Counter, - etherStatsPkts64Octets Counter, - etherStatsPkts65to127Octets Counter, - etherStatsPkts128to255Octets Counter, - etherStatsPkts256to511Octets Counter, - etherStatsPkts512to1023Octets Counter, - etherStatsPkts1024to1518Octets Counter, - etherStatsOwner OwnerString, - etherStatsStatus EntryStatus - } - - etherStatsIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of this object uniquely identifies this - etherStats entry." - ::= { etherStatsEntry 1 } - - etherStatsDataSource OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-write - STATUS mandatory - - - DESCRIPTION - "This object identifies the source of the data that - this etherStats entry is configured to analyze. This - source can be any ethernet interface on this device. - In order to identify a particular interface, this - object shall identify the instance of the ifIndex - object, defined in RFC 1213 and RFC 1573 [4,6], for - the desired interface. For example, if an entry - were to receive data from interface #1, this object - would be set to ifIndex.1. 
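The ifIndex.1 convention used in the description above (and again in the historyControl and hostControl data-source objects below) resolves to a concrete instance OID. As a small illustration in the list notation used by the manager-side API in these suites:

    %% ifIndex is 1.3.6.1.2.1.2.2.1.1 (ifTable.ifEntry.ifIndex), so the
    %% value written into etherStatsDataSource for interface #1 would be:
    IfIndex1 = [1,3,6,1,2,1,2,2,1,1,1].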
- - The statistics in this group reflect all packets - on the local network segment attached to the - identified interface. - - An agent may or may not be able to tell if - fundamental changes to the media of the interface - have occurred and necessitate an invalidation of - this entry. For example, a hot-pluggable ethernet - card could be pulled out and replaced by a - token-ring card. In such a case, if the agent has - such knowledge of the change, it is recommended that - it invalidate this entry. - - This object may not be modified if the associated - etherStatsStatus object is equal to valid(1)." - ::= { etherStatsEntry 2 } - - etherStatsDropEvents OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of events in which packets - were dropped by the probe due to lack of resources. - Note that this number is not necessarily the number of - packets dropped; it is just the number of times this - condition has been detected." - ::= { etherStatsEntry 3 } - - etherStatsOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of octets of data (including - those in bad packets) received on the - network (excluding framing bits but including - - - FCS octets). - - This object can be used as a reasonable estimate of - ethernet utilization. If greater precision is - desired, the etherStatsPkts and etherStatsOctets - objects should be sampled before and after a common - interval. The differences in the sampled values are - Pkts and Octets, respectively, and the number of - seconds in the interval is Interval. These values - are used to calculate the Utilization as follows: - - Pkts * (9.6 + 6.4) + (Octets * .8) - Utilization = ------------------------------------- - Interval * 10,000 - - The result of this equation is the value Utilization - which is the percent utilization of the ethernet - segment on a scale of 0 to 100 percent." - ::= { etherStatsEntry 4 } - - etherStatsPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad packets, - broadcast packets, and multicast packets) received." - ::= { etherStatsEntry 5 } - - etherStatsBroadcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of good packets received that were - directed to the broadcast address. Note that this - does not include multicast packets." - ::= { etherStatsEntry 6 } - - etherStatsMulticastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of good packets received that were - directed to a multicast address. Note that this - number does not include packets directed to the - broadcast address." - - - ::= { etherStatsEntry 7 } - - etherStatsCRCAlignErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received that - had a length (excluding framing bits, but - including FCS octets) of between 64 and 1518 - octets, inclusive, but but had either a bad - Frame Check Sequence (FCS) with an integral - number of octets (FCS Error) or a bad FCS with - a non-integral number of octets (Alignment Error)." 
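The utilization formula in the etherStatsOctets description above is easy to mis-transcribe, so here is a small worked sketch of it in Erlang (names are illustrative; the constants 9.6, 6.4 and .8 come straight from the description and correspond to the 10 Mb/s Ethernet interframe gap, preamble and per-octet times in microseconds):

    %% Sketch: percent utilization over a sampling interval, as defined in
    %% the etherStatsOctets description. Pkts and Octets are the deltas of
    %% etherStatsPkts/etherStatsOctets over the interval; Interval is the
    %% interval length in seconds.
    ether_utilization(Pkts, Octets, Interval) ->
        (Pkts * (9.6 + 6.4) + Octets * 0.8) / (Interval * 10000).

    %% Example: 1000 packets and 100000 octets in 10 s -> 0.96 % utilization.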
- ::= { etherStatsEntry 8 } - - etherStatsUndersizePkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received that were - less than 64 octets long (excluding framing bits, - but including FCS octets) and were otherwise well - formed." - ::= { etherStatsEntry 9 } - - etherStatsOversizePkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received that were - longer than 1518 octets (excluding framing bits, - but including FCS octets) and were otherwise - well formed." - ::= { etherStatsEntry 10 } - - etherStatsFragments OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received that were less - than 64 octets in length (excluding framing bits but - including FCS octets) and had either a bad Frame - Check Sequence (FCS) with an integral number of - octets (FCS Error) or a bad FCS with a non-integral - - - number of octets (Alignment Error). - - Note that it is entirely normal for - etherStatsFragments to increment. This is because - it counts both runts (which are normal occurrences - due to collisions) and noise hits." - ::= { etherStatsEntry 11 } - - etherStatsJabbers OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received that were - longer than 1518 octets (excluding framing bits, - but including FCS octets), and had either a bad - Frame Check Sequence (FCS) with an integral number - of octets (FCS Error) or a bad FCS with a - non-integral number of octets (Alignment Error). - - Note that this definition of jabber is different - than the definition in IEEE-802.3 section 8.2.1.5 - (10BASE5) and section 10.3.1.4 (10BASE2). These - documents define jabber as the condition where any - packet exceeds 20 ms. The allowed range to detect - jabber is between 20 ms and 150 ms." - ::= { etherStatsEntry 12 } - - etherStatsCollisions OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The best estimate of the total number of collisions - on this Ethernet segment. - - The value returned will depend on the location of - the RMON probe. Section 8.2.1.3 (10BASE-5) and - section 10.3.1.3 (10BASE-2) of IEEE standard 802.3 - states that a station must detect a collision, in - the receive mode, if three or more stations are - transmitting simultaneously. A repeater port must - detect a collision when two or more stations are - transmitting simultaneously. Thus a probe placed on - a repeater port could record more collisions than a - probe connected to a station on the same segment - would. - - - - Probe location plays a much smaller role when - considering 10BASE-T. 14.2.1.4 (10BASE-T) of IEEE - standard 802.3 defines a collision as the - simultaneous presence of signals on the DO and RD - circuits (transmitting and receiving at the same - time). A 10BASE-T station can only detect - collisions when it is transmitting. Thus probes - placed on a station and a repeater, should report - the same number of collisions. - - Note also that an RMON probe inside a repeater - should ideally report collisions between the - repeater and one or more other hosts (transmit - collisions as defined by IEEE 802.3k) plus receiver - collisions observed on any coax segments to which - the repeater is connected." 
- ::= { etherStatsEntry 13 } - - etherStatsPkts64Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were 64 octets in length - (excluding framing bits but including FCS octets)." - ::= { etherStatsEntry 14 } - - etherStatsPkts65to127Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were between - 65 and 127 octets in length inclusive - (excluding framing bits but including FCS octets)." - ::= { etherStatsEntry 15 } - - etherStatsPkts128to255Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were between - 128 and 255 octets in length inclusive - (excluding framing bits but including FCS octets)." - - - ::= { etherStatsEntry 16 } - - etherStatsPkts256to511Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were between - 256 and 511 octets in length inclusive - (excluding framing bits but including FCS octets)." - ::= { etherStatsEntry 17 } - - etherStatsPkts512to1023Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were between - 512 and 1023 octets in length inclusive - (excluding framing bits but including FCS octets)." - ::= { etherStatsEntry 18 } - - etherStatsPkts1024to1518Octets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets (including bad - packets) received that were between - 1024 and 1518 octets in length inclusive - (excluding framing bits but including FCS octets)." - ::= { etherStatsEntry 19 } - - etherStatsOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { etherStatsEntry 20 } - - etherStatsStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - - - DESCRIPTION - "The status of this etherStats entry." - ::= { etherStatsEntry 21 } - - - -- The History Control Group - - -- Implementation of the History Control group is optional. - -- - -- The history control group controls the periodic statistical - -- sampling of data from various types of networks. The - -- historyControlTable stores configuration entries that each - -- define an interface, polling period, and other parameters. - -- Once samples are taken, their data is stored in an entry - -- in a media-specific table. Each such entry defines one - -- sample, and is associated with the historyControlEntry that - -- caused the sample to be taken. Each counter in the - -- etherHistoryEntry counts the same event as its - -- similarly-named counterpart in the etherStatsEntry, - -- except that each value here is a cumulative sum during a - -- sampling period. - -- - -- If the probe keeps track of the time of day, it should - -- start the first sample of the history at a time such that - -- when the next hour of the day begins, a sample is - -- started at that instant. This tends to make more - -- user-friendly reports, and enables comparison of reports - -- from different probes that have relatively accurate time - -- of day. 
- -- - -- The probe is encouraged to add two history control entries - -- per monitored interface upon initialization that describe - -- a short term and a long term polling period. Suggested - -- parameters are 30 seconds for the short term polling period - -- and 30 minutes for the long term period. - - historyControlTable OBJECT-TYPE - SYNTAX SEQUENCE OF HistoryControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of history control entries." - ::= { history 1 } - - historyControlEntry OBJECT-TYPE - SYNTAX HistoryControlEntry - ACCESS not-accessible - STATUS mandatory - - - DESCRIPTION - "A list of parameters that set up a periodic sampling - of statistics. As an example, an instance of the - historyControlInterval object might be named - historyControlInterval.2" - INDEX { historyControlIndex } - ::= { historyControlTable 1 } - - HistoryControlEntry ::= SEQUENCE { - historyControlIndex INTEGER (1..65535), - historyControlDataSource OBJECT IDENTIFIER, - historyControlBucketsRequested INTEGER (1..65535), - historyControlBucketsGranted INTEGER (1..65535), - historyControlInterval INTEGER (1..3600), - historyControlOwner OwnerString, - historyControlStatus EntryStatus - } - - historyControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in the - historyControl table. Each such entry defines a - set of samples at a particular interval for an - interface on the device." - ::= { historyControlEntry 1 } - - historyControlDataSource OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "This object identifies the source of the data for - which historical data was collected and - placed in a media-specific table on behalf of this - historyControlEntry. This source can be any - interface on this device. In order to identify - a particular interface, this object shall identify - the instance of the ifIndex object, defined - in RFC 1213 and RFC 1573 [4,6], for the desired - interface. For example, if an entry were to receive - data from interface #1, this object would be set - to ifIndex.1. - - The statistics in this group reflect all packets - on the local network segment attached to the - - - identified interface. - - An agent may or may not be able to tell if fundamental - changes to the media of the interface have occurred - and necessitate an invalidation of this entry. For - example, a hot-pluggable ethernet card could be - pulled out and replaced by a token-ring card. In - such a case, if the agent has such knowledge of the - change, it is recommended that it invalidate this - entry. - - This object may not be modified if the associated - historyControlStatus object is equal to valid(1)." - ::= { historyControlEntry 2 } - - historyControlBucketsRequested OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The requested number of discrete time intervals - over which data is to be saved in the part of the - media-specific table associated with this - historyControlEntry. - - When this object is created or modified, the probe - should set historyControlBucketsGranted as closely to - this object as is possible for the particular probe - implementation and available resources." 
- DEFVAL { 50 } - ::= { historyControlEntry 3 } - - historyControlBucketsGranted OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of discrete sampling intervals - over which data shall be saved in the part of - the media-specific table associated with this - historyControlEntry. - - When the associated historyControlBucketsRequested - object is created or modified, the probe - should set this object as closely to the requested - value as is possible for the particular - probe implementation and available resources. The - probe must not lower this value except as a result - - - of a modification to the associated - historyControlBucketsRequested object. - - There will be times when the actual number of - buckets associated with this entry is less than - the value of this object. In this case, at the - end of each sampling interval, a new bucket will - be added to the media-specific table. - - When the number of buckets reaches the value of - this object and a new bucket is to be added to the - media-specific table, the oldest bucket associated - with this historyControlEntry shall be deleted by - the agent so that the new bucket can be added. - - When the value of this object changes to a value less - than the current value, entries are deleted - from the media-specific table associated with this - historyControlEntry. Enough of the oldest of these - entries shall be deleted by the agent so that their - number remains less than or equal to the new value of - this object. - - When the value of this object changes to a value - greater than the current value, the number of - associated media- specific entries may be allowed to - grow." - ::= { historyControlEntry 4 } - - historyControlInterval OBJECT-TYPE - SYNTAX INTEGER (1..3600) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The interval in seconds over which the data is - sampled for each bucket in the part of the - media-specific table associated with this - historyControlEntry. This interval can - be set to any number of seconds between 1 and - 3600 (1 hour). - - Because the counters in a bucket may overflow at their - maximum value with no indication, a prudent manager - will take into account the possibility of overflow - in any of the associated counters. It is important - to consider the minimum time in which any counter - could overflow on a particular media type and set - the historyControlInterval object to a value less - - - than this interval. This is typically most - important for the 'octets' counter in any - media-specific table. For example, on an Ethernet - network, the etherHistoryOctets counter could - overflow in about one hour at the Ethernet's maximum - utilization. - - This object may not be modified if the associated - historyControlStatus object is equal to valid(1)." - DEFVAL { 1800 } - ::= { historyControlEntry 5 } - - historyControlOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { historyControlEntry 6 } - - historyControlStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this historyControl entry. - - Each instance of the media-specific table associated - with this historyControlEntry will be deleted by the - agent if this historyControlEntry is not equal to - valid(1)." 
- ::= { historyControlEntry 7 } - - - -- The Ethernet History Group - - -- Implementation of the Ethernet History group is optional. - -- - -- The Ethernet History group records periodic - -- statistical samples from a network and stores them - -- for later retrieval. Once samples are taken, their - -- data is stored in an entry in a media-specific - -- table. Each such entry defines one sample, and is - -- associated with the historyControlEntry that caused - -- the sample to be taken. This group defines the - -- etherHistoryTable, for Ethernet networks. - -- - - - etherHistoryTable OBJECT-TYPE - SYNTAX SEQUENCE OF EtherHistoryEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of Ethernet history entries." - ::= { history 2 } - - etherHistoryEntry OBJECT-TYPE - SYNTAX EtherHistoryEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "An historical sample of Ethernet statistics on a - particular Ethernet interface. This sample is - associated with the historyControlEntry which set up - the parameters for a regular collection of these - samples. As an example, an instance of the - etherHistoryPkts object might be named - etherHistoryPkts.2.89" - INDEX { etherHistoryIndex , etherHistorySampleIndex } - ::= { etherHistoryTable 1 } - - EtherHistoryEntry ::= SEQUENCE { - etherHistoryIndex INTEGER (1..65535), - etherHistorySampleIndex INTEGER (1..2147483647), - etherHistoryIntervalStart TimeTicks, - etherHistoryDropEvents Counter, - etherHistoryOctets Counter, - etherHistoryPkts Counter, - etherHistoryBroadcastPkts Counter, - etherHistoryMulticastPkts Counter, - etherHistoryCRCAlignErrors Counter, - etherHistoryUndersizePkts Counter, - etherHistoryOversizePkts Counter, - etherHistoryFragments Counter, - etherHistoryJabbers Counter, - etherHistoryCollisions Counter, - etherHistoryUtilization INTEGER (0..10000) - } - - etherHistoryIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The history of which this entry is a part. The - history identified by a particular value of this - - - index is the same history as identified - by the same value of historyControlIndex." - ::= { etherHistoryEntry 1 } - - etherHistorySampleIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies the particular - sample this entry represents among all samples - associated with the same historyControlEntry. - This index starts at 1 and increases by one - as each new sample is taken." - ::= { etherHistoryEntry 2 } - - etherHistoryIntervalStart OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime at the start of the interval - over which this sample was measured. If the probe - keeps track of the time of day, it should start - the first sample of the history at a time such that - when the next hour of the day begins, a sample is - started at that instant. Note that following this - rule may require the probe to delay collecting the - first sample of the history, as each sample must be - of the same interval. Also note that the sample which - is currently being collected is not accessible in this - table until the end of its interval." - ::= { etherHistoryEntry 3 } - - etherHistoryDropEvents OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of events in which packets - were dropped by the probe due to lack of resources - during this sampling interval. 
Note that this number - is not necessarily the number of packets dropped, it - is just the number of times this condition has been - detected." - ::= { etherHistoryEntry 4 } - - etherHistoryOctets OBJECT-TYPE - - - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of octets of data (including - those in bad packets) received on the - network (excluding framing bits but including - FCS octets)." - ::= { etherHistoryEntry 5 } - - etherHistoryPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets (including bad packets) - received during this sampling interval." - ::= { etherHistoryEntry 6 } - - etherHistoryBroadcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets received during this - sampling interval that were directed to the - broadcast address." - ::= { etherHistoryEntry 7 } - - etherHistoryMulticastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets received during this - sampling interval that were directed to a - multicast address. Note that this number does not - include packets addressed to the broadcast address." - ::= { etherHistoryEntry 8 } - - etherHistoryCRCAlignErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets received during this sampling - interval that had a length (excluding framing bits - but including FCS octets) between 64 and 1518 - - - octets, inclusive, but had either a bad Frame Check - Sequence (FCS) with an integral number of octets - (FCS Error) or a bad FCS with a non-integral number - of octets (Alignment Error)." - ::= { etherHistoryEntry 9 } - - etherHistoryUndersizePkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets received during this - sampling interval that were less than 64 octets - long (excluding framing bits but including FCS - octets) and were otherwise well formed." - ::= { etherHistoryEntry 10 } - - etherHistoryOversizePkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets received during this - sampling interval that were longer than 1518 - octets (excluding framing bits but including - FCS octets) but were otherwise well formed." - ::= { etherHistoryEntry 11 } - - etherHistoryFragments OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets received during this - sampling interval that were less than 64 octets in - length (excluding framing bits but including FCS - octets) had either a bad Frame Check Sequence (FCS) - with an integral number of octets (FCS Error) or a bad - FCS with a non-integral number of octets (Alignment - Error). - - Note that it is entirely normal for - etherHistoryFragments to increment. This is because - it counts both runts (which are normal occurrences - due to collisions) and noise hits." - ::= { etherHistoryEntry 12 } - - etherHistoryJabbers OBJECT-TYPE - - - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets received during this - sampling interval that were longer than 1518 octets - (excluding framing bits but including FCS octets), - and had either a bad Frame Check Sequence (FCS) - with an integral number of octets (FCS Error) or - a bad FCS with a non-integral number of octets - (Alignment Error). 
- - Note that this definition of jabber is different - than the definition in IEEE-802.3 section 8.2.1.5 - (10BASE5) and section 10.3.1.4 (10BASE2). These - documents define jabber as the condition where any - packet exceeds 20 ms. The allowed range to detect - jabber is between 20 ms and 150 ms." - ::= { etherHistoryEntry 13 } - - etherHistoryCollisions OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The best estimate of the total number of collisions - on this Ethernet segment during this sampling - interval. - - The value returned will depend on the location of - the RMON probe. Section 8.2.1.3 (10BASE-5) and - section 10.3.1.3 (10BASE-2) of IEEE standard 802.3 - states that a station must detect a collision, in - the receive mode, if three or more stations are - transmitting simultaneously. A repeater port must - detect a collision when two or more stations are - transmitting simultaneously. Thus a probe placed on - a repeater port could record more collisions than a - probe connected to a station on the same segment - would. - - Probe location plays a much smaller role when - considering 10BASE-T. 14.2.1.4 (10BASE-T) of IEEE - standard 802.3 defines a collision as the - simultaneous presence of signals on the DO and RD - circuits (transmitting and receiving at the same - time). A 10BASE-T station can only detect - collisions when it is transmitting. Thus probes - - - placed on a station and a repeater, should report - the same number of collisions. - - Note also that an RMON probe inside a repeater - should ideally report collisions between the - repeater and one or more other hosts (transmit - collisions as defined by IEEE 802.3k) plus receiver - collisions observed on any coax segments to which - the repeater is connected." - ::= { etherHistoryEntry 14 } - - etherHistoryUtilization OBJECT-TYPE - SYNTAX INTEGER (0..10000) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The best estimate of the mean physical layer - network utilization on this interface during this - sampling interval, in hundredths of a percent." - ::= { etherHistoryEntry 15 } - - - -- The Alarm Group - - -- Implementation of the Alarm group is optional. - -- - -- The Alarm Group requires the implementation of the Event - -- group. - -- - -- The Alarm group periodically takes - -- statistical samples from variables in the probe and - -- compares them to thresholds that have been - -- configured. The alarm table stores configuration - -- entries that each define a variable, polling period, - -- and threshold parameters. If a sample is found to - -- cross the threshold values, an event is generated. - -- Only variables that resolve to an ASN.1 primitive - -- type of INTEGER (INTEGER, Counter, Gauge, or - -- TimeTicks) may be monitored in this way. - -- - -- This function has a hysteresis mechanism to limit - -- the generation of events. This mechanism generates - -- one event as a threshold is crossed in the - -- appropriate direction. No more events are generated - -- for that threshold until the opposite threshold is - -- crossed. - -- - -- In the case of a sampling a deltaValue, a probe may - - - -- implement this mechanism with more precision if it - -- takes a delta sample twice per period, each time - -- comparing the sum of the latest two samples to the - -- threshold. This allows the detection of threshold - -- crossings that span the sampling boundary. Note - -- that this does not require any special configuration - -- of the threshold value. 
It is suggested that probes - -- implement this more precise algorithm. - - alarmTable OBJECT-TYPE - SYNTAX SEQUENCE OF AlarmEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of alarm entries." - ::= { alarm 1 } - - alarmEntry OBJECT-TYPE - SYNTAX AlarmEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of parameters that set up a periodic checking - for alarm conditions. For example, an instance of the - alarmValue object might be named alarmValue.8" - INDEX { alarmIndex } - ::= { alarmTable 1 } - - AlarmEntry ::= SEQUENCE { - alarmIndex INTEGER (1..65535), - alarmInterval INTEGER, - alarmVariable OBJECT IDENTIFIER, - alarmSampleType INTEGER, - alarmValue INTEGER, - alarmStartupAlarm INTEGER, - alarmRisingThreshold INTEGER, - alarmFallingThreshold INTEGER, - alarmRisingEventIndex INTEGER (0..65535), - alarmFallingEventIndex INTEGER (0..65535), - alarmOwner OwnerString, - alarmStatus EntryStatus - } - - alarmIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - - - "An index that uniquely identifies an entry in the - alarm table. Each such entry defines a - diagnostic sample at a particular interval - for an object on the device." - ::= { alarmEntry 1 } - - alarmInterval OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The interval in seconds over which the data is - sampled and compared with the rising and falling - thresholds. When setting this variable, care - should be taken in the case of deltaValue - sampling - the interval should be set short enough - that the sampled variable is very unlikely to - increase or decrease by more than 2^31 - 1 during - a single sampling interval. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 2 } - - alarmVariable OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The object identifier of the particular variable to - be sampled. Only variables that resolve to an ASN.1 - primitive type of INTEGER (INTEGER, Counter, Gauge, - or TimeTicks) may be sampled. - - Because SNMP access control is articulated entirely - in terms of the contents of MIB views, no access - control mechanism exists that can restrict the value - of this object to identify only those objects that - exist in a particular MIB view. Because there is - thus no acceptable means of restricting the read - access that could be obtained through the alarm - mechanism, the probe must only grant write access to - this object in those views that have read access to - all objects on the probe. - - During a set operation, if the supplied variable - name is not available in the selected MIB view, a - badValue error must be returned. If at any time the - - - variable name of an established alarmEntry is no - longer available in the selected MIB view, the probe - must change the status of this alarmEntry to - invalid(4). - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 3 } - - alarmSampleType OBJECT-TYPE - SYNTAX INTEGER { - absoluteValue(1), - deltaValue(2) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The method of sampling the selected variable and - calculating the value to be compared against the - thresholds. If the value of this object is - absoluteValue(1), the value of the selected variable - will be compared directly with the thresholds at the - end of the sampling interval. 
If the value of this - object is deltaValue(2), the value of the selected - variable at the last sample will be subtracted from - the current value, and the difference compared with - the thresholds. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 4 } - - alarmValue OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of the statistic during the last sampling - period. For example, if the sample type is - deltaValue, this value will be the difference - between the samples at the beginning and end of the - period. If the sample type is absoluteValue, this - value will be the sampled value at the end of the - period. - - This is the value that is compared with the rising and - falling thresholds. - - - - The value during the current sampling period is not - made available until the period is completed and will - remain available until the next period completes." - ::= { alarmEntry 5 } - - alarmStartupAlarm OBJECT-TYPE - SYNTAX INTEGER { - risingAlarm(1), - fallingAlarm(2), - risingOrFallingAlarm(3) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The alarm that may be sent when this entry is first - set to valid. If the first sample after this entry - becomes valid is greater than or equal to the - risingThreshold and alarmStartupAlarm is equal to - risingAlarm(1) or risingOrFallingAlarm(3), then a - single rising alarm will be generated. If the first - sample after this entry becomes valid is less than - or equal to the fallingThreshold and - alarmStartupAlarm is equal to fallingAlarm(2) or - risingOrFallingAlarm(3), then a single falling alarm - will be generated. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 6 } - - alarmRisingThreshold OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "A threshold for the sampled statistic. When the - current sampled value is greater than or equal to - this threshold, and the value at the last sampling - interval was less than this threshold, a single - event will be generated. A single event will also - be generated if the first sample after this entry - becomes valid is greater than or equal to this - threshold and the associated alarmStartupAlarm is - equal to risingAlarm(1) or risingOrFallingAlarm(3). - - After a rising event is generated, another such event - will not be generated until the sampled value - falls below this threshold and reaches the - - - alarmFallingThreshold. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 7 } - - alarmFallingThreshold OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "A threshold for the sampled statistic. When the - current sampled value is less than or equal to this - threshold, and the value at the last sampling - interval was greater than this threshold, a single - event will be generated. A single event will also - be generated if the first sample after this entry - becomes valid is less than or equal to this - threshold and the associated alarmStartupAlarm is - equal to fallingAlarm(2) or risingOrFallingAlarm(3). - - After a falling event is generated, another such event - will not be generated until the sampled value - rises above this threshold and reaches the - alarmRisingThreshold. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." 
- ::= { alarmEntry 8 } - - alarmRisingEventIndex OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The index of the eventEntry that is - used when a rising threshold is crossed. The - eventEntry identified by a particular value of - this index is the same as identified by the same value - of the eventIndex object. If there is no - corresponding entry in the eventTable, then - no association exists. In particular, if this value - is zero, no associated event will be generated, as - zero is not a valid event index. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 9 } - - - alarmFallingEventIndex OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The index of the eventEntry that is - used when a falling threshold is crossed. The - eventEntry identified by a particular value of - this index is the same as identified by the same value - of the eventIndex object. If there is no - corresponding entry in the eventTable, then - no association exists. In particular, if this value - is zero, no associated event will be generated, as - zero is not a valid event index. - - This object may not be modified if the associated - alarmStatus object is equal to valid(1)." - ::= { alarmEntry 10 } - - alarmOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { alarmEntry 11 } - - alarmStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this alarm entry." - ::= { alarmEntry 12 } - - - -- The Host Group - - -- Implementation of the Host group is optional. - -- - -- The host group discovers new hosts on the network by - -- keeping a list of source and destination MAC Addresses seen - -- in good packets. For each of these addresses, the host - -- group keeps a set of statistics. The hostControlTable - -- controls which interfaces this function is performed on, - -- and contains some information about the process. On - -- behalf of each hostControlEntry, data is collected on an - - - -- interface and placed in both the hostTable and the - -- hostTimeTable. If the monitoring device finds itself - -- short of resources, it may delete entries as needed. It - -- is suggested that the device delete the least recently - -- used entries first. - - -- The hostTable contains entries for each address - -- discovered on a particular interface. Each entry - -- contains statistical data about that host. This table is - -- indexed by the MAC address of the host, through which a - -- random access may be achieved. - - -- The hostTimeTable contains data in the same format as the - -- hostTable, and must contain the same set of hosts, but is - -- indexed using hostTimeCreationOrder rather than - -- hostAddress. - -- The hostTimeCreationOrder is an integer which reflects - -- the relative order in which a particular entry was - -- discovered and thus inserted into the table. As this - -- order, and thus the index, is among those entries - -- currently in the table, the index for a particular entry - -- may change if an (earlier) entry is deleted. Thus the - -- association between hostTimeCreationOrder and - -- hostTimeEntry may be broken at any time. - - -- The hostTimeTable has two important uses. The first is the - -- fast download of this potentially large table. 
Because the - -- index of this table runs from 1 to the size of the table, - -- inclusive, its values are predictable. This allows very - -- efficient packing of variables into SNMP PDU's and allows - -- a table transfer to have multiple packets outstanding. - -- These benefits increase transfer rates tremendously. - - -- The second use of the hostTimeTable is the efficient - -- discovery by the management station of new entries added - -- to the table. After the management station has downloaded - -- the entire table, it knows that new entries will be added - -- immediately after the end of the current table. It can - -- thus detect new entries there and retrieve them easily. - - -- Because the association between hostTimeCreationOrder and - -- hostTimeEntry may be broken at any time, the management - -- station must monitor the related hostControlLastDeleteTime - -- object. When the management station thus detects a - -- deletion, it must assume that any such associations have - --- been broken, and invalidate any it has stored locally. - -- This includes restarting any download of the - -- hostTimeTable that may have been in progress, as well as - - - -- rediscovering the end of the hostTimeTable so that it may - -- detect new entries. If the management station does not - -- detect the broken association, it may continue to refer - -- to a particular host by its creationOrder while - -- unwittingly retrieving the data associated with another - -- host entirely. If this happens while downloading the - -- host table, the management station may fail to download - -- all of the entries in the table. - - hostControlTable OBJECT-TYPE - SYNTAX SEQUENCE OF HostControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of host table control entries." - ::= { hosts 1 } - - hostControlEntry OBJECT-TYPE - SYNTAX HostControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of parameters that set up the discovery of - hosts on a particular interface and the collection - of statistics about these hosts. For example, an - instance of the hostControlTableSize object might be - named hostControlTableSize.1" - INDEX { hostControlIndex } - ::= { hostControlTable 1 } - - HostControlEntry ::= SEQUENCE { - hostControlIndex INTEGER (1..65535), - hostControlDataSource OBJECT IDENTIFIER, - hostControlTableSize INTEGER, - hostControlLastDeleteTime TimeTicks, - hostControlOwner OwnerString, - hostControlStatus EntryStatus - } - - hostControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in the - hostControl table. Each such entry defines - a function that discovers hosts on a particular - interface and places statistics about them in the - - - hostTable and the hostTimeTable on behalf of this - hostControlEntry." - ::= { hostControlEntry 1 } - - hostControlDataSource OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "This object identifies the source of the data for - this instance of the host function. This source - can be any interface on this device. In order - to identify a particular interface, this object shall - identify the instance of the ifIndex object, defined - in RFC 1213 and RFC 1573 [4,6], for the desired - interface. For example, if an entry were to receive - data from interface #1, this object would be set to - ifIndex.1. 
- - The statistics in this group reflect all packets - on the local network segment attached to the - identified interface. - - An agent may or may not be able to tell if - fundamental changes to the media of the interface - have occurred and necessitate an invalidation of - this entry. For example, a hot-pluggable ethernet - card could be pulled out and replaced by a - token-ring card. In such a case, if the agent has - such knowledge of the change, it is recommended that - it invalidate this entry. - - This object may not be modified if the associated - hostControlStatus object is equal to valid(1)." - ::= { hostControlEntry 2 } - - hostControlTableSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of hostEntries in the hostTable and the - hostTimeTable associated with this hostControlEntry." - ::= { hostControlEntry 3 } - - hostControlLastDeleteTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - - - STATUS mandatory - DESCRIPTION - "The value of sysUpTime when the last entry - was deleted from the portion of the hostTable - associated with this hostControlEntry. If no - deletions have occurred, this value shall be zero." - ::= { hostControlEntry 4 } - - hostControlOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { hostControlEntry 5 } - - hostControlStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this hostControl entry. - - If this object is not equal to valid(1), all - associated entries in the hostTable, hostTimeTable, - and the hostTopNTable shall be deleted by the - agent." - ::= { hostControlEntry 6 } - - hostTable OBJECT-TYPE - SYNTAX SEQUENCE OF HostEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of host entries." - ::= { hosts 2 } - - hostEntry OBJECT-TYPE - SYNTAX HostEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A collection of statistics for a particular host - that has been discovered on an interface of this - device. For example, an instance of the - hostOutBroadcastPkts object might be named - hostOutBroadcastPkts.1.6.8.0.32.27.3.176" - - - INDEX { hostIndex, hostAddress } - ::= { hostTable 1 } - - HostEntry ::= SEQUENCE { - hostAddress OCTET STRING, - hostCreationOrder INTEGER (1..65535), - hostIndex INTEGER (1..65535), - hostInPkts Counter, - hostOutPkts Counter, - hostInOctets Counter, - hostOutOctets Counter, - hostOutErrors Counter, - hostOutBroadcastPkts Counter, - hostOutMulticastPkts Counter - } - - hostAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The physical address of this host." - ::= { hostEntry 1 } - - hostCreationOrder OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that defines the relative ordering of - the creation time of hosts captured for a - particular hostControlEntry. This index shall - be between 1 and N, where N is the value of - the associated hostControlTableSize. The ordering - of the indexes is based on the order of each entry's - insertion into the table, in which entries added - earlier have a lower index value than entries added - later. - - It is important to note that the order for a - particular entry may change as an (earlier) entry - is deleted from the table. 
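The instance name in the hostEntry description above, hostOutBroadcastPkts.1.6.8.0.32.27.3.176, reads as hostIndex 1 followed by the hostAddress 08:00:20:1B:03:B0 encoded as its length and then its six octets, the usual encoding for a variable-length string index. A minimal C sketch of building that sub-identifier suffix, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Build the instance-identifier suffix for a hostTable object, i.e. the
     * ".1.6.8.0.32.27.3.176" part of the example above: hostIndex first,
     * then the hostAddress octet string as its length followed by its
     * octets.  Returns the number of sub-identifiers written into 'out'.  */
    size_t host_entry_index(uint32_t *out, size_t max,
                            uint32_t host_index,
                            const uint8_t *mac, size_t mac_len)
    {
        size_t n = 0;
        if (max < 2 + mac_len)
            return 0;                 /* not enough room */
        out[n++] = host_index;        /* hostIndex                    */
        out[n++] = (uint32_t)mac_len; /* length of hostAddress        */
        for (size_t i = 0; i < mac_len; i++)
            out[n++] = mac[i];        /* one sub-id per address octet */
        return n;
    }

    /* Example: MAC 08:00:20:1B:03:B0 on hostIndex 1 yields
     * 1.6.8.0.32.27.3.176, matching the name shown in the description. */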
Because this order may - change, management stations should make use of the - hostControlLastDeleteTime variable in the - hostControlEntry associated with the relevant - portion of the hostTable. By observing - this variable, the management station may detect - the circumstances where a previous association - - - between a value of hostCreationOrder - and a hostEntry may no longer hold." - ::= { hostEntry 2 } - - hostIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The set of collected host statistics of which - this entry is a part. The set of hosts - identified by a particular value of this - index is associated with the hostControlEntry - as identified by the same value of hostControlIndex." - ::= { hostEntry 3 } - - hostInPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets transmitted to this - address since it was added to the hostTable." - ::= { hostEntry 4 } - - hostOutPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets, including bad packets, - transmitted by this address since it was added - to the hostTable." - ::= { hostEntry 5 } - - hostInOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets transmitted to this address - since it was added to the hostTable (excluding - framing bits but including FCS octets), except for - those octets in bad packets." - ::= { hostEntry 6 } - - hostOutOctets OBJECT-TYPE - SYNTAX Counter - - - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets transmitted by this address - since it was added to the hostTable (excluding - framing bits but including FCS octets), including - those octets in bad packets." - ::= { hostEntry 7 } - - hostOutErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of bad packets transmitted by this address - since this host was added to the hostTable." - ::= { hostEntry 8 } - - hostOutBroadcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets transmitted by this - address that were directed to the broadcast address - since this host was added to the hostTable." - ::= { hostEntry 9 } - - hostOutMulticastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets transmitted by this - address that were directed to a multicast address - since this host was added to the hostTable. - Note that this number does not include packets - directed to the broadcast address." - ::= { hostEntry 10 } - - -- host Time Table - - hostTimeTable OBJECT-TYPE - SYNTAX SEQUENCE OF HostTimeEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of time-ordered host table entries." - - - ::= { hosts 3 } - - hostTimeEntry OBJECT-TYPE - SYNTAX HostTimeEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A collection of statistics for a particular host - that has been discovered on an interface of this - device. This collection includes the relative - ordering of the creation time of this object. 
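A probe maintaining the per-host counters defined above has to distinguish good packets from bad ones and inbound from outbound updates. The following C sketch shows one plausible per-packet update consistent with those definitions; the struct layout and function are illustrative, not part of the MIB.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative in-memory shape of one hostTable row. */
    struct host_entry {
        uint32_t in_pkts, out_pkts;       /* hostInPkts / hostOutPkts         */
        uint32_t in_octets, out_octets;   /* hostInOctets / hostOutOctets     */
        uint32_t out_errors;              /* hostOutErrors                    */
        uint32_t out_bcast, out_mcast;    /* hostOut{Broadcast,Multicast}Pkts */
    };

    /* Update the source and destination rows for one observed packet:
     * "in" counters count good packets only, "out" packet/octet counters
     * include bad packets, and the broadcast/multicast counters count
     * good packets only.                                                 */
    void host_account(struct host_entry *src, struct host_entry *dst,
                      uint32_t octets, bool bad, bool bcast, bool mcast)
    {
        src->out_pkts++;
        src->out_octets += octets;
        if (bad) {
            src->out_errors++;
            return;               /* bad packets update no "in" or cast counters */
        }
        if (bcast)      src->out_bcast++;
        else if (mcast) src->out_mcast++;   /* multicast excludes broadcast */
        if (dst != NULL) {                  /* destination row may be absent */
            dst->in_pkts++;
            dst->in_octets += octets;
        }
    }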
For - example, an instance of the hostTimeOutBroadcastPkts - object might be named - hostTimeOutBroadcastPkts.1.687" - INDEX { hostTimeIndex, hostTimeCreationOrder } - ::= { hostTimeTable 1 } - - HostTimeEntry ::= SEQUENCE { - hostTimeAddress OCTET STRING, - hostTimeCreationOrder INTEGER (1..65535), - hostTimeIndex INTEGER (1..65535), - hostTimeInPkts Counter, - hostTimeOutPkts Counter, - hostTimeInOctets Counter, - hostTimeOutOctets Counter, - hostTimeOutErrors Counter, - hostTimeOutBroadcastPkts Counter, - hostTimeOutMulticastPkts Counter - } - - hostTimeAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The physical address of this host." - ::= { hostTimeEntry 1 } - - hostTimeCreationOrder OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in - the hostTime table among those entries associated - with the same hostControlEntry. This index shall - be between 1 and N, where N is the value of - the associated hostControlTableSize. The ordering - - - of the indexes is based on the order of each entry's - insertion into the table, in which entries added - earlier have a lower index value than entries added - later. Thus the management station has the ability to - learn of new entries added to this table without - downloading the entire table. - - It is important to note that the index for a - particular entry may change as an (earlier) entry - is deleted from the table. Because this order may - change, management stations should make use of the - hostControlLastDeleteTime variable in the - hostControlEntry associated with the relevant - portion of the hostTimeTable. By observing - this variable, the management station may detect - the circumstances where a download of the table - may have missed entries, and where a previous - association between a value of hostTimeCreationOrder - and a hostTimeEntry may no longer hold." - ::= { hostTimeEntry 2 } - - hostTimeIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The set of collected host statistics of which - this entry is a part. The set of hosts - identified by a particular value of this - index is associated with the hostControlEntry - as identified by the same value of hostControlIndex." - ::= { hostTimeEntry 3 } - - hostTimeInPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets transmitted to this - address since it was added to the hostTimeTable." - ::= { hostTimeEntry 4 } - - hostTimeOutPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets, including bad packets, transmitted by this - - - address since it was added to the hostTimeTable." - ::= { hostTimeEntry 5 } - - hostTimeInOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets transmitted to this address - since it was added to the hostTimeTable (excluding - framing bits but including FCS octets), except for - those octets in bad packets." - ::= { hostTimeEntry 6 } - - hostTimeOutOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets transmitted by this address - since it was added to the hostTimeTable (excluding - framing bits but including FCS octets), including - those octets in bad packets."
- ::= { hostTimeEntry 7 } - - hostTimeOutErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of bad packets transmitted by this address - since this host was added to the hostTimeTable." - ::= { hostTimeEntry 8 } - - hostTimeOutBroadcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of good packets transmitted by this - address that were directed to the broadcast address - since this host was added to the hostTimeTable." - ::= { hostTimeEntry 9 } - - hostTimeOutMulticastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - - - DESCRIPTION - "The number of good packets transmitted by this - address that were directed to a multicast address - since this host was added to the hostTimeTable. - Note that this number does not include packets - directed to the broadcast address." - ::= { hostTimeEntry 10 } - - - -- The Host Top "N" Group - - -- Implementation of the Host Top N group is optional. - -- - -- The Host Top N group requires the implementation of the - -- host group. - -- - -- The Host Top N group is used to prepare reports that - -- describe the hosts that top a list ordered by one of - -- their statistics. - -- The available statistics are samples of one of their - -- base statistics, over an interval specified by the - -- management station. Thus, these statistics are rate - -- based. The management station also selects how many such - -- hosts are reported. - - -- The hostTopNControlTable is used to initiate the - -- generation of such a report. The management station - -- may select the parameters of such a report, such as - -- which interface, which statistic, how many hosts, - -- and the start and stop times of the sampling. When - -- the report is prepared, entries are created in the - -- hostTopNTable associated with the relevant - -- hostTopNControlEntry. These entries are static for - -- each report after it has been prepared. - - hostTopNControlTable OBJECT-TYPE - SYNTAX SEQUENCE OF HostTopNControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of top N host control entries." - ::= { hostTopN 1 } - - hostTopNControlEntry OBJECT-TYPE - SYNTAX HostTopNControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - - - "A set of parameters that control the creation of a - report of the top N hosts according to several - metrics. For example, an instance of the - hostTopNDuration object might be named - hostTopNDuration.3" - INDEX { hostTopNControlIndex } - ::= { hostTopNControlTable 1 } - - HostTopNControlEntry ::= SEQUENCE { - hostTopNControlIndex INTEGER (1..65535), - hostTopNHostIndex INTEGER (1..65535), - hostTopNRateBase INTEGER, - hostTopNTimeRemaining INTEGER, - hostTopNDuration INTEGER, - hostTopNRequestedSize INTEGER, - hostTopNGrantedSize INTEGER, - hostTopNStartTime TimeTicks, - hostTopNOwner OwnerString, - hostTopNStatus EntryStatus - } - - hostTopNControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry - in the hostTopNControl table. Each such - entry defines one top N report prepared for - one interface." - ::= { hostTopNControlEntry 1 } - - hostTopNHostIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The host table for which a top N report will be - prepared on behalf of this entry. 
The host table - identified by a particular value of this index is - associated with the same host table as identified by - the same value of hostIndex. - - This object may not be modified if the associated - hostTopNStatus object is equal to valid(1)." - ::= { hostTopNControlEntry 2 } - - hostTopNRateBase OBJECT-TYPE - - - SYNTAX INTEGER { - hostTopNInPkts(1), - hostTopNOutPkts(2), - hostTopNInOctets(3), - hostTopNOutOctets(4), - hostTopNOutErrors(5), - hostTopNOutBroadcastPkts(6), - hostTopNOutMulticastPkts(7) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The variable for each host that the hostTopNRate - variable is based upon. - - This object may not be modified if the associated - hostTopNStatus object is equal to valid(1)." - ::= { hostTopNControlEntry 3 } - - hostTopNTimeRemaining OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The number of seconds left in the report currently - being collected. When this object is modified by - the management station, a new collection is started, - possibly aborting a currently running report. The - new value is used as the requested duration of this - report, which is loaded into the associated - hostTopNDuration object. - - When this object is set to a non-zero value, any - associated hostTopNEntries shall be made - inaccessible by the monitor. While the value of - this object is non-zero, it decrements by one per - second until it reaches zero. During this time, all - associated hostTopNEntries shall remain - inaccessible. At the time that this object - decrements to zero, the report is made accessible in - the hostTopNTable. Thus, the hostTopN table needs - to be created only at the end of the collection - interval." - DEFVAL { 0 } - ::= { hostTopNControlEntry 4 } - - hostTopNDuration OBJECT-TYPE - SYNTAX INTEGER - - - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of seconds that this report has collected - during the last sampling interval, or if this - report is currently being collected, the number - of seconds that this report is being collected - during this sampling interval. - - When the associated hostTopNTimeRemaining object is - set, this object shall be set by the probe to the - same value and shall not be modified until the next - time the hostTopNTimeRemaining is set. - - This value shall be zero if no reports have been - requested for this hostTopNControlEntry." - DEFVAL { 0 } - ::= { hostTopNControlEntry 5 } - - hostTopNRequestedSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The maximum number of hosts requested for the top N - table. - - When this object is created or modified, the probe - should set hostTopNGrantedSize as closely to this - object as is possible for the particular probe - implementation and available resources." - DEFVAL { 10 } - ::= { hostTopNControlEntry 6 } - - hostTopNGrantedSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The maximum number of hosts in the top N table. - - When the associated hostTopNRequestedSize object is - created or modified, the probe should set this - object as closely to the requested value as is - possible for the particular implementation and - available resources. The probe must not lower this - value except as a result of a set to the associated - hostTopNRequestedSize object. 
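Since the top N statistics are rate based, a plausible way for a probe to finish a report when the sampling interval expires is to take the change in the selected base statistic for each host and keep the hostTopNGrantedSize largest values. A minimal C sketch under that reading, with illustrative names:

    #include <stdint.h>
    #include <stdlib.h>

    struct topn_host {
        uint8_t  addr[6];        /* hostTopNAddress                         */
        uint32_t start_value;    /* base statistic when the report started  */
        uint32_t end_value;      /* base statistic when the interval ended  */
        uint32_t rate;           /* change over the sampling interval       */
    };

    static int by_rate_desc(const void *a, const void *b)
    {
        const struct topn_host *x = a, *y = b;
        return (x->rate < y->rate) - (x->rate > y->rate);   /* descending */
    }

    /* Turn the sampled hosts into a finished report: compute each host's
     * rate, sort by decreasing rate and truncate to the granted size.    */
    size_t build_topn_report(struct topn_host *hosts, size_t nhosts,
                             size_t granted_size)
    {
        for (size_t i = 0; i < nhosts; i++)
            hosts[i].rate = hosts[i].end_value - hosts[i].start_value;
        qsort(hosts, nhosts, sizeof hosts[0], by_rate_desc);
        return nhosts < granted_size ? nhosts : granted_size;
    }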
- - - Hosts with the highest value of hostTopNRate shall be - placed in this table in decreasing order of this rate - until there is no more room or until there are no more - hosts." - ::= { hostTopNControlEntry 7 } - - hostTopNStartTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime when this top N report was - last started. In other words, this is the time that - the associated hostTopNTimeRemaining object was - modified to start the requested report." - ::= { hostTopNControlEntry 8 } - - hostTopNOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { hostTopNControlEntry 9 } - - hostTopNStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this hostTopNControl entry. - - If this object is not equal to valid(1), all - associated hostTopNEntries shall be deleted by the - agent." - ::= { hostTopNControlEntry 10 } - - hostTopNTable OBJECT-TYPE - SYNTAX SEQUENCE OF HostTopNEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of top N host entries." - ::= { hostTopN 2 } - - hostTopNEntry OBJECT-TYPE - SYNTAX HostTopNEntry - - - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of statistics for a host that is part of a - top N report. For example, an instance of the - hostTopNRate object might be named - hostTopNRate.3.10" - INDEX { hostTopNReport, hostTopNIndex } - ::= { hostTopNTable 1 } - - HostTopNEntry ::= SEQUENCE { - hostTopNReport INTEGER (1..65535), - hostTopNIndex INTEGER (1..65535), - hostTopNAddress OCTET STRING, - hostTopNRate INTEGER - } - - hostTopNReport OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "This object identifies the top N report of which - this entry is a part. The set of hosts - identified by a particular value of this - object is part of the same report as identified - by the same value of the hostTopNControlIndex object." - ::= { hostTopNEntry 1 } - - hostTopNIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in - the hostTopN table among those in the same report. - This index is between 1 and N, where N is the - number of entries in this table. Increasing values - of hostTopNIndex shall be assigned to entries with - decreasing values of hostTopNRate until index N - is assigned to the entry with the lowest value of - hostTopNRate or there are no more hostTopNEntries." - ::= { hostTopNEntry 2 } - - hostTopNAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - - - DESCRIPTION - "The physical address of this host." - ::= { hostTopNEntry 3 } - - hostTopNRate OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The amount of change in the selected variable - during this sampling interval. The selected - variable is this host's instance of the object - selected by hostTopNRateBase." - ::= { hostTopNEntry 4 } - - - -- The Matrix Group - - -- Implementation of the Matrix group is optional. - -- - -- The Matrix group consists of the matrixControlTable, - -- matrixSDTable and the matrixDSTable. These tables - -- store statistics for a particular conversation - -- between two addresses. 
As the device detects a new - -- conversation, including those to a non-unicast - -- address, it creates a new entry in both of the - -- matrix tables. It must only create new entries - -- based on information received in good packets. If - -- the monitoring device finds itself short of - -- resources, it may delete entries as needed. It is - -- suggested that the device delete the least recently - -- used entries first. - - matrixControlTable OBJECT-TYPE - SYNTAX SEQUENCE OF MatrixControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of information entries for the - traffic matrix on each interface." - ::= { matrix 1 } - - matrixControlEntry OBJECT-TYPE - SYNTAX MatrixControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "Information about a traffic matrix on a particular - - - interface. For example, an instance of the - matrixControlLastDeleteTime object might be named - matrixControlLastDeleteTime.1" - INDEX { matrixControlIndex } - ::= { matrixControlTable 1 } - - MatrixControlEntry ::= SEQUENCE { - matrixControlIndex INTEGER (1..65535), - matrixControlDataSource OBJECT IDENTIFIER, - matrixControlTableSize INTEGER, - matrixControlLastDeleteTime TimeTicks, - matrixControlOwner OwnerString, - matrixControlStatus EntryStatus - } - - matrixControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in the - matrixControl table. Each such entry defines - a function that discovers conversations on a - particular interface and places statistics about - them in the matrixSDTable and the matrixDSTable on - behalf of this matrixControlEntry." - ::= { matrixControlEntry 1 } - - matrixControlDataSource OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "This object identifies the source of - the data from which this entry creates a traffic - matrix. This source can be any interface on this - device. In order to identify a particular - interface, this object shall identify the instance - of the ifIndex object, defined in RFC 1213 and RFC - 1573 [4,6], for the desired interface. For example, - if an entry were to receive data from interface #1, - this object would be set to ifIndex.1. - - The statistics in this group reflect all packets - on the local network segment attached to the - identified interface. - - An agent may or may not be able to tell if - - - fundamental changes to the media of the interface - have occurred and necessitate an invalidation of - this entry. For example, a hot-pluggable ethernet - card could be pulled out and replaced by a - token-ring card. In such a case, if the agent has - such knowledge of the change, it is recommended that - it invalidate this entry. - - This object may not be modified if the associated - matrixControlStatus object is equal to valid(1)." - ::= { matrixControlEntry 2 } - - matrixControlTableSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of matrixSDEntries in the matrixSDTable - for this interface. This must also be the value of - the number of entries in the matrixDSTable for this - interface." - ::= { matrixControlEntry 3 } - - matrixControlLastDeleteTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime when the last entry - was deleted from the portion of the matrixSDTable - or matrixDSTable associated with this - matrixControlEntry. 
If no deletions have occurred, - this value shall be zero." - ::= { matrixControlEntry 4 } - - matrixControlOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { matrixControlEntry 5 } - - matrixControlStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - - - DESCRIPTION - "The status of this matrixControl entry. - - If this object is not equal to valid(1), all - associated entries in the matrixSDTable and the - matrixDSTable shall be deleted by the agent." - ::= { matrixControlEntry 6 } - - matrixSDTable OBJECT-TYPE - SYNTAX SEQUENCE OF MatrixSDEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of traffic matrix entries indexed by - source and destination MAC address." - ::= { matrix 2 } - - matrixSDEntry OBJECT-TYPE - SYNTAX MatrixSDEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A collection of statistics for communications between - two addresses on a particular interface. For example, - an instance of the matrixSDPkts object might be named - matrixSDPkts.1.6.8.0.32.27.3.176.6.8.0.32.10.8.113" - INDEX { matrixSDIndex, - matrixSDSourceAddress, matrixSDDestAddress } - ::= { matrixSDTable 1 } - - MatrixSDEntry ::= SEQUENCE { - matrixSDSourceAddress OCTET STRING, - matrixSDDestAddress OCTET STRING, - matrixSDIndex INTEGER (1..65535), - matrixSDPkts Counter, - matrixSDOctets Counter, - matrixSDErrors Counter - } - - matrixSDSourceAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The source physical address." - ::= { matrixSDEntry 1 } - - matrixSDDestAddress OBJECT-TYPE - - - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The destination physical address." - ::= { matrixSDEntry 2 } - - matrixSDIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The set of collected matrix statistics of which - this entry is a part. The set of matrix statistics - identified by a particular value of this index - is associated with the same matrixControlEntry - as identified by the same value of - matrixControlIndex." - ::= { matrixSDEntry 3 } - - matrixSDPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets transmitted from the source - address to the destination address (this number - includes bad packets)." - ::= { matrixSDEntry 4 } - - matrixSDOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets (excluding framing bits but - including FCS octets) contained in all packets - transmitted from the source address to the - destination address." - ::= { matrixSDEntry 5 } - - matrixSDErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of bad packets transmitted from - the source address to the destination address." - - - ::= { matrixSDEntry 6 } - - - -- Traffic matrix tables from destination to source - - matrixDSTable OBJECT-TYPE - SYNTAX SEQUENCE OF MatrixDSEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of traffic matrix entries indexed by - destination and source MAC address." 
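Because every conversation appears once in the matrixSDTable and once in the matrixDSTable, a probe will typically keep a single record per source/destination pair and expose it under both index orders. A minimal C sketch of the per-packet update, assuming a hypothetical matrix_find_or_create lookup that only creates rows for good packets:

    #include <stdbool.h>
    #include <stdint.h>

    /* One conversation; exposed once in matrixSDTable (source, dest) order
     * and once in matrixDSTable (dest, source) order.                     */
    struct matrix_entry {
        uint8_t  src[6], dst[6];
        uint32_t pkts, octets, errors;   /* matrix{SD,DS}{Pkts,Octets,Errors} */
    };

    /* Hypothetical lookup that creates the row on first use; only good
     * packets may create rows, per the comment above.                   */
    extern struct matrix_entry *matrix_find_or_create(const uint8_t src[6],
                                                      const uint8_t dst[6],
                                                      bool may_create);

    void matrix_account(const uint8_t src[6], const uint8_t dst[6],
                        uint32_t octets, bool bad)
    {
        struct matrix_entry *e = matrix_find_or_create(src, dst, !bad);
        if (e == NULL)
            return;              /* bad packet and no existing row */
        e->pkts++;               /* packet count includes bad packets */
        e->octets += octets;
        if (bad)
            e->errors++;
    }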
- ::= { matrix 3 } - - matrixDSEntry OBJECT-TYPE - SYNTAX MatrixDSEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A collection of statistics for communications between - two addresses on a particular interface. For example, - an instance of the matrixSDPkts object might be named - matrixSDPkts.1.6.8.0.32.10.8.113.6.8.0.32.27.3.176" - INDEX { matrixDSIndex, - matrixDSDestAddress, matrixDSSourceAddress } - ::= { matrixDSTable 1 } - - MatrixDSEntry ::= SEQUENCE { - matrixDSSourceAddress OCTET STRING, - matrixDSDestAddress OCTET STRING, - matrixDSIndex INTEGER (1..65535), - matrixDSPkts Counter, - matrixDSOctets Counter, - matrixDSErrors Counter - } - - matrixDSSourceAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The source physical address." - ::= { matrixDSEntry 1 } - - matrixDSDestAddress OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - - - DESCRIPTION - "The destination physical address." - ::= { matrixDSEntry 2 } - - matrixDSIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The set of collected matrix statistics of which - this entry is a part. The set of matrix statistics - identified by a particular value of this index - is associated with the same matrixControlEntry - as identified by the same value of - matrixControlIndex." - ::= { matrixDSEntry 3 } - - matrixDSPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets transmitted from the source - address to the destination address (this number - includes bad packets)." - ::= { matrixDSEntry 4 } - - matrixDSOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of octets (excluding framing bits - but including FCS octets) contained in all packets - transmitted from the source address to the - destination address." - ::= { matrixDSEntry 5 } - - matrixDSErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of bad packets transmitted from - the source address to the destination address." - ::= { matrixDSEntry 6 } - - - - - -- The Filter Group - - -- Implementation of the Filter group is optional. - -- - -- The Filter group allows packets to be captured with an - -- arbitrary filter expression. A logical data and - -- event stream or "channel" is formed by the packets - -- that match the filter expression. - -- - -- This filter mechanism allows the creation of an arbitrary - -- logical expression with which to filter packets. Each - -- filter associated with a channel is OR'ed with the others. - -- Within a filter, any bits checked in the data and status - -- are AND'ed with respect to other bits in the same filter. - -- The NotMask also allows for checking for inequality. - -- Finally, the channelAcceptType object allows for - -- inversion of the whole equation. - -- - -- If a management station wishes to receive a trap to alert - -- it that new packets have been captured and are available - -- for download, it is recommended that it set up an alarm - -- entry that monitors the value of the relevant - -- channelMatches instance. - -- - -- The channel can be turned on or off, and can also - -- generate events when packets pass through it. - - filterTable OBJECT-TYPE - SYNTAX SEQUENCE OF FilterEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of packet filter entries." 
- ::= { filter 1 } - - filterEntry OBJECT-TYPE - SYNTAX FilterEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of parameters for a packet filter applied on a - particular interface. As an example, an instance of - the filterPktData object might be named - filterPktData.12" - INDEX { filterIndex } - ::= { filterTable 1 } - - - - - FilterEntry ::= SEQUENCE { - filterIndex INTEGER (1..65535), - filterChannelIndex INTEGER (1..65535), - filterPktDataOffset INTEGER, - filterPktData OCTET STRING, - filterPktDataMask OCTET STRING, - filterPktDataNotMask OCTET STRING, - filterPktStatus INTEGER, - filterPktStatusMask INTEGER, - filterPktStatusNotMask INTEGER, - filterOwner OwnerString, - filterStatus EntryStatus - } - - filterIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry - in the filter table. Each such entry defines - one filter that is to be applied to every packet - received on an interface." - ::= { filterEntry 1 } - - filterChannelIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "This object identifies the channel of which this - filter is a part. The filters identified by a - particular value of this object are associated with - the same channel as identified by the same value of - the channelIndex object." - ::= { filterEntry 2 } - - filterPktDataOffset OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The offset from the beginning of each packet where - a match of packet data will be attempted. This offset - is measured from the point in the physical layer - packet after the framing bits, if any. For example, - in an Ethernet frame, this point is at the beginning - of the destination MAC address. - - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - DEFVAL { 0 } - ::= { filterEntry 3 } - - filterPktData OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The data that is to be matched with the input - packet. For each packet received, this filter and - the accompanying filterPktDataMask and - filterPktDataNotMask will be adjusted for the - offset. The only bits relevant to this match - algorithm are those that have the corresponding - filterPktDataMask bit equal to one. The following - three rules are then applied to every packet: - - (1) If the packet is too short and does not have data - corresponding to part of the filterPktData, the - packet will fail this data match. - - (2) For each relevant bit from the packet with the - corresponding filterPktDataNotMask bit set to - zero, if the bit from the packet is not equal to - the corresponding bit from the filterPktData, - then the packet will fail this data match. - - (3) If for every relevant bit from the packet with the - corresponding filterPktDataNotMask bit set to one, - the bit from the packet is equal to the - corresponding bit from the filterPktData, then - the packet will fail this data match. - - Any packets that have not failed any of the three - matches above have passed this data match. In - particular, a zero length filter will match any - packet. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 4 } - - filterPktDataMask OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-write - STATUS mandatory - - - DESCRIPTION - "The mask that is applied to the match process. 
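The three data-match rules above translate fairly directly into code. The C sketch below assumes, for brevity, that filterPktData, filterPktDataMask and filterPktDataNotMask have already been brought to a common length (the '1'/'0' extension rules are given in the two mask descriptions that follow); function and parameter names are illustrative.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Data match for one filter, following rules (1)-(3) above.  'data',
     * 'mask' and 'notmask' are assumed to be 'len' octets each.          */
    bool filter_data_match(const uint8_t *pkt, size_t pktlen, size_t offset,
                           const uint8_t *data, const uint8_t *mask,
                           const uint8_t *notmask, size_t len)
    {
        if (len == 0)
            return true;                      /* zero length filter matches  */
        if (pktlen < offset + len)
            return false;                     /* rule (1): packet too short  */

        bool have_ineq_bits = false;          /* any relevant NotMask=1 bit? */
        bool all_ineq_equal = true;

        for (size_t i = 0; i < len; i++) {
            uint8_t relevant = mask[i];                     /* bits to examine */
            uint8_t diff = (pkt[offset + i] ^ data[i]) & relevant;

            if (diff & ~notmask[i])
                return false;                 /* rule (2): equality required */
            if (relevant & notmask[i]) {
                have_ineq_bits = true;
                if (diff & notmask[i])
                    all_ineq_equal = false;   /* at least one bit differs    */
            }
        }
        /* rule (3): if inequality bits exist, they may not all be equal. */
        return !(have_ineq_bits && all_ineq_equal);
    }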
- After adjusting this mask for the offset, only those - bits in the received packet that correspond to bits - set in this mask are relevant for further processing - by the match algorithm. The offset is applied to - filterPktDataMask in the same way it is applied to the - filter. For the purposes of the matching algorithm, - if the associated filterPktData object is longer - than this mask, this mask is conceptually extended - with '1' bits until it reaches the length of the - filterPktData object. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 5 } - - filterPktDataNotMask OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The inversion mask that is applied to the match - process. After adjusting this mask for the offset, - those relevant bits in the received packet that - correspond to bits cleared in this mask must all be - equal to their corresponding bits in the - filterPktData object for the packet to be accepted. - In addition, at least one of those relevant bits in - the received packet that correspond to bits set in - this mask must be different to its corresponding bit - in the filterPktData object. - - For the purposes of the matching algorithm, if the - associated filterPktData object is longer than this - mask, this mask is conceptually extended with '0' - bits until it reaches the length of the - filterPktData object. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 6 } - - filterPktStatus OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - - - "The status that is to be matched with the input - packet. The only bits relevant to this match - algorithm are those that have the corresponding - filterPktStatusMask bit equal to one. The following - two rules are then applied to every packet: - - (1) For each relevant bit from the packet status - with the corresponding filterPktStatusNotMask bit - set to zero, if the bit from the packet status is - not equal to the corresponding bit from the - filterPktStatus, then the packet will fail this - status match. - - (2) If for every relevant bit from the packet status - with the corresponding filterPktStatusNotMask bit - set to one, the bit from the packet status is - equal to the corresponding bit from the - filterPktStatus, then the packet will fail this - status match. - - Any packets that have not failed either of the two - matches above have passed this status match. In - particular, a zero length status filter will match any - packet's status. - - The value of the packet status is a sum. This sum - initially takes the value zero. Then, for each - error, E, that has been discovered in this packet, - 2 raised to a value representing E is added to the - sum. The errors and the bits that represent them are - dependent on the media type of the interface that - this channel is receiving packets from. - - The errors defined for a packet captured off of an - Ethernet interface are as follows: - - bit # Error - 0 Packet is longer than 1518 octets - 1 Packet is shorter than 64 octets - 2 Packet experienced a CRC or Alignment - error - - For example, an Ethernet fragment would have a - value of 6 (2^1 + 2^2). - - As this MIB is expanded to new media types, this - object will have other media-specific errors - defined. 
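The packet-status encoding above is just a bit set keyed by error number. A small C helper for the Ethernet case, using the bit assignments listed in the description (a fragment, short with a CRC or alignment error, yields 2^1 + 2^2 = 6):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Ethernet packet-status bits as listed above. */
    enum {
        PKT_STATUS_TOO_LONG  = 0,   /* longer than 1518 octets  */
        PKT_STATUS_TOO_SHORT = 1,   /* shorter than 64 octets   */
        PKT_STATUS_CRC_ALIGN = 2    /* CRC or alignment error   */
    };

    uint32_t ether_packet_status(size_t len, bool crc_or_align_error)
    {
        uint32_t status = 0;
        if (len > 1518)
            status |= 1u << PKT_STATUS_TOO_LONG;
        if (len < 64)
            status |= 1u << PKT_STATUS_TOO_SHORT;
        if (crc_or_align_error)
            status |= 1u << PKT_STATUS_CRC_ALIGN;
        return status;   /* e.g. a fragment: (1u<<1) | (1u<<2) == 6 */
    }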
- - - For the purposes of this status matching algorithm, - if the packet status is longer than this - filterPktStatus object, this object is conceptually - extended with '0' bits until it reaches the size of - the packet status. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 7 } - - filterPktStatusMask OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The mask that is applied to the status match - process. Only those bits in the received packet - that correspond to bits set in this mask are - relevant for further processing by the status match - algorithm. For the purposes of the matching - algorithm, if the associated filterPktStatus object - is longer than this mask, this mask is conceptually - extended with '1' bits until it reaches the size of - the filterPktStatus. In addition, if a packet - status is longer than this mask, this mask is - conceptually extended with '0' bits until it reaches - the size of the packet status. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 8 } - - filterPktStatusNotMask OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The inversion mask that is applied to the status - match process. Those relevant bits in the received - packet status that correspond to bits cleared in - this mask must all be equal to their corresponding - bits in the filterPktStatus object for the packet to - be accepted. In addition, at least one of those - relevant bits in the received packet status that - correspond to bits set in this mask must be - different to its corresponding bit in the - filterPktStatus object for the packet to be - accepted. - - - For the purposes of the matching algorithm, if the - associated filterPktStatus object or a packet status - is longer than this mask, this mask is conceptually - extended with '0' bits until it reaches the longer - of the lengths of the filterPktStatus object and the - packet status. - - This object may not be modified if the associated - filterStatus object is equal to valid(1)." - ::= { filterEntry 9 } - - filterOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { filterEntry 10 } - - filterStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this filter entry." - ::= { filterEntry 11 } - - channelTable OBJECT-TYPE - SYNTAX SEQUENCE OF ChannelEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of packet channel entries." - ::= { filter 2 } - - channelEntry OBJECT-TYPE - SYNTAX ChannelEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of parameters for a packet channel applied on a - particular interface. 
As an example, an instance of - the channelMatches object might be named - channelMatches.3" - INDEX { channelIndex } - ::= { channelTable 1 } - - - - ChannelEntry ::= SEQUENCE { - channelIndex INTEGER (1..65535), - channelIfIndex INTEGER (1..65535), - channelAcceptType INTEGER, - channelDataControl INTEGER, - channelTurnOnEventIndex INTEGER (0..65535), - channelTurnOffEventIndex INTEGER (0..65535), - channelEventIndex INTEGER (0..65535), - channelEventStatus INTEGER, - channelMatches Counter, - channelDescription DisplayString (SIZE (0..127)), - channelOwner OwnerString, - channelStatus EntryStatus - } - - channelIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in the - channel table. Each such entry defines one channel, - a logical data and event stream. - - It is suggested that before creating a channel, an - application should scan all instances of the - filterChannelIndex object to make sure that there - are no pre-existing filters that would be - inadvertently be linked to the channel." - ::= { channelEntry 1 } - - channelIfIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The value of this object uniquely identifies the - interface on this remote network monitoring device - to which the associated filters are applied to allow - data into this channel. The interface identified by - a particular value of this object is the same - interface as identified by the same value of the - ifIndex object, defined in RFC 1213 and RFC 1573 - [4,6]. - - The filters in this group are applied to all packets - on the local network segment attached to the - identified interface. - - - An agent may or may not be able to tell if - fundamental changes to the media of the interface - have occurred and necessitate an invalidation of - this entry. For example, a hot-pluggable ethernet - card could be pulled out and replaced by a - token-ring card. In such a case, if the agent has - such knowledge of the change, it is recommended that - it invalidate this entry. - - This object may not be modified if the associated - channelStatus object is equal to valid(1)." - ::= { channelEntry 2 } - - channelAcceptType OBJECT-TYPE - SYNTAX INTEGER { - acceptMatched(1), - acceptFailed(2) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "This object controls the action of the filters - associated with this channel. If this object is equal - to acceptMatched(1), packets will be accepted to this - channel if they are accepted by both the packet data - and packet status matches of an associated filter. If - this object is equal to acceptFailed(2), packets will - be accepted to this channel only if they fail either - the packet data match or the packet status match of - each of the associated filters. - - In particular, a channel with no associated filters - will match no packets if set to acceptMatched(1) - case and will match all packets in the - acceptFailed(2) case. - - This object may not be modified if the associated - channelStatus object is equal to valid(1)." - ::= { channelEntry 3 } - - channelDataControl OBJECT-TYPE - SYNTAX INTEGER { - on(1), - off(2) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - - - "This object controls the flow of data through this - channel. If this object is on(1), data, status and - events flow through this channel. If this object is - off(2), data, status and events will not flow - through this channel." 
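Putting the pieces together, a channel accepts a packet when some associated filter passes both its data match and its status match, and channelAcceptType then decides whether a match or a failure means acceptance. A minimal C sketch, where filter_matches is assumed to combine the two per-filter matches described earlier:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    enum accept_type { acceptMatched = 1, acceptFailed = 2 };  /* channelAcceptType */

    struct filter;    /* one filterEntry, opaque here */

    /* True when a packet passes BOTH the data match and the status match
     * of one filter (see the filterPktData/filterPktStatus sketches).    */
    extern bool filter_matches(const struct filter *f,
                               const uint8_t *pkt, size_t len, uint32_t status);

    /* A packet is accepted if any associated filter accepts it, with
     * channelAcceptType deciding whether "matched" or "failed" means
     * acceptance.  With no filters, acceptMatched accepts nothing and
     * acceptFailed accepts everything, as the description above notes. */
    bool channel_accepts(const struct filter *const *filters, size_t nfilters,
                         enum accept_type type,
                         const uint8_t *pkt, size_t len, uint32_t status)
    {
        bool matched = false;
        for (size_t i = 0; i < nfilters && !matched; i++)
            matched = filter_matches(filters[i], pkt, len, status);
        return (type == acceptMatched) ? matched : !matched;
    }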
- DEFVAL { off } - ::= { channelEntry 4 } - - channelTurnOnEventIndex OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The value of this object identifies the event - that is configured to turn the associated - channelDataControl from off to on when the event is - generated. The event identified by a particular value - of this object is the same event as identified by the - same value of the eventIndex object. If there is no - corresponding entry in the eventTable, then no - association exists. In fact, if no event is intended - for this channel, channelTurnOnEventIndex must be - set to zero, a non-existent event index. - - This object may not be modified if the associated - channelStatus object is equal to valid(1)." - ::= { channelEntry 5 } - - channelTurnOffEventIndex OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The value of this object identifies the event - that is configured to turn the associated - channelDataControl from on to off when the event is - generated. The event identified by a particular value - of this object is the same event as identified by the - same value of the eventIndex object. If there is no - corresponding entry in the eventTable, then no - association exists. In fact, if no event is intended - for this channel, channelTurnOffEventIndex must be - set to zero, a non-existent event index. - - This object may not be modified if the associated - channelStatus object is equal to valid(1)." - ::= { channelEntry 6 } - - - - channelEventIndex OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The value of this object identifies the event - that is configured to be generated when the - associated channelDataControl is on and a packet - is matched. The event identified by a particular - value of this object is the same event as identified - by the same value of the eventIndex object. If - there is no corresponding entry in the eventTable, - then no association exists. In fact, if no event is - intended for this channel, channelEventIndex must be - set to zero, a non-existent event index. - - This object may not be modified if the associated - channelStatus object is equal to valid(1)." - ::= { channelEntry 7 } - - channelEventStatus OBJECT-TYPE - SYNTAX INTEGER { - eventReady(1), - eventFired(2), - eventAlwaysReady(3) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The event status of this channel. - - If this channel is configured to generate events - when packets are matched, a means of controlling - the flow of those events is often needed. When - this object is equal to eventReady(1), a single - event may be generated, after which this object - will be set by the probe to eventFired(2). While - in the eventFired(2) state, no events will be - generated until the object is modified to - eventReady(1) (or eventAlwaysReady(3)). The - management station can thus easily respond to a - notification of an event by re-enabling this object. - - If the management station wishes to disable this - flow control and allow events to be generated - at will, this object may be set to - eventAlwaysReady(3). Disabling the flow control - is discouraged as it can result in high network - - - traffic or other performance problems." - DEFVAL { eventReady } - ::= { channelEntry 8 } - - channelMatches OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of times this channel has matched a - packet. 
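The eventReady/eventFired/eventAlwaysReady flow control described above amounts to a small state machine driven by matched packets. A C sketch of one plausible handling, with generate_event standing in for the event group hook and all names illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    enum event_status { eventReady = 1, eventFired = 2, eventAlwaysReady = 3 };

    struct channel {
        bool              data_on;       /* channelDataControl == on(1)     */
        uint16_t          event_index;   /* channelEventIndex, 0 = no event */
        enum event_status event_status;  /* channelEventStatus              */
        uint32_t          matches;       /* channelMatches                  */
    };

    /* Hypothetical hook into the event group. */
    extern void generate_event(uint16_t event_index);

    /* Called whenever the channel's filters accept a packet. */
    void channel_packet_matched(struct channel *ch)
    {
        ch->matches++;                               /* counted regardless   */

        if (!ch->data_on || ch->event_index == 0)
            return;                                  /* no event configured  */

        if (ch->event_status == eventAlwaysReady) {
            generate_event(ch->event_index);
        } else if (ch->event_status == eventReady) {
            generate_event(ch->event_index);
            ch->event_status = eventFired;           /* wait for re-arm      */
        }
        /* eventFired: suppressed until the manager sets eventReady again. */
    }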
Note that this object is updated even when - channelDataControl is set to off." - ::= { channelEntry 9 } - - channelDescription OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..127)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "A comment describing this channel." - ::= { channelEntry 10 } - - channelOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { channelEntry 11 } - - channelStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this channel entry." - ::= { channelEntry 12 } - - - -- The Packet Capture Group - - -- Implementation of the Packet Capture group is optional. - -- - -- The Packet Capture Group requires implementation of the - -- Filter Group. - -- - -- The Packet Capture group allows packets to be captured - - - -- upon a filter match. The bufferControlTable controls - -- the captured packets output from a channel that is - -- associated with it. The captured packets are placed - -- in entries in the captureBufferTable. These entries are - -- associated with the bufferControlEntry on whose behalf they - -- were stored. - - bufferControlTable OBJECT-TYPE - SYNTAX SEQUENCE OF BufferControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of buffers control entries." - ::= { capture 1 } - - bufferControlEntry OBJECT-TYPE - SYNTAX BufferControlEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of parameters that control the collection of - a stream of packets that have matched filters. As - an example, an instance of the - bufferControlCaptureSliceSize object might be named - bufferControlCaptureSliceSize.3" - INDEX { bufferControlIndex } - ::= { bufferControlTable 1 } - - BufferControlEntry ::= SEQUENCE { - bufferControlIndex INTEGER (1..65535), - bufferControlChannelIndex INTEGER (1..65535), - bufferControlFullStatus INTEGER, - bufferControlFullAction INTEGER, - bufferControlCaptureSliceSize INTEGER, - bufferControlDownloadSliceSize INTEGER, - bufferControlDownloadOffset INTEGER, - bufferControlMaxOctetsRequested INTEGER, - bufferControlMaxOctetsGranted INTEGER, - bufferControlCapturedPackets INTEGER, - bufferControlTurnOnTime TimeTicks, - bufferControlOwner OwnerString, - bufferControlStatus EntryStatus - } - - bufferControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - - - DESCRIPTION - "An index that uniquely identifies an entry - in the bufferControl table. The value of this - index shall never be zero. Each such - entry defines one set of packets that is - captured and controlled by one or more filters." - ::= { bufferControlEntry 1 } - - bufferControlChannelIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An index that identifies the channel that is the - source of packets for this bufferControl table. - The channel identified by a particular value of this - index is the same as identified by the same value of - the channelIndex object. - - This object may not be modified if the associated - bufferControlStatus object is equal to valid(1)." - ::= { bufferControlEntry 2 } - - bufferControlFullStatus OBJECT-TYPE - SYNTAX INTEGER { - spaceAvailable(1), - full(2) - } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "This object shows whether the buffer has room to - accept new packets or if it is full. 
- - If the status is spaceAvailable(1), the buffer is - accepting new packets normally. If the status is - full(2) and the associated bufferControlFullAction - object is wrapWhenFull, the buffer is accepting new - packets by deleting enough of the oldest packets - to make room for new ones as they arrive. Otherwise, - if the status is full(2) and the - bufferControlFullAction object is lockWhenFull, - then the buffer has stopped collecting packets. - - When this object is set to full(2) the probe must - not later set it to spaceAvailable(1) except in the - case of a significant gain in resources such as - an increase of bufferControlOctetsGranted. In - - - particular, the wrap-mode action of deleting old - packets to make room for newly arrived packets - must not affect the value of this object." - ::= { bufferControlEntry 3 } - - bufferControlFullAction OBJECT-TYPE - SYNTAX INTEGER { - lockWhenFull(1), - wrapWhenFull(2) -- FIFO - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "Controls the action of the buffer when it - reaches the full status. When in the lockWhenFull(1) - state and a packet is added to the buffer that - fills the buffer, the bufferControlFullStatus will - be set to full(2) and this buffer will stop capturing - packets." - ::= { bufferControlEntry 4 } - - bufferControlCaptureSliceSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The maximum number of octets of each packet - that will be saved in this capture buffer. - For example, if a 1500 octet packet is received by - the probe and this object is set to 500, then only - 500 octets of the packet will be stored in the - associated capture buffer. If this variable is set - to 0, the capture buffer will save as many octets - as is possible. - - This object may not be modified if the associated - bufferControlStatus object is equal to valid(1)." - DEFVAL { 100 } - ::= { bufferControlEntry 5 } - - bufferControlDownloadSliceSize OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The maximum number of octets of each packet - in this capture buffer that will be returned in - an SNMP retrieval of that packet. For example, - - - if 500 octets of a packet have been stored in the - associated capture buffer, the associated - bufferControlDownloadOffset is 0, and this - object is set to 100, then the captureBufferPacket - object that contains the packet will contain only - the first 100 octets of the packet. - - A prudent manager will take into account possible - interoperability or fragmentation problems that may - occur if the download slice size is set too large. - In particular, conformant SNMP implementations are not - required to accept messages whose length exceeds 484 - octets, although they are encouraged to support larger - datagrams whenever feasible." - DEFVAL { 100 } - ::= { bufferControlEntry 6 } - - bufferControlDownloadOffset OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The offset of the first octet of each packet - in this capture buffer that will be returned in - an SNMP retrieval of that packet. For example, - if 500 octets of a packet have been stored in the - associated capture buffer and this object is set to - 100, then the captureBufferPacket object that - contains the packet will contain bytes starting - 100 octets into the packet." 
- DEFVAL { 0 } - ::= { bufferControlEntry 7 } - - bufferControlMaxOctetsRequested OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The requested maximum number of octets to be - saved in this captureBuffer, including any - implementation-specific overhead. If this variable - is set to -1, the capture buffer will save as many - octets as is possible. - - When this object is created or modified, the probe - should set bufferControlMaxOctetsGranted as closely - to this object as is possible for the particular probe - implementation and available resources. However, if - - - the object has the special value of -1, the probe - must set bufferControlMaxOctetsGranted to -1." - DEFVAL { -1 } - ::= { bufferControlEntry 8 } - - bufferControlMaxOctetsGranted OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The maximum number of octets that can be - saved in this captureBuffer, including overhead. - If this variable is -1, the capture buffer will save - as many octets as possible. - - When the bufferControlMaxOctetsRequested object is - created or modified, the probe should set this object - as closely to the requested value as is possible for - the particular probe implementation and available - resources. - However, if the request object has the special value - of -1, the probe must set this object to -1. - The probe must not lower this value except as a result - of a modification to the associated - bufferControlMaxOctetsRequested object. - - When this maximum number of octets is reached - and a new packet is to be added to this - capture buffer and the corresponding - bufferControlFullAction is set to wrapWhenFull(2), - enough of the oldest packets associated with this - capture buffer shall be deleted by the agent so - that the new packet can be added. If the - corresponding bufferControlFullAction is set to - lockWhenFull(1), the new packet shall be discarded. - In either case, the probe must set - bufferControlFullStatus to full(2). - - When the value of this object changes to a value less - than the current value, entries are deleted from - the captureBufferTable associated with this - bufferControlEntry. Enough of the - oldest of these captureBufferEntries shall be - deleted by the agent so that the number of octets - used remains less than or equal to the new value of - this object. - - When the value of this object changes to a value - - - greater than the current value, the number of - associated captureBufferEntries may be allowed to - grow." - ::= { bufferControlEntry 9 } - - bufferControlCapturedPackets OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets currently in this - captureBuffer." - ::= { bufferControlEntry 10 } - - bufferControlTurnOnTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime when this capture buffer was - first turned on." - ::= { bufferControlEntry 11 } - - bufferControlOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it." - ::= { bufferControlEntry 12 } - - bufferControlStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this buffer Control Entry." 
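Adding a packet to a capture buffer combines the capture slice, the granted octet limit and the full action described above. The C sketch below shows one plausible sequence; it ignores the -1 (unlimited) convention and per-entry overhead for brevity, and the storage helpers are hypothetical.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    enum full_action { lockWhenFull = 1, wrapWhenFull = 2 };
    enum full_status { spaceAvailable = 1, full = 2 };

    struct capture_buffer {
        enum full_action action;     /* bufferControlFullAction        */
        enum full_status status;     /* bufferControlFullStatus        */
        size_t slice_size;           /* bufferControlCaptureSliceSize  */
        size_t max_octets;           /* bufferControlMaxOctetsGranted  */
        size_t used_octets;          /* octets currently stored        */
    };

    /* Hypothetical storage helpers. */
    extern void   store_packet(struct capture_buffer *b, const uint8_t *p, size_t n);
    extern size_t drop_oldest_packet(struct capture_buffer *b);  /* octets freed */

    void capture_packet(struct capture_buffer *b, const uint8_t *pkt, size_t len)
    {
        /* A slice size of 0 means store as much of the packet as possible. */
        size_t n = (b->slice_size != 0 && len > b->slice_size) ? b->slice_size : len;

        if (b->used_octets + n > b->max_octets) {
            b->status = full;                          /* full(2) either way     */
            if (b->action == lockWhenFull)
                return;                                /* discard the new packet */
            while (b->used_octets != 0 && b->used_octets + n > b->max_octets)
                b->used_octets -= drop_oldest_packet(b);   /* wrap: evict oldest */
        }
        store_packet(b, pkt, n);
        b->used_octets += n;
    }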
- ::= { bufferControlEntry 13 } - - captureBufferTable OBJECT-TYPE - SYNTAX SEQUENCE OF CaptureBufferEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of packets captured off of a channel." - ::= { capture 2 } - - - - captureBufferEntry OBJECT-TYPE - SYNTAX CaptureBufferEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A packet captured off of an attached network. As an - example, an instance of the captureBufferPacketData - object might be named captureBufferPacketData.3.1783" - INDEX { captureBufferControlIndex, captureBufferIndex } - ::= { captureBufferTable 1 } - - CaptureBufferEntry ::= SEQUENCE { - captureBufferControlIndex INTEGER (1..65535), - captureBufferIndex INTEGER (1..2147483647), - captureBufferPacketID INTEGER, - captureBufferPacketData OCTET STRING, - captureBufferPacketLength INTEGER, - captureBufferPacketTime INTEGER, - captureBufferPacketStatus INTEGER - } - - captureBufferControlIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The index of the bufferControlEntry with which - this packet is associated." - ::= { captureBufferEntry 1 } - - captureBufferIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry - in the captureBuffer table associated with a - particular bufferControlEntry. This index will - start at 1 and increase by one for each new packet - added with the same captureBufferControlIndex. - - Should this value reach 2147483647, the next packet - added with the same captureBufferControlIndex shall - cause this value to wrap around to 1." - ::= { captureBufferEntry 2 } - - captureBufferPacketID OBJECT-TYPE - SYNTAX INTEGER - - - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that describes the order of packets - that are received on a particular interface. - The packetID of a packet captured on an - interface is defined to be greater than the - packetID's of all packets captured previously on - the same interface. As the captureBufferPacketID - object has a maximum positive value of 2^31 - 1, - any captureBufferPacketID object shall have the - value of the associated packet's packetID mod 2^31." - ::= { captureBufferEntry 3 } - - captureBufferPacketData OBJECT-TYPE - SYNTAX OCTET STRING - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The data inside the packet, starting at the - beginning of the packet plus any offset specified in - the associated bufferControlDownloadOffset, - including any link level headers. The length of the - data in this object is the minimum of the length of - the captured packet minus the offset, the length of - the associated bufferControlCaptureSliceSize minus - the offset, and the associated - bufferControlDownloadSliceSize. If this minimum is - less than zero, this object shall have a length of - zero." - ::= { captureBufferEntry 4 } - - captureBufferPacketLength OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The actual length (off the wire) of the packet stored - in this entry, including FCS octets." - ::= { captureBufferEntry 5 } - - captureBufferPacketTime OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of milliseconds that had passed since - this capture buffer was first turned on when this - - - packet was captured." 
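(Editorial aside, not part of the MIB text or of this change.) The captureBufferPacketData description above combines three limits on how many octets are returned. A minimal C sketch of that computation, with hypothetical names and ignoring the special "CaptureSliceSize = 0" case, might look like:

    /* Illustrative sketch (not part of the MIB): the number of octets an
     * agent would return in captureBufferPacketData, per the rule above.
     * The special value CaptureSliceSize = 0 is not modelled here.       */
    #include <stdio.h>

    static long min3(long a, long b, long c)
    {
        long m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* pkt_len:        captured length of the packet
     * capture_slice:  bufferControlCaptureSliceSize
     * download_slice: bufferControlDownloadSliceSize
     * offset:         bufferControlDownloadOffset      */
    static long returned_octets(long pkt_len, long capture_slice,
                                long download_slice, long offset)
    {
        long n = min3(pkt_len - offset, capture_slice - offset, download_slice);
        return n < 0 ? 0 : n;       /* "less than zero" means empty value */
    }

    int main(void)
    {
        /* 1500-octet packet, 500-octet capture slice, 100-octet download
         * slice, offset 0: only the first 100 octets are returned.       */
        printf("%ld octets\n", returned_octets(1500, 500, 100, 0));
        return 0;
    }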
- ::= { captureBufferEntry 6 } - - captureBufferPacketStatus OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A value which indicates the error status of this - packet. - - The value of this object is defined in the same way as - filterPktStatus. The value is a sum. This sum - initially takes the value zero. Then, for each - error, E, that has been discovered in this packet, - 2 raised to a value representing E is added to the - sum. - - The errors defined for a packet captured off of an - Ethernet interface are as follows: - - bit # Error - 0 Packet is longer than 1518 octets - 1 Packet is shorter than 64 octets - 2 Packet experienced a CRC or Alignment - error - 3 First packet in this capture buffer after - it was detected that some packets were - not processed correctly. - 4 Packet's order in buffer is only - approximate (May only be set for packets - sent from the probe) - - For example, an Ethernet fragment would have a - value of 6 (2^1 + 2^2). - - As this MIB is expanded to new media types, this - object will have other media-specific errors defined." - ::= { captureBufferEntry 7 } - - - -- The Event Group - - -- Implementation of the Event group is optional. - -- - -- The Event group controls the generation and notification - -- of events from this device. Each entry in the eventTable - -- describes the parameters of the event that can be - - - -- triggered. Each event entry is fired by an associated - -- condition located elsewhere in the MIB. An event entry - -- may also be associated- with a function elsewhere in the - -- MIB that will be executed when the event is generated. For - -- example, a channel may be turned on or off by the firing - -- of an event. - -- - -- Each eventEntry may optionally specify that a log entry - -- be created on its behalf whenever the event occurs. - -- Each entry may also specify that notification should - -- occur by way of SNMP trap messages. In this case, the - -- community for the trap message is given in the associated - -- eventCommunity object. The enterprise and specific trap - -- fields of the trap are determined by the condition that - -- triggered the event. Two traps are defined: risingAlarm - -- and fallingAlarm. If the eventTable is triggered by a - -- condition specified elsewhere, the enterprise and - -- specific trap fields must be specified for traps - -- generated for that condition. - - eventTable OBJECT-TYPE - SYNTAX SEQUENCE OF EventEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of events to be generated." - ::= { event 1 } - - eventEntry OBJECT-TYPE - SYNTAX EventEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of parameters that describe an event to be - generated when certain conditions are met. As an - example, an instance of the eventLastTimeSent object - might be named eventLastTimeSent.6" - INDEX { eventIndex } - ::= { eventTable 1 } - - EventEntry ::= SEQUENCE { - eventIndex INTEGER (1..65535), - eventDescription DisplayString (SIZE (0..127)), - eventType INTEGER, - eventCommunity OCTET STRING (SIZE (0..127)), - eventLastTimeSent TimeTicks, - eventOwner OwnerString, - eventStatus EntryStatus - - - } - - eventIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry in the - event table. Each such entry defines one event that - is to be generated when the appropriate conditions - occur." 
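(Editorial aside.) The bit-sum encoding of captureBufferPacketStatus can be made concrete with a small C sketch; the enum names are hypothetical, but the values follow the bit numbers listed above, so an Ethernet fragment (bits 1 and 2) comes out as 6:

    /* Illustrative sketch: composing and testing the captureBufferPacketStatus
     * bit sum described above.  The enum names are hypothetical.              */
    #include <stdio.h>

    enum {
        PKT_TOO_LONG        = 1 << 0,  /* bit 0: longer than 1518 octets  */
        PKT_TOO_SHORT       = 1 << 1,  /* bit 1: shorter than 64 octets   */
        PKT_CRC_ALIGN_ERROR = 1 << 2,  /* bit 2: CRC or alignment error   */
        PKT_AFTER_GAP       = 1 << 3,  /* bit 3: first packet after loss  */
        PKT_ORDER_APPROX    = 1 << 4   /* bit 4: order only approximate   */
    };

    int main(void)
    {
        /* An Ethernet fragment is both too short and has a bad FCS. */
        unsigned status = PKT_TOO_SHORT | PKT_CRC_ALIGN_ERROR;

        printf("status = %u\n", status);                     /* prints 6 */
        printf("crc/align error: %s\n",
               (status & PKT_CRC_ALIGN_ERROR) ? "yes" : "no");
        return 0;
    }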
- ::= { eventEntry 1 } - - eventDescription OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..127)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "A comment describing this event entry." - ::= { eventEntry 2 } - - eventType OBJECT-TYPE - SYNTAX INTEGER { - none(1), - log(2), - snmp-trap(3), -- send an SNMP trap - log-and-trap(4) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The type of notification that the probe will make - about this event. In the case of log, an entry is - made in the log table for each event. In the case of - snmp-trap, an SNMP trap is sent to one or more - management stations." - ::= { eventEntry 3 } - - eventCommunity OBJECT-TYPE - SYNTAX OCTET STRING (SIZE (0..127)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "If an SNMP trap is to be sent, it will be sent to - the SNMP community specified by this octet string. - In the future this table will be extended to include - the party security mechanism. This object shall be - set to a string of length zero if it is intended that - - - that mechanism be used to specify the destination of - the trap." - ::= { eventEntry 4 } - - eventLastTimeSent OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime at the time this event - entry last generated an event. If this entry has - not generated any events, this value will be - zero." - ::= { eventEntry 5 } - - eventOwner OBJECT-TYPE - SYNTAX OwnerString - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The entity that configured this entry and is - therefore using the resources assigned to it. - - If this object contains a string starting with - 'monitor' and has associated entries in the log - table, all connected management stations should - retrieve those log entries, as they may have - significance to all management stations connected to - this device" - ::= { eventEntry 6 } - - eventStatus OBJECT-TYPE - SYNTAX EntryStatus - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The status of this event entry. - - If this object is not equal to valid(1), all - associated log entries shall be deleted by the - agent." - ::= { eventEntry 7 } - - -- - logTable OBJECT-TYPE - SYNTAX SEQUENCE OF LogEntry - ACCESS not-accessible - STATUS mandatory - - - DESCRIPTION - "A list of events that have been logged." - ::= { event 2 } - - logEntry OBJECT-TYPE - SYNTAX LogEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A set of data describing an event that has been - logged. For example, an instance of the - logDescription object might be named - logDescription.6.47" - INDEX { logEventIndex, logIndex } - ::= { logTable 1 } - - LogEntry ::= SEQUENCE { - logEventIndex INTEGER (1..65535), - logIndex INTEGER (1..2147483647), - logTime TimeTicks, - logDescription DisplayString (SIZE (0..255)) - } - - logEventIndex OBJECT-TYPE - SYNTAX INTEGER (1..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The event entry that generated this log - entry. The log identified by a particular - value of this index is associated with the same - eventEntry as identified by the same value - of eventIndex." - ::= { logEntry 1 } - - logIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An index that uniquely identifies an entry - in the log table amongst those generated by the - same eventEntries. These indexes are - assigned beginning with 1 and increase by one - with each new log entry. 
The association - between values of logIndex and logEntries - is fixed for the lifetime of each logEntry. - The agent may choose to delete the oldest - - - instances of logEntry as required because of - lack of memory. It is an implementation-specific - matter as to when this deletion may occur." - ::= { logEntry 2 } - - logTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime when this log entry was - created." - ::= { logEntry 3 } - - logDescription OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An implementation dependent description of the - event that activated this log entry." - ::= { logEntry 4 } - - -- These definitions use the TRAP-TYPE macro as - -- defined in RFC 1215 [10] - - -- Remote Network Monitoring Traps - - risingAlarm TRAP-TYPE - ENTERPRISE rmon - VARIABLES { alarmIndex, alarmVariable, alarmSampleType, - alarmValue, alarmRisingThreshold } - DESCRIPTION - "The SNMP trap that is generated when an alarm - entry crosses its rising threshold and generates - an event that is configured for sending SNMP - traps." - ::= 1 - - fallingAlarm TRAP-TYPE - ENTERPRISE rmon - VARIABLES { alarmIndex, alarmVariable, alarmSampleType, - alarmValue, alarmFallingThreshold } - DESCRIPTION - "The SNMP trap that is generated when an alarm - entry crosses its falling threshold and generates - an event that is configured for sending SNMP - traps." - - - ::= 2 - - END +RMON-MIB DEFINITIONS ::= BEGIN + IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, OBJECT-IDENTITY, + NOTIFICATION-TYPE, mib-2, Counter32, + Integer32, TimeTicks FROM SNMPv2-SMI + + TEXTUAL-CONVENTION, DisplayString FROM SNMPv2-TC + + MODULE-COMPLIANCE, OBJECT-GROUP, + NOTIFICATION-GROUP FROM SNMPv2-CONF; + + +-- Remote Network Monitoring MIB + +rmonMibModule MODULE-IDENTITY + LAST-UPDATED "200005110000Z" -- 11 May, 2000 + ORGANIZATION "IETF RMON MIB Working Group" + CONTACT-INFO + "Steve Waldbusser + Phone: +1-650-948-6500 + Fax: +1-650-745-0671 + Email: [email protected]" + DESCRIPTION + "Remote network monitoring devices, often called + monitors or probes, are instruments that exist for + the purpose of managing a network. This MIB defines + objects for managing remote network monitoring devices." + + REVISION "200005110000Z" -- 11 May, 2000 + DESCRIPTION + "Reformatted into SMIv2 format. + + This version published as RFC 2819." + + REVISION "199502010000Z" -- 1 Feb, 1995 + DESCRIPTION + "Bug fixes, clarifications and minor changes based on + implementation experience, published as RFC1757 [18]. + + Two changes were made to object definitions: + + 1) A new status bit has been defined for the + captureBufferPacketStatus object, indicating that the + packet order within the capture buffer may not be identical to + the packet order as received off the wire. This bit may only + be used for packets transmitted by the probe. Older NMS + applications can safely ignore this status bit, which might be + used by newer agents. + + 2) The packetMatch trap has been removed. This trap was never + actually 'approved' and was not added to this document along + with the risingAlarm and fallingAlarm traps. The packetMatch + trap could not be throttled, which could cause disruption of + normal network traffic under some circumstances. An NMS should + configure a risingAlarm threshold on the appropriate + channelMatches instance if a trap is desired for a packetMatch + event. 
Note that logging of packetMatch events is still + supported--only trap generation for such events has been + removed. + + In addition, several clarifications to individual object + definitions have been added to assist agent and NMS + implementors: + + - global definition of 'good packets' and 'bad packets' + + - more detailed text governing conceptual row creation and + modification + + - instructions for probes relating to interface changes and + disruptions + + - clarification of some ethernet counter definitions + + - recommended formula for calculating network utilization + + - clarification of channel and captureBuffer behavior for some + unusual conditions + + - examples of proper instance naming for each table" + + REVISION "199111010000Z" -- 1 Nov, 1991 + DESCRIPTION + "The original version of this MIB, published as RFC1271." + ::= { rmonConformance 8 } + + rmon OBJECT IDENTIFIER ::= { mib-2 16 } + + + -- textual conventions + +OwnerString ::= TEXTUAL-CONVENTION + STATUS current + + DESCRIPTION + "This data type is used to model an administratively + assigned name of the owner of a resource. Implementations + must accept values composed of well-formed NVT ASCII + sequences. In addition, implementations should accept + values composed of well-formed UTF-8 sequences. + + It is suggested that this name contain one or more of + the following: IP address, management station name, + network manager's name, location, or phone number. + In some cases the agent itself will be the owner of + an entry. In these cases, this string shall be set + to a string starting with 'monitor'. + + SNMP access control is articulated entirely in terms + of the contents of MIB views; access to a particular + SNMP object instance depends only upon its presence + or absence in a particular MIB view and never upon + its value or the value of related object instances. + Thus, objects of this type afford resolution of + resource contention only among cooperating + managers; they realize no access control function + with respect to uncooperative parties." + SYNTAX OCTET STRING (SIZE (0..127)) + +EntryStatus ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "The status of a table entry. + + Setting this object to the value invalid(4) has the + effect of invalidating the corresponding entry. + That is, it effectively disassociates the mapping + identified with said entry. + It is an implementation-specific matter as to whether + the agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared to + receive tabular information from agents that corresponds + to entries currently not in use. Proper + interpretation of such entries requires examination + of the relevant EntryStatus object. + + An existing instance of this object cannot be set to + createRequest(2). This object may only be set to + createRequest(2) when this instance is created. When + this object is created, the agent may wish to create + supplemental object instances with default values + to complete a conceptual row in this table. Because the + creation of these default objects is entirely at the option + of the agent, the manager must not assume that any will be + created, but may make use of any that are created. + Immediately after completing the create operation, the agent + must set this object to underCreation(3). + + When in the underCreation(3) state, an entry is allowed to + exist in a possibly incomplete, possibly inconsistent state, + usually to allow it to be modified in multiple PDUs. 
When in + this state, an entry is not fully active. + Entries shall exist in the underCreation(3) state until + the management station is finished configuring the entry + and sets this object to valid(1) or aborts, setting this + object to invalid(4). If the agent determines that an + entry has been in the underCreation(3) state for an + abnormally long time, it may decide that the management + station has crashed. If the agent makes this decision, + it may set this object to invalid(4) to reclaim the + entry. A prudent agent will understand that the + management station may need to wait for human input + and will allow for that possibility in its + determination of this abnormally long period. + + An entry in the valid(1) state is fully configured and + consistent and fully represents the configuration or + operation such a row is intended to represent. For + example, it could be a statistical function that is + configured and active, or a filter that is available + in the list of filters processed by the packet capture + process. + + A manager is restricted to changing the state of an entry in + the following ways: + + To: valid createRequest underCreation invalid + From: + valid OK NO OK OK + createRequest N/A N/A N/A N/A + underCreation OK NO OK OK + invalid NO NO NO OK + nonExistent NO OK NO OK + + In the table above, it is not applicable to move the state + from the createRequest state to any other state because the + manager will never find the variable in that state. The + nonExistent state is not a value of the enumeration, rather + it means that the entryStatus variable does not exist at all. + + An agent may allow an entryStatus variable to change state in + additional ways, so long as the semantics of the states are + followed. This allowance is made to ease the implementation of + the agent and is made despite the fact that managers should + never exercise these additional state transitions." + SYNTAX INTEGER { + valid(1), + createRequest(2), + underCreation(3), + invalid(4) + } + + statistics OBJECT IDENTIFIER ::= { rmon 1 } + history OBJECT IDENTIFIER ::= { rmon 2 } + alarm OBJECT IDENTIFIER ::= { rmon 3 } + hosts OBJECT IDENTIFIER ::= { rmon 4 } + hostTopN OBJECT IDENTIFIER ::= { rmon 5 } + matrix OBJECT IDENTIFIER ::= { rmon 6 } + filter OBJECT IDENTIFIER ::= { rmon 7 } + capture OBJECT IDENTIFIER ::= { rmon 8 } + event OBJECT IDENTIFIER ::= { rmon 9 } + rmonConformance OBJECT IDENTIFIER ::= { rmon 20 } + +-- The Ethernet Statistics Group +-- +-- Implementation of the Ethernet Statistics group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The ethernet statistics group contains statistics measured by the +-- probe for each monitored interface on this device. These +-- statistics take the form of free running counters that start from +-- zero when a valid entry is created. +-- +-- This group currently has statistics defined only for +-- Ethernet interfaces. Each etherStatsEntry contains statistics +-- for one Ethernet interface. The probe must create one +-- etherStats entry for each monitored Ethernet interface +-- on the device. + +etherStatsTable OBJECT-TYPE + SYNTAX SEQUENCE OF EtherStatsEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of Ethernet statistics entries." 
+ ::= { statistics 1 } + +etherStatsEntry OBJECT-TYPE + SYNTAX EtherStatsEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A collection of statistics kept for a particular + Ethernet interface. As an example, an instance of the + etherStatsPkts object might be named etherStatsPkts.1" + INDEX { etherStatsIndex } + ::= { etherStatsTable 1 } + +EtherStatsEntry ::= SEQUENCE { + etherStatsIndex Integer32, + etherStatsDataSource OBJECT IDENTIFIER, + etherStatsDropEvents Counter32, + etherStatsOctets Counter32, + etherStatsPkts Counter32, + etherStatsBroadcastPkts Counter32, + etherStatsMulticastPkts Counter32, + etherStatsCRCAlignErrors Counter32, + etherStatsUndersizePkts Counter32, + etherStatsOversizePkts Counter32, + etherStatsFragments Counter32, + etherStatsJabbers Counter32, + etherStatsCollisions Counter32, + etherStatsPkts64Octets Counter32, + etherStatsPkts65to127Octets Counter32, + etherStatsPkts128to255Octets Counter32, + etherStatsPkts256to511Octets Counter32, + etherStatsPkts512to1023Octets Counter32, + etherStatsPkts1024to1518Octets Counter32, + etherStatsOwner OwnerString, + etherStatsStatus EntryStatus +} + +etherStatsIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of this object uniquely identifies this + etherStats entry." + ::= { etherStatsEntry 1 } + +etherStatsDataSource OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-create + STATUS current + + DESCRIPTION + "This object identifies the source of the data that + this etherStats entry is configured to analyze. This + source can be any ethernet interface on this device. + In order to identify a particular interface, this object + shall identify the instance of the ifIndex object, + defined in RFC 2233 [17], for the desired interface. + For example, if an entry were to receive data from + interface #1, this object would be set to ifIndex.1. + + The statistics in this group reflect all packets + on the local network segment attached to the identified + interface. + + An agent may or may not be able to tell if fundamental + changes to the media of the interface have occurred and + necessitate an invalidation of this entry. For example, a + hot-pluggable ethernet card could be pulled out and replaced + by a token-ring card. In such a case, if the agent has such + knowledge of the change, it is recommended that it + invalidate this entry. + + This object may not be modified if the associated + etherStatsStatus object is equal to valid(1)." + ::= { etherStatsEntry 2 } + +etherStatsDropEvents OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of events in which packets + were dropped by the probe due to lack of resources. + Note that this number is not necessarily the number of + packets dropped; it is just the number of times this + condition has been detected." + ::= { etherStatsEntry 3 } + +etherStatsOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of octets of data (including + those in bad packets) received on the + network (excluding framing bits but including + FCS octets). + + This object can be used as a reasonable estimate of + 10-Megabit ethernet utilization. If greater precision is + desired, the etherStatsPkts and etherStatsOctets objects + should be sampled before and after a common interval. 
The + differences in the sampled values are Pkts and Octets, + respectively, and the number of seconds in the interval is + Interval. These values are used to calculate the Utilization + as follows: + + Pkts * (9.6 + 6.4) + (Octets * .8) + Utilization = ------------------------------------- + Interval * 10,000 + + The result of this equation is the value Utilization which + is the percent utilization of the ethernet segment on a + scale of 0 to 100 percent." + ::= { etherStatsEntry 4 } + +etherStatsPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad packets, + broadcast packets, and multicast packets) received." + ::= { etherStatsEntry 5 } + +etherStatsBroadcastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of good packets received that were + directed to the broadcast address. Note that this + does not include multicast packets." + ::= { etherStatsEntry 6 } + +etherStatsMulticastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of good packets received that were + directed to a multicast address. Note that this number + does not include packets directed to the broadcast + address." + ::= { etherStatsEntry 7 } + +etherStatsCRCAlignErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received that + had a length (excluding framing bits, but + including FCS octets) of between 64 and 1518 + octets, inclusive, but had either a bad + Frame Check Sequence (FCS) with an integral + number of octets (FCS Error) or a bad FCS with + a non-integral number of octets (Alignment Error)." + ::= { etherStatsEntry 8 } + +etherStatsUndersizePkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received that were + less than 64 octets long (excluding framing bits, + but including FCS octets) and were otherwise well + formed." + ::= { etherStatsEntry 9 } + +etherStatsOversizePkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received that were + longer than 1518 octets (excluding framing bits, + but including FCS octets) and were otherwise + well formed." + ::= { etherStatsEntry 10 } + +etherStatsFragments OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received that were less than + 64 octets in length (excluding framing bits but including + FCS octets) and had either a bad Frame Check Sequence + (FCS) with an integral number of octets (FCS Error) or a + bad FCS with a non-integral number of octets (Alignment + Error). + + Note that it is entirely normal for etherStatsFragments to + increment. This is because it counts both runts (which are + normal occurrences due to collisions) and noise hits." 
+ ::= { etherStatsEntry 11 } + +etherStatsJabbers OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received that were + longer than 1518 octets (excluding framing bits, + but including FCS octets), and had either a bad + Frame Check Sequence (FCS) with an integral number + of octets (FCS Error) or a bad FCS with a non-integral + number of octets (Alignment Error). + + Note that this definition of jabber is different + than the definition in IEEE-802.3 section 8.2.1.5 + (10BASE5) and section 10.3.1.4 (10BASE2). These + documents define jabber as the condition where any + packet exceeds 20 ms. The allowed range to detect + jabber is between 20 ms and 150 ms." + ::= { etherStatsEntry 12 } + +etherStatsCollisions OBJECT-TYPE + SYNTAX Counter32 + UNITS "Collisions" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The best estimate of the total number of collisions + on this Ethernet segment. + + The value returned will depend on the location of the + RMON probe. Section 8.2.1.3 (10BASE-5) and section + 10.3.1.3 (10BASE-2) of IEEE standard 802.3 states that a + station must detect a collision, in the receive mode, if + three or more stations are transmitting simultaneously. A + repeater port must detect a collision when two or more + stations are transmitting simultaneously. Thus a probe + placed on a repeater port could record more collisions + than a probe connected to a station on the same segment + would. + + Probe location plays a much smaller role when considering + 10BASE-T. 14.2.1.4 (10BASE-T) of IEEE standard 802.3 + defines a collision as the simultaneous presence of signals + on the DO and RD circuits (transmitting and receiving + at the same time). A 10BASE-T station can only detect + collisions when it is transmitting. Thus probes placed on + a station and a repeater, should report the same number of + collisions. + + Note also that an RMON probe inside a repeater should + ideally report collisions between the repeater and one or + more other hosts (transmit collisions as defined by IEEE + 802.3k) plus receiver collisions observed on any coax + segments to which the repeater is connected." + ::= { etherStatsEntry 13 } + +etherStatsPkts64Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were 64 octets in length + (excluding framing bits but including FCS octets)." + ::= { etherStatsEntry 14 } + +etherStatsPkts65to127Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were between + 65 and 127 octets in length inclusive + (excluding framing bits but including FCS octets)." + ::= { etherStatsEntry 15 } + +etherStatsPkts128to255Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were between + 128 and 255 octets in length inclusive + (excluding framing bits but including FCS octets)." 
+ ::= { etherStatsEntry 16 } + +etherStatsPkts256to511Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were between + 256 and 511 octets in length inclusive + (excluding framing bits but including FCS octets)." + ::= { etherStatsEntry 17 } + +etherStatsPkts512to1023Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were between + 512 and 1023 octets in length inclusive + (excluding framing bits but including FCS octets)." + ::= { etherStatsEntry 18 } + +etherStatsPkts1024to1518Octets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets (including bad + packets) received that were between + 1024 and 1518 octets in length inclusive + (excluding framing bits but including FCS octets)." + ::= { etherStatsEntry 19 } + +etherStatsOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { etherStatsEntry 20 } + +etherStatsStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this etherStats entry." + ::= { etherStatsEntry 21 } + +-- The History Control Group + +-- Implementation of the History Control group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The history control group controls the periodic statistical +-- sampling of data from various types of networks. The +-- historyControlTable stores configuration entries that each +-- define an interface, polling period, and other parameters. +-- Once samples are taken, their data is stored in an entry +-- in a media-specific table. Each such entry defines one +-- sample, and is associated with the historyControlEntry that +-- caused the sample to be taken. Each counter in the +-- etherHistoryEntry counts the same event as its similarly-named +-- counterpart in the etherStatsEntry, except that each value here +-- is a cumulative sum during a sampling period. +-- +-- If the probe keeps track of the time of day, it should start +-- the first sample of the history at a time such that +-- when the next hour of the day begins, a sample is +-- started at that instant. This tends to make more +-- user-friendly reports, and enables comparison of reports +-- from different probes that have relatively accurate time +-- of day. +-- +-- The probe is encouraged to add two history control entries +-- per monitored interface upon initialization that describe a short +-- term and a long term polling period. Suggested parameters are 30 +-- seconds for the short term polling period and 30 minutes for +-- the long term period. + +historyControlTable OBJECT-TYPE + SYNTAX SEQUENCE OF HistoryControlEntry + MAX-ACCESS not-accessible + + STATUS current + DESCRIPTION + "A list of history control entries." + ::= { history 1 } + +historyControlEntry OBJECT-TYPE + SYNTAX HistoryControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of parameters that set up a periodic sampling of + statistics. 
As an example, an instance of the + historyControlInterval object might be named + historyControlInterval.2" + INDEX { historyControlIndex } + ::= { historyControlTable 1 } + +HistoryControlEntry ::= SEQUENCE { + historyControlIndex Integer32, + historyControlDataSource OBJECT IDENTIFIER, + historyControlBucketsRequested Integer32, + historyControlBucketsGranted Integer32, + historyControlInterval Integer32, + historyControlOwner OwnerString, + historyControlStatus EntryStatus +} + +historyControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the + historyControl table. Each such entry defines a + set of samples at a particular interval for an + interface on the device." + ::= { historyControlEntry 1 } + +historyControlDataSource OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object identifies the source of the data for + which historical data was collected and + placed in a media-specific table on behalf of this + historyControlEntry. This source can be any + interface on this device. In order to identify + a particular interface, this object shall identify + the instance of the ifIndex object, defined + in RFC 2233 [17], for the desired interface. + For example, if an entry were to receive data from + interface #1, this object would be set to ifIndex.1. + + The statistics in this group reflect all packets + on the local network segment attached to the identified + interface. + + An agent may or may not be able to tell if fundamental + changes to the media of the interface have occurred and + necessitate an invalidation of this entry. For example, a + hot-pluggable ethernet card could be pulled out and replaced + by a token-ring card. In such a case, if the agent has such + knowledge of the change, it is recommended that it + invalidate this entry. + + This object may not be modified if the associated + historyControlStatus object is equal to valid(1)." + ::= { historyControlEntry 2 } + +historyControlBucketsRequested OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The requested number of discrete time intervals + over which data is to be saved in the part of the + media-specific table associated with this + historyControlEntry. + + When this object is created or modified, the probe + should set historyControlBucketsGranted as closely to + this object as is possible for the particular probe + implementation and available resources." + DEFVAL { 50 } + ::= { historyControlEntry 3 } + +historyControlBucketsGranted OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of discrete sampling intervals + over which data shall be saved in the part of + the media-specific table associated with this + historyControlEntry. + + When the associated historyControlBucketsRequested + object is created or modified, the probe + should set this object as closely to the requested + value as is possible for the particular + probe implementation and available resources. The + probe must not lower this value except as a result + of a modification to the associated + historyControlBucketsRequested object. + + There will be times when the actual number of + buckets associated with this entry is less than + the value of this object. In this case, at the + end of each sampling interval, a new bucket will + be added to the media-specific table. 
+ + When the number of buckets reaches the value of + this object and a new bucket is to be added to the + media-specific table, the oldest bucket associated + with this historyControlEntry shall be deleted by + the agent so that the new bucket can be added. + + When the value of this object changes to a value less + than the current value, entries are deleted + from the media-specific table associated with this + historyControlEntry. Enough of the oldest of these + entries shall be deleted by the agent so that their + number remains less than or equal to the new value of + this object. + + When the value of this object changes to a value greater + than the current value, the number of associated media- + specific entries may be allowed to grow." + ::= { historyControlEntry 4 } + +historyControlInterval OBJECT-TYPE + SYNTAX Integer32 (1..3600) + UNITS "Seconds" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The interval in seconds over which the data is + sampled for each bucket in the part of the + media-specific table associated with this + historyControlEntry. This interval can + be set to any number of seconds between 1 and + 3600 (1 hour). + + Because the counters in a bucket may overflow at their + maximum value with no indication, a prudent manager will + take into account the possibility of overflow in any of + the associated counters. It is important to consider the + minimum time in which any counter could overflow on a + particular media type and set the historyControlInterval + object to a value less than this interval. This is + typically most important for the 'octets' counter in any + media-specific table. For example, on an Ethernet + network, the etherHistoryOctets counter could overflow + in about one hour at the Ethernet's maximum + utilization. + + This object may not be modified if the associated + historyControlStatus object is equal to valid(1)." + DEFVAL { 1800 } + ::= { historyControlEntry 5 } + +historyControlOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { historyControlEntry 6 } + +historyControlStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this historyControl entry. + + Each instance of the media-specific table associated + with this historyControlEntry will be deleted by the agent + if this historyControlEntry is not equal to valid(1)." + ::= { historyControlEntry 7 } + +-- The Ethernet History Group + +-- Implementation of the Ethernet History group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Ethernet History group records periodic statistical samples +-- from a network and stores them for later retrieval. +-- Once samples are taken, their data is stored in an entry +-- in a media-specific table. Each such entry defines one +-- sample, and is associated with the historyControlEntry that +-- caused the sample to be taken. This group defines the +-- etherHistoryTable, for Ethernet networks. +-- + +etherHistoryTable OBJECT-TYPE + SYNTAX SEQUENCE OF EtherHistoryEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of Ethernet history entries." 
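(Editorial aside.) The "about one hour" overflow figure quoted above for the octets counter is easy to verify: a Counter32 wraps at 2^32, and a saturated 10-Mb/s segment carries at most 1.25 million octets per second. A quick C check:

    /* Illustrative sketch: rough check of the "about one hour" overflow figure
     * for etherHistoryOctets on a fully loaded 10-Mb/s Ethernet.              */
    #include <stdio.h>

    int main(void)
    {
        double counter_max = 4294967296.0;   /* Counter32 wraps at 2^32      */
        double octets_per_s = 10e6 / 8.0;    /* 10 Mb/s ~= 1.25e6 octets/s   */
        double seconds = counter_max / octets_per_s;

        printf("overflow after ~%.0f s (~%.0f min)\n", seconds, seconds / 60.0);
        /* ~3436 s, roughly 57 minutes; so historyControlInterval should stay
         * comfortably below one hour on a busy 10-Mb/s segment.              */
        return 0;
    }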
+ ::= { history 2 } + +etherHistoryEntry OBJECT-TYPE + SYNTAX EtherHistoryEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An historical sample of Ethernet statistics on a particular + Ethernet interface. This sample is associated with the + historyControlEntry which set up the parameters for + a regular collection of these samples. As an example, an + instance of the etherHistoryPkts object might be named + etherHistoryPkts.2.89" + INDEX { etherHistoryIndex , etherHistorySampleIndex } + ::= { etherHistoryTable 1 } + +EtherHistoryEntry ::= SEQUENCE { + etherHistoryIndex Integer32, + etherHistorySampleIndex Integer32, + etherHistoryIntervalStart TimeTicks, + etherHistoryDropEvents Counter32, + etherHistoryOctets Counter32, + etherHistoryPkts Counter32, + etherHistoryBroadcastPkts Counter32, + etherHistoryMulticastPkts Counter32, + etherHistoryCRCAlignErrors Counter32, + etherHistoryUndersizePkts Counter32, + etherHistoryOversizePkts Counter32, + etherHistoryFragments Counter32, + etherHistoryJabbers Counter32, + etherHistoryCollisions Counter32, + etherHistoryUtilization Integer32 +} + +etherHistoryIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The history of which this entry is a part. The + history identified by a particular value of this + index is the same history as identified + by the same value of historyControlIndex." + ::= { etherHistoryEntry 1 } + +etherHistorySampleIndex OBJECT-TYPE + SYNTAX Integer32 (1..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies the particular + sample this entry represents among all samples + associated with the same historyControlEntry. + This index starts at 1 and increases by one + as each new sample is taken." + ::= { etherHistoryEntry 2 } + +etherHistoryIntervalStart OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime at the start of the interval + over which this sample was measured. If the probe + keeps track of the time of day, it should start + the first sample of the history at a time such that + when the next hour of the day begins, a sample is + started at that instant. Note that following this + rule may require the probe to delay collecting the + first sample of the history, as each sample must be + of the same interval. Also note that the sample which + is currently being collected is not accessible in this + table until the end of its interval." + ::= { etherHistoryEntry 3 } + +etherHistoryDropEvents OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of events in which packets + were dropped by the probe due to lack of resources + during this sampling interval. Note that this number + is not necessarily the number of packets dropped, it + is just the number of times this condition has been + detected." + ::= { etherHistoryEntry 4 } + +etherHistoryOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of octets of data (including + those in bad packets) received on the + network (excluding framing bits but including + FCS octets)." + ::= { etherHistoryEntry 5 } + +etherHistoryPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets (including bad packets) + received during this sampling interval." 
+ ::= { etherHistoryEntry 6 } + +etherHistoryBroadcastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets received during this + sampling interval that were directed to the + broadcast address." + ::= { etherHistoryEntry 7 } + +etherHistoryMulticastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets received during this + sampling interval that were directed to a + multicast address. Note that this number does not + include packets addressed to the broadcast address." + ::= { etherHistoryEntry 8 } + +etherHistoryCRCAlignErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets received during this + sampling interval that had a length (excluding + framing bits but including FCS octets) between + 64 and 1518 octets, inclusive, but had either a bad Frame + Check Sequence (FCS) with an integral number of octets + (FCS Error) or a bad FCS with a non-integral number + of octets (Alignment Error)." + ::= { etherHistoryEntry 9 } + +etherHistoryUndersizePkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets received during this + sampling interval that were less than 64 octets + long (excluding framing bits but including FCS + octets) and were otherwise well formed." + ::= { etherHistoryEntry 10 } + +etherHistoryOversizePkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets received during this + sampling interval that were longer than 1518 + octets (excluding framing bits but including + FCS octets) but were otherwise well formed." + ::= { etherHistoryEntry 11 } + +etherHistoryFragments OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of packets received during this + sampling interval that were less than 64 octets in + length (excluding framing bits but including FCS + octets) had either a bad Frame Check Sequence (FCS) + with an integral number of octets (FCS Error) or a bad + FCS with a non-integral number of octets (Alignment + Error). + + Note that it is entirely normal for etherHistoryFragments to + increment. This is because it counts both runts (which are + normal occurrences due to collisions) and noise hits." + ::= { etherHistoryEntry 12 } + +etherHistoryJabbers OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets received during this + sampling interval that were longer than 1518 octets + (excluding framing bits but including FCS octets), + and had either a bad Frame Check Sequence (FCS) + with an integral number of octets (FCS Error) or + a bad FCS with a non-integral number of octets + (Alignment Error). + + Note that this definition of jabber is different + than the definition in IEEE-802.3 section 8.2.1.5 + (10BASE5) and section 10.3.1.4 (10BASE2). These + documents define jabber as the condition where any + packet exceeds 20 ms. The allowed range to detect + jabber is between 20 ms and 150 ms." 
+ ::= { etherHistoryEntry 13 } + +etherHistoryCollisions OBJECT-TYPE + SYNTAX Counter32 + UNITS "Collisions" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The best estimate of the total number of collisions + on this Ethernet segment during this sampling + interval. + + The value returned will depend on the location of the + RMON probe. Section 8.2.1.3 (10BASE-5) and section + 10.3.1.3 (10BASE-2) of IEEE standard 802.3 states that a + station must detect a collision, in the receive mode, if + three or more stations are transmitting simultaneously. A + repeater port must detect a collision when two or more + stations are transmitting simultaneously. Thus a probe + placed on a repeater port could record more collisions + than a probe connected to a station on the same segment + would. + + Probe location plays a much smaller role when considering + 10BASE-T. 14.2.1.4 (10BASE-T) of IEEE standard 802.3 + defines a collision as the simultaneous presence of signals + on the DO and RD circuits (transmitting and receiving + at the same time). A 10BASE-T station can only detect + collisions when it is transmitting. Thus probes placed on + a station and a repeater, should report the same number of + collisions. + + Note also that an RMON probe inside a repeater should + ideally report collisions between the repeater and one or + more other hosts (transmit collisions as defined by IEEE + 802.3k) plus receiver collisions observed on any coax + segments to which the repeater is connected." + ::= { etherHistoryEntry 14 } + +etherHistoryUtilization OBJECT-TYPE + SYNTAX Integer32 (0..10000) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The best estimate of the mean physical layer + network utilization on this interface during this + sampling interval, in hundredths of a percent." + ::= { etherHistoryEntry 15 } + +-- The Alarm Group + +-- Implementation of the Alarm group is optional. The Alarm Group +-- requires the implementation of the Event group. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Alarm group periodically takes statistical samples from +-- variables in the probe and compares them to thresholds that have +-- been configured. The alarm table stores configuration +-- entries that each define a variable, polling period, and +-- threshold parameters. If a sample is found to cross the +-- threshold values, an event is generated. Only variables that +-- resolve to an ASN.1 primitive type of INTEGER (INTEGER, Integer32, +-- Counter32, Counter64, Gauge32, or TimeTicks) may be monitored in +-- this way. +-- +-- This function has a hysteresis mechanism to limit the generation +-- of events. This mechanism generates one event as a threshold +-- is crossed in the appropriate direction. No more events are +-- generated for that threshold until the opposite threshold is +-- crossed. +-- +-- In the case of a sampling a deltaValue, a probe may implement +-- this mechanism with more precision if it takes a delta sample +-- twice per period, each time comparing the sum of the latest two +-- samples to the threshold. This allows the detection of threshold +-- crossings that span the sampling boundary. Note that this does +-- not require any special configuration of the threshold value. +-- It is suggested that probes implement this more precise algorithm. + +alarmTable OBJECT-TYPE + SYNTAX SEQUENCE OF AlarmEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of alarm entries." 
+ ::= { alarm 1 } + +alarmEntry OBJECT-TYPE + SYNTAX AlarmEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of parameters that set up a periodic checking + for alarm conditions. For example, an instance of the + alarmValue object might be named alarmValue.8" + INDEX { alarmIndex } + ::= { alarmTable 1 } + +AlarmEntry ::= SEQUENCE { + alarmIndex Integer32, + alarmInterval Integer32, + alarmVariable OBJECT IDENTIFIER, + alarmSampleType INTEGER, + alarmValue Integer32, + alarmStartupAlarm INTEGER, + alarmRisingThreshold Integer32, + alarmFallingThreshold Integer32, + alarmRisingEventIndex Integer32, + alarmFallingEventIndex Integer32, + alarmOwner OwnerString, + alarmStatus EntryStatus +} + +alarmIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the + alarm table. Each such entry defines a + diagnostic sample at a particular interval + for an object on the device." + ::= { alarmEntry 1 } + +alarmInterval OBJECT-TYPE + SYNTAX Integer32 + UNITS "Seconds" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The interval in seconds over which the data is + sampled and compared with the rising and falling + thresholds. When setting this variable, care + should be taken in the case of deltaValue + sampling - the interval should be set short enough + that the sampled variable is very unlikely to + increase or decrease by more than 2^31 - 1 during + a single sampling interval. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 2 } + +alarmVariable OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The object identifier of the particular variable to be + sampled. Only variables that resolve to an ASN.1 primitive + type of INTEGER (INTEGER, Integer32, Counter32, Counter64, + Gauge, or TimeTicks) may be sampled. + + Because SNMP access control is articulated entirely + in terms of the contents of MIB views, no access + control mechanism exists that can restrict the value of + this object to identify only those objects that exist + in a particular MIB view. Because there is thus no + acceptable means of restricting the read access that + could be obtained through the alarm mechanism, the + probe must only grant write access to this object in + those views that have read access to all objects on + the probe. + + During a set operation, if the supplied variable name is + not available in the selected MIB view, a badValue error + must be returned. If at any time the variable name of + an established alarmEntry is no longer available in the + selected MIB view, the probe must change the status of + this alarmEntry to invalid(4). + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 3 } + +alarmSampleType OBJECT-TYPE + SYNTAX INTEGER { + absoluteValue(1), + deltaValue(2) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The method of sampling the selected variable and + calculating the value to be compared against the + thresholds. If the value of this object is + absoluteValue(1), the value of the selected variable + will be compared directly with the thresholds at the + end of the sampling interval. 
If the value of this + object is deltaValue(2), the value of the selected + variable at the last sample will be subtracted from + the current value, and the difference compared with + the thresholds. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 4 } + +alarmValue OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of the statistic during the last sampling + period. For example, if the sample type is deltaValue, + this value will be the difference between the samples + at the beginning and end of the period. If the sample + type is absoluteValue, this value will be the sampled + value at the end of the period. + This is the value that is compared with the rising and + falling thresholds. + + The value during the current sampling period is not + made available until the period is completed and will + remain available until the next period completes." + ::= { alarmEntry 5 } + +alarmStartupAlarm OBJECT-TYPE + SYNTAX INTEGER { + risingAlarm(1), + fallingAlarm(2), + risingOrFallingAlarm(3) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The alarm that may be sent when this entry is first + set to valid. If the first sample after this entry + becomes valid is greater than or equal to the + risingThreshold and alarmStartupAlarm is equal to + risingAlarm(1) or risingOrFallingAlarm(3), then a single + rising alarm will be generated. If the first sample + after this entry becomes valid is less than or equal + to the fallingThreshold and alarmStartupAlarm is equal + to fallingAlarm(2) or risingOrFallingAlarm(3), then a + single falling alarm will be generated. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 6 } + +alarmRisingThreshold OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "A threshold for the sampled statistic. When the current + sampled value is greater than or equal to this threshold, + and the value at the last sampling interval was less than + this threshold, a single event will be generated. + A single event will also be generated if the first + sample after this entry becomes valid is greater than or + equal to this threshold and the associated + alarmStartupAlarm is equal to risingAlarm(1) or + risingOrFallingAlarm(3). + + After a rising event is generated, another such event + will not be generated until the sampled value + falls below this threshold and reaches the + alarmFallingThreshold. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 7 } + +alarmFallingThreshold OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "A threshold for the sampled statistic. When the current + sampled value is less than or equal to this threshold, + and the value at the last sampling interval was greater than + this threshold, a single event will be generated. + A single event will also be generated if the first + sample after this entry becomes valid is less than or + equal to this threshold and the associated + alarmStartupAlarm is equal to fallingAlarm(2) or + risingOrFallingAlarm(3). + + After a falling event is generated, another such event + will not be generated until the sampled value + rises above this threshold and reaches the + alarmRisingThreshold. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." 
+ ::= { alarmEntry 8 } + +alarmRisingEventIndex OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The index of the eventEntry that is + used when a rising threshold is crossed. The + eventEntry identified by a particular value of + this index is the same as identified by the same value + of the eventIndex object. If there is no + corresponding entry in the eventTable, then + no association exists. In particular, if this value + is zero, no associated event will be generated, as + zero is not a valid event index. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 9 } + +alarmFallingEventIndex OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The index of the eventEntry that is + used when a falling threshold is crossed. The + eventEntry identified by a particular value of + this index is the same as identified by the same value + of the eventIndex object. If there is no + corresponding entry in the eventTable, then + no association exists. In particular, if this value + is zero, no associated event will be generated, as + zero is not a valid event index. + + This object may not be modified if the associated + alarmStatus object is equal to valid(1)." + ::= { alarmEntry 10 } + +alarmOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { alarmEntry 11 } + +alarmStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this alarm entry." + ::= { alarmEntry 12 } + +-- The Host Group + +-- Implementation of the Host group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The host group discovers new hosts on the network by +-- keeping a list of source and destination MAC Addresses seen +-- in good packets. For each of these addresses, the host group +-- keeps a set of statistics. The hostControlTable controls +-- which interfaces this function is performed on, and contains +-- some information about the process. On behalf of each +-- hostControlEntry, data is collected on an interface and placed +-- in both the hostTable and the hostTimeTable. If the +-- monitoring device finds itself short of resources, it may +-- delete entries as needed. It is suggested that the device +-- delete the least recently used entries first. + +-- The hostTable contains entries for each address discovered on +-- a particular interface. Each entry contains statistical +-- data about that host. This table is indexed by the +-- MAC address of the host, through which a random access +-- may be achieved. + +-- The hostTimeTable contains data in the same format as the +-- hostTable, and must contain the same set of hosts, but is +-- indexed using hostTimeCreationOrder rather than hostAddress. +-- The hostTimeCreationOrder is an integer which reflects +-- the relative order in which a particular entry was discovered +-- and thus inserted into the table. As this order, and thus +-- the index, is among those entries currently in the table, +-- the index for a particular entry may change if an +-- (earlier) entry is deleted. Thus the association between +-- hostTimeCreationOrder and hostTimeEntry may be broken at +-- any time. + +-- The hostTimeTable has two important uses. 
The first is the +-- fast download of this potentially large table. Because the +-- index of this table runs from 1 to the size of the table, +-- inclusive, its values are predictable. This allows very +-- efficient packing of variables into SNMP PDU's and allows +-- a table transfer to have multiple packets outstanding. +-- These benefits increase transfer rates tremendously. + +-- The second use of the hostTimeTable is the efficient discovery +-- by the management station of new entries added to the table. +-- After the management station has downloaded the entire table, +-- it knows that new entries will be added immediately after the +-- end of the current table. It can thus detect new entries there +-- and retrieve them easily. + +-- Because the association between hostTimeCreationOrder and +-- hostTimeEntry may be broken at any time, the management +-- station must monitor the related hostControlLastDeleteTime +-- object. When the management station thus detects a deletion, +-- it must assume that any such associations have been broken, +-- and invalidate any it has stored locally. This includes +-- restarting any download of the hostTimeTable that may have been +-- in progress, as well as rediscovering the end of the +-- hostTimeTable so that it may detect new entries. If the +-- management station does not detect the broken association, +-- it may continue to refer to a particular host by its +-- creationOrder while unwittingly retrieving the data associated +-- with another host entirely. If this happens while downloading +-- the host table, the management station may fail to download +-- all of the entries in the table. + + +hostControlTable OBJECT-TYPE + SYNTAX SEQUENCE OF HostControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of host table control entries." + ::= { hosts 1 } + +hostControlEntry OBJECT-TYPE + SYNTAX HostControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of parameters that set up the discovery of hosts + on a particular interface and the collection of statistics + about these hosts. For example, an instance of the + hostControlTableSize object might be named + hostControlTableSize.1" + INDEX { hostControlIndex } + ::= { hostControlTable 1 } + +HostControlEntry ::= SEQUENCE { + + hostControlIndex Integer32, + hostControlDataSource OBJECT IDENTIFIER, + hostControlTableSize Integer32, + hostControlLastDeleteTime TimeTicks, + hostControlOwner OwnerString, + hostControlStatus EntryStatus +} + +hostControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the + hostControl table. Each such entry defines + a function that discovers hosts on a particular interface + and places statistics about them in the hostTable and + the hostTimeTable on behalf of this hostControlEntry." + ::= { hostControlEntry 1 } + +hostControlDataSource OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object identifies the source of the data for + this instance of the host function. This source + can be any interface on this device. In order + to identify a particular interface, this object shall + identify the instance of the ifIndex object, defined + in RFC 2233 [17], for the desired interface. + For example, if an entry were to receive data from + interface #1, this object would be set to ifIndex.1. 
+ + The statistics in this group reflect all packets + on the local network segment attached to the identified + interface. + + An agent may or may not be able to tell if fundamental + changes to the media of the interface have occurred and + necessitate an invalidation of this entry. For example, a + hot-pluggable ethernet card could be pulled out and replaced + by a token-ring card. In such a case, if the agent has such + knowledge of the change, it is recommended that it + invalidate this entry. + + This object may not be modified if the associated + hostControlStatus object is equal to valid(1)." + ::= { hostControlEntry 2 } + +hostControlTableSize OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of hostEntries in the hostTable and the + hostTimeTable associated with this hostControlEntry." + ::= { hostControlEntry 3 } + +hostControlLastDeleteTime OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime when the last entry + was deleted from the portion of the hostTable + associated with this hostControlEntry. If no + deletions have occurred, this value shall be zero." + ::= { hostControlEntry 4 } + +hostControlOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { hostControlEntry 5 } + +hostControlStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this hostControl entry. + + If this object is not equal to valid(1), all associated + entries in the hostTable, hostTimeTable, and the + hostTopNTable shall be deleted by the agent." + ::= { hostControlEntry 6 } + +hostTable OBJECT-TYPE + SYNTAX SEQUENCE OF HostEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of host entries." + ::= { hosts 2 } + +hostEntry OBJECT-TYPE + SYNTAX HostEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A collection of statistics for a particular host that has + been discovered on an interface of this device. For example, + an instance of the hostOutBroadcastPkts object might be + named hostOutBroadcastPkts.1.6.8.0.32.27.3.176" + INDEX { hostIndex, hostAddress } + ::= { hostTable 1 } + +HostEntry ::= SEQUENCE { + hostAddress OCTET STRING, + hostCreationOrder Integer32, + hostIndex Integer32, + hostInPkts Counter32, + hostOutPkts Counter32, + hostInOctets Counter32, + hostOutOctets Counter32, + hostOutErrors Counter32, + hostOutBroadcastPkts Counter32, + hostOutMulticastPkts Counter32 +} + +hostAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The physical address of this host." + ::= { hostEntry 1 } + +hostCreationOrder OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that defines the relative ordering of + the creation time of hosts captured for a + particular hostControlEntry. This index shall + be between 1 and N, where N is the value of + the associated hostControlTableSize. The ordering + of the indexes is based on the order of each entry's + insertion into the table, in which entries added earlier + have a lower index value than entries added later. + + It is important to note that the order for a + particular entry may change as an (earlier) entry + is deleted from the table. 
Because this order may + change, management stations should make use of the + hostControlLastDeleteTime variable in the + hostControlEntry associated with the relevant + portion of the hostTable. By observing + this variable, the management station may detect + the circumstances where a previous association + between a value of hostCreationOrder + and a hostEntry may no longer hold." + ::= { hostEntry 2 } + +hostIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The set of collected host statistics of which + this entry is a part. The set of hosts + identified by a particular value of this + index is associated with the hostControlEntry + as identified by the same value of hostControlIndex." + ::= { hostEntry 3 } + +hostInPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted to this + address since it was added to the hostTable." + ::= { hostEntry 4 } + +hostOutPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets, including bad packets, transmitted + by this address since it was added to the hostTable." + ::= { hostEntry 5 } + +hostInOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets transmitted to this address since + it was added to the hostTable (excluding framing + bits but including FCS octets), except for those + octets in bad packets." + ::= { hostEntry 6 } + +hostOutOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets transmitted by this address since + it was added to the hostTable (excluding framing + bits but including FCS octets), including those + octets in bad packets." + ::= { hostEntry 7 } + +hostOutErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of bad packets transmitted by this address + since this host was added to the hostTable." + ::= { hostEntry 8 } + +hostOutBroadcastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted by this + address that were directed to the broadcast address + since this host was added to the hostTable." + ::= { hostEntry 9 } + +hostOutMulticastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted by this + address that were directed to a multicast address + since this host was added to the hostTable. + Note that this number does not include packets + directed to the broadcast address." + ::= { hostEntry 10 } + +-- host Time Table + +hostTimeTable OBJECT-TYPE + SYNTAX SEQUENCE OF HostTimeEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of time-ordered host table entries." + ::= { hosts 3 } + +hostTimeEntry OBJECT-TYPE + SYNTAX HostTimeEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A collection of statistics for a particular host that has + been discovered on an interface of this device. This + collection includes the relative ordering of the creation + time of this object. 
For example, an instance of the + hostTimeOutBroadcastPkts object might be named + hostTimeOutBroadcastPkts.1.687" + INDEX { hostTimeIndex, hostTimeCreationOrder } + ::= { hostTimeTable 1 } + +HostTimeEntry ::= SEQUENCE { + hostTimeAddress OCTET STRING, + hostTimeCreationOrder Integer32, + hostTimeIndex Integer32, + hostTimeInPkts Counter32, + hostTimeOutPkts Counter32, + hostTimeInOctets Counter32, + hostTimeOutOctets Counter32, + hostTimeOutErrors Counter32, + hostTimeOutBroadcastPkts Counter32, + hostTimeOutMulticastPkts Counter32 +} + +hostTimeAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The physical address of this host." + ::= { hostTimeEntry 1 } + +hostTimeCreationOrder OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in + the hostTime table among those entries associated + with the same hostControlEntry. This index shall + be between 1 and N, where N is the value of + the associated hostControlTableSize. The ordering + of the indexes is based on the order of each entry's + insertion into the table, in which entries added earlier + have a lower index value than entries added later. + Thus the management station has the ability to + learn of new entries added to this table without + downloading the entire table. + + It is important to note that the index for a + particular entry may change as an (earlier) entry + is deleted from the table. Because this order may + change, management stations should make use of the + hostControlLastDeleteTime variable in the + hostControlEntry associated with the relevant + portion of the hostTimeTable. By observing + this variable, the management station may detect + the circumstances where a download of the table + may have missed entries, and where a previous + association between a value of hostTimeCreationOrder + and a hostTimeEntry may no longer hold." + ::= { hostTimeEntry 2 } + +hostTimeIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The set of collected host statistics of which + this entry is a part. The set of hosts + identified by a particular value of this + index is associated with the hostControlEntry + as identified by the same value of hostControlIndex." + ::= { hostTimeEntry 3 } + +hostTimeInPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted to this + address since it was added to the hostTimeTable." + ::= { hostTimeEntry 4 } + +hostTimeOutPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets, including bad packets, transmitted + by this address since it was added to the hostTimeTable." + ::= { hostTimeEntry 5 } + +hostTimeInOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets transmitted to this address since + it was added to the hostTimeTable (excluding framing + bits but including FCS octets), except for those + octets in bad packets." + ::= { hostTimeEntry 6 } + +hostTimeOutOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets transmitted by this address since + it was added to the hostTimeTable (excluding framing + bits but including FCS octets), including those + octets in bad packets." 
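As described for hostTimeCreationOrder above, a management station can fetch only the rows it has not yet seen, provided it re-synchronises whenever hostControlLastDeleteTime changes. A hypothetical manager-side sketch follows; the three funs stand in for whatever SNMP get machinery is in use and, like the module name, are assumptions of this sketch rather than anything defined by the MIB or by OTP.

    -module(rmon_hosttime_sketch).
    -export([poll/5]).

    %% State holds how many rows we have cached and the
    %% hostControlLastDeleteTime value we last observed.
    poll(CtrlIndex, #{size := Known, last_delete := Seen} = State,
         GetSize, GetLastDelete, GetRows) ->
        case GetLastDelete(CtrlIndex) of
            Seen ->
                %% No deletion: creation order is stable, so rows
                %% Known+1..Size are exactly the newly discovered hosts.
                Size = GetSize(CtrlIndex),
                New  = GetRows(CtrlIndex, Known + 1, Size),
                {New, State#{size := Size}};
            Later ->
                %% A deletion broke the creationOrder association: drop the
                %% cache and download the whole table again.
                Size = GetSize(CtrlIndex),
                All  = GetRows(CtrlIndex, 1, Size),
                {All, State#{size := Size, last_delete := Later}}
        end.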
+ ::= { hostTimeEntry 7 } + +hostTimeOutErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of bad packets transmitted by this address + since this host was added to the hostTimeTable." + ::= { hostTimeEntry 8 } + +hostTimeOutBroadcastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted by this + address that were directed to the broadcast address + since this host was added to the hostTimeTable." + ::= { hostTimeEntry 9 } + +hostTimeOutMulticastPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of good packets transmitted by this + address that were directed to a multicast address + since this host was added to the hostTimeTable. + Note that this number does not include packets directed + to the broadcast address." + ::= { hostTimeEntry 10 } + +-- The Host Top "N" Group + +-- Implementation of the Host Top N group is optional. The Host Top N +-- group requires the implementation of the host group. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Host Top N group is used to prepare reports that describe +-- the hosts that top a list ordered by one of their statistics. +-- The available statistics are samples of one of their +-- base statistics, over an interval specified by the management +-- station. Thus, these statistics are rate based. The management +-- station also selects how many such hosts are reported. + +-- The hostTopNControlTable is used to initiate the generation of +-- such a report. The management station may select the parameters +-- of such a report, such as which interface, which statistic, +-- how many hosts, and the start and stop times of the sampling. +-- When the report is prepared, entries are created in the +-- hostTopNTable associated with the relevant hostTopNControlEntry. +-- These entries are static for each report after it has been +-- prepared. + +hostTopNControlTable OBJECT-TYPE + SYNTAX SEQUENCE OF HostTopNControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of top N host control entries." + ::= { hostTopN 1 } + +hostTopNControlEntry OBJECT-TYPE + SYNTAX HostTopNControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of parameters that control the creation of a report + of the top N hosts according to several metrics. For + example, an instance of the hostTopNDuration object might + be named hostTopNDuration.3" + INDEX { hostTopNControlIndex } + ::= { hostTopNControlTable 1 } + +HostTopNControlEntry ::= SEQUENCE { + hostTopNControlIndex Integer32, + hostTopNHostIndex Integer32, + hostTopNRateBase INTEGER, + hostTopNTimeRemaining Integer32, + hostTopNDuration Integer32, + hostTopNRequestedSize Integer32, + hostTopNGrantedSize Integer32, + hostTopNStartTime TimeTicks, + hostTopNOwner OwnerString, + hostTopNStatus EntryStatus +} + +hostTopNControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry + in the hostTopNControl table. Each such + entry defines one top N report prepared for + one interface." 
+ ::= { hostTopNControlEntry 1 } + +hostTopNHostIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The host table for which a top N report will be prepared + on behalf of this entry. The host table identified by a + particular value of this index is associated with the same + host table as identified by the same value of + hostIndex. + + This object may not be modified if the associated + hostTopNStatus object is equal to valid(1)." + ::= { hostTopNControlEntry 2 } + +hostTopNRateBase OBJECT-TYPE + SYNTAX INTEGER { + hostTopNInPkts(1), + hostTopNOutPkts(2), + hostTopNInOctets(3), + hostTopNOutOctets(4), + hostTopNOutErrors(5), + hostTopNOutBroadcastPkts(6), + hostTopNOutMulticastPkts(7) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The variable for each host that the hostTopNRate + variable is based upon. + + This object may not be modified if the associated + hostTopNStatus object is equal to valid(1)." + ::= { hostTopNControlEntry 3 } + +hostTopNTimeRemaining OBJECT-TYPE + SYNTAX Integer32 + UNITS "Seconds" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The number of seconds left in the report currently being + collected. When this object is modified by the management + station, a new collection is started, possibly aborting + a currently running report. The new value is used + as the requested duration of this report, which is + loaded into the associated hostTopNDuration object. + + When this object is set to a non-zero value, any + associated hostTopNEntries shall be made + inaccessible by the monitor. While the value of this + object is non-zero, it decrements by one per second until + it reaches zero. During this time, all associated + hostTopNEntries shall remain inaccessible. At the time + that this object decrements to zero, the report is made + accessible in the hostTopNTable. Thus, the hostTopN + table needs to be created only at the end of the collection + interval." + DEFVAL { 0 } + ::= { hostTopNControlEntry 4 } + +hostTopNDuration OBJECT-TYPE + SYNTAX Integer32 + UNITS "Seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of seconds that this report has collected + during the last sampling interval, or if this + report is currently being collected, the number + of seconds that this report is being collected + during this sampling interval. + + When the associated hostTopNTimeRemaining object is set, + this object shall be set by the probe to the same value + and shall not be modified until the next time + the hostTopNTimeRemaining is set. + + This value shall be zero if no reports have been + requested for this hostTopNControlEntry." + DEFVAL { 0 } + ::= { hostTopNControlEntry 5 } + +hostTopNRequestedSize OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The maximum number of hosts requested for the top N + table. + + When this object is created or modified, the probe + should set hostTopNGrantedSize as closely to this + object as is possible for the particular probe + implementation and available resources." + DEFVAL { 10 } + ::= { hostTopNControlEntry 6 } + +hostTopNGrantedSize OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum number of hosts in the top N table. 
+ + When the associated hostTopNRequestedSize object is + created or modified, the probe should set this + object as closely to the requested value as is possible + for the particular implementation and available + resources. The probe must not lower this value except + as a result of a set to the associated + hostTopNRequestedSize object. + + Hosts with the highest value of hostTopNRate shall be + placed in this table in decreasing order of this rate + until there is no more room or until there are no more + hosts." + ::= { hostTopNControlEntry 7 } + +hostTopNStartTime OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime when this top N report was + last started. In other words, this is the time that + the associated hostTopNTimeRemaining object was + modified to start the requested report." + ::= { hostTopNControlEntry 8 } + +hostTopNOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { hostTopNControlEntry 9 } + +hostTopNStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this hostTopNControl entry. + + If this object is not equal to valid(1), all associated + hostTopNEntries shall be deleted by the agent." + ::= { hostTopNControlEntry 10 } + +hostTopNTable OBJECT-TYPE + SYNTAX SEQUENCE OF HostTopNEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of top N host entries." + ::= { hostTopN 2 } + +hostTopNEntry OBJECT-TYPE + SYNTAX HostTopNEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of statistics for a host that is part of a top N + report. For example, an instance of the hostTopNRate + object might be named hostTopNRate.3.10" + INDEX { hostTopNReport, hostTopNIndex } + ::= { hostTopNTable 1 } + +HostTopNEntry ::= SEQUENCE { + hostTopNReport Integer32, + hostTopNIndex Integer32, + hostTopNAddress OCTET STRING, + hostTopNRate Integer32 +} + +hostTopNReport OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This object identifies the top N report of which + this entry is a part. The set of hosts + identified by a particular value of this + object is part of the same report as identified + by the same value of the hostTopNControlIndex object." + ::= { hostTopNEntry 1 } + +hostTopNIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in + the hostTopN table among those in the same report. + This index is between 1 and N, where N is the + number of entries in this table. Increasing values + of hostTopNIndex shall be assigned to entries with + decreasing values of hostTopNRate until index N + is assigned to the entry with the lowest value of + hostTopNRate or there are no more hostTopNEntries." + ::= { hostTopNEntry 2 } + +hostTopNAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The physical address of this host." + ::= { hostTopNEntry 3 } + +hostTopNRate OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The amount of change in the selected variable + during this sampling interval. The selected + variable is this host's instance of the object + selected by hostTopNRateBase." 
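hostTopNRate is thus a per-interval delta of the counter selected by hostTopNRateBase, and preparing a report amounts to sorting those deltas and keeping the first hostTopNGrantedSize entries. A minimal sketch of that selection step, with the sampling itself abstracted away; the module name and the shape of the input lists are assumptions of the sketch.

    -module(rmon_topn_sketch).
    -export([report/3]).

    %% Start and End are [{HostAddress, BaseCounterValue}] sampled at the
    %% beginning and end of the collection interval for the selected rate base.
    report(GrantedSize, Start, End) ->
        StartMap = maps:from_list(Start),
        Rates = [{Addr, Now - maps:get(Addr, StartMap, 0)}
                 || {Addr, Now} <- End],
        %% Highest rate first; hostTopNIndex 1..N follows this ordering.
        Sorted = lists:reverse(lists:keysort(2, Rates)),
        lists:sublist(Sorted, GrantedSize).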
+ ::= { hostTopNEntry 4 } + +-- The Matrix Group + +-- Implementation of the Matrix group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Matrix group consists of the matrixControlTable, matrixSDTable +-- and the matrixDSTable. These tables store statistics for a +-- particular conversation between two addresses. As the device +-- detects a new conversation, including those to a non-unicast +-- address, it creates a new entry in both of the matrix tables. +-- It must only create new entries based on information +-- received in good packets. If the monitoring device finds +-- itself short of resources, it may delete entries as needed. +-- It is suggested that the device delete the least recently used +-- entries first. + +matrixControlTable OBJECT-TYPE + SYNTAX SEQUENCE OF MatrixControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information entries for the + traffic matrix on each interface." + ::= { matrix 1 } + +matrixControlEntry OBJECT-TYPE + SYNTAX MatrixControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Information about a traffic matrix on a particular + interface. For example, an instance of the + matrixControlLastDeleteTime object might be named + matrixControlLastDeleteTime.1" + INDEX { matrixControlIndex } + ::= { matrixControlTable 1 } + +MatrixControlEntry ::= SEQUENCE { + matrixControlIndex Integer32, + matrixControlDataSource OBJECT IDENTIFIER, + matrixControlTableSize Integer32, + matrixControlLastDeleteTime TimeTicks, + matrixControlOwner OwnerString, + matrixControlStatus EntryStatus +} + +matrixControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the + matrixControl table. Each such entry defines + a function that discovers conversations on a particular + interface and places statistics about them in the + matrixSDTable and the matrixDSTable on behalf of this + matrixControlEntry." + ::= { matrixControlEntry 1 } + +matrixControlDataSource OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object identifies the source of + the data from which this entry creates a traffic matrix. + This source can be any interface on this device. In + order to identify a particular interface, this object + shall identify the instance of the ifIndex object, + defined in RFC 2233 [17], for the desired + interface. For example, if an entry were to receive data + from interface #1, this object would be set to ifIndex.1. + + The statistics in this group reflect all packets + on the local network segment attached to the identified + interface. + + An agent may or may not be able to tell if fundamental + changes to the media of the interface have occurred and + necessitate an invalidation of this entry. For example, a + hot-pluggable ethernet card could be pulled out and replaced + by a token-ring card. In such a case, if the agent has such + knowledge of the change, it is recommended that it + invalidate this entry. + + This object may not be modified if the associated + matrixControlStatus object is equal to valid(1)." + ::= { matrixControlEntry 2 } + +matrixControlTableSize OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of matrixSDEntries in the matrixSDTable + for this interface. 
This must also be the value of + the number of entries in the matrixDSTable for this + interface." + ::= { matrixControlEntry 3 } + +matrixControlLastDeleteTime OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime when the last entry + was deleted from the portion of the matrixSDTable + or matrixDSTable associated with this matrixControlEntry. + If no deletions have occurred, this value shall be + zero." + ::= { matrixControlEntry 4 } + +matrixControlOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { matrixControlEntry 5 } + +matrixControlStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this matrixControl entry. + If this object is not equal to valid(1), all associated + entries in the matrixSDTable and the matrixDSTable + shall be deleted by the agent." + ::= { matrixControlEntry 6 } + +matrixSDTable OBJECT-TYPE + SYNTAX SEQUENCE OF MatrixSDEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of traffic matrix entries indexed by + source and destination MAC address." + ::= { matrix 2 } + +matrixSDEntry OBJECT-TYPE + SYNTAX MatrixSDEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A collection of statistics for communications between + two addresses on a particular interface. For example, + an instance of the matrixSDPkts object might be named + matrixSDPkts.1.6.8.0.32.27.3.176.6.8.0.32.10.8.113" + INDEX { matrixSDIndex, + matrixSDSourceAddress, matrixSDDestAddress } + ::= { matrixSDTable 1 } + +MatrixSDEntry ::= SEQUENCE { + matrixSDSourceAddress OCTET STRING, + matrixSDDestAddress OCTET STRING, + matrixSDIndex Integer32, + matrixSDPkts Counter32, + matrixSDOctets Counter32, + matrixSDErrors Counter32 +} + +matrixSDSourceAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The source physical address." + ::= { matrixSDEntry 1 } + +matrixSDDestAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The destination physical address." + ::= { matrixSDEntry 2 } + +matrixSDIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The set of collected matrix statistics of which + this entry is a part. The set of matrix statistics + identified by a particular value of this index + is associated with the same matrixControlEntry + as identified by the same value of matrixControlIndex." + ::= { matrixSDEntry 3 } + +matrixSDPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets transmitted from the source + address to the destination address (this number includes + bad packets)." + ::= { matrixSDEntry 4 } + +matrixSDOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets (excluding framing bits but + including FCS octets) contained in all packets + transmitted from the source address to the + destination address." + ::= { matrixSDEntry 5 } + +matrixSDErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of bad packets transmitted from + the source address to the destination address." 
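Since every conversation must appear in both matrixSDTable and matrixDSTable, an agent can keep one statistics record per (source, destination) pair and expose it under the two index orderings. One possible shape of the per-packet update, with the storage abstracted as an Erlang map; the module and record layout are assumptions of this sketch, not taken from OTP.

    -module(rmon_matrix_sketch).
    -export([count/4]).

    %% Table is a map keyed by {Src, Dst}; the same record backs both the
    %% SD view (indexed by Src, Dst) and the DS view (indexed by Dst, Src).
    count(Src, Dst, PktOctets, Table) ->
        Upd = fun(#{pkts := P, octets := O} = Stats) ->
                      Stats#{pkts := P + 1, octets := O + PktOctets}
              end,
        Init = #{pkts => 1, octets => PktOctets, errors => 0},
        maps:update_with({Src, Dst}, Upd, Init, Table).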
+ ::= { matrixSDEntry 6 } + +-- Traffic matrix tables from destination to source + +matrixDSTable OBJECT-TYPE + SYNTAX SEQUENCE OF MatrixDSEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of traffic matrix entries indexed by + destination and source MAC address." + ::= { matrix 3 } + +matrixDSEntry OBJECT-TYPE + SYNTAX MatrixDSEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A collection of statistics for communications between + two addresses on a particular interface. For example, + an instance of the matrixSDPkts object might be named + matrixSDPkts.1.6.8.0.32.10.8.113.6.8.0.32.27.3.176" + INDEX { matrixDSIndex, + matrixDSDestAddress, matrixDSSourceAddress } + ::= { matrixDSTable 1 } + +MatrixDSEntry ::= SEQUENCE { + matrixDSSourceAddress OCTET STRING, + matrixDSDestAddress OCTET STRING, + matrixDSIndex Integer32, + matrixDSPkts Counter32, + matrixDSOctets Counter32, + matrixDSErrors Counter32 +} + +matrixDSSourceAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The source physical address." + ::= { matrixDSEntry 1 } + +matrixDSDestAddress OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The destination physical address." + ::= { matrixDSEntry 2 } + +matrixDSIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The set of collected matrix statistics of which + this entry is a part. The set of matrix statistics + identified by a particular value of this index + is associated with the same matrixControlEntry + as identified by the same value of matrixControlIndex." + ::= { matrixDSEntry 3 } + +matrixDSPkts OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets transmitted from the source + address to the destination address (this number includes + bad packets)." + ::= { matrixDSEntry 4 } + +matrixDSOctets OBJECT-TYPE + SYNTAX Counter32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of octets (excluding framing bits + but including FCS octets) contained in all packets + transmitted from the source address to the + destination address." + ::= { matrixDSEntry 5 } + +matrixDSErrors OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of bad packets transmitted from + the source address to the destination address." + ::= { matrixDSEntry 6 } + +-- The Filter Group + +-- Implementation of the Filter group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Filter group allows packets to be captured with an +-- arbitrary filter expression. A logical data and +-- event stream or "channel" is formed by the packets +-- that match the filter expression. +-- +-- This filter mechanism allows the creation of an arbitrary +-- logical expression with which to filter packets. Each +-- filter associated with a channel is OR'ed with the others. +-- Within a filter, any bits checked in the data and status are +-- AND'ed with respect to other bits in the same filter. The +-- NotMask also allows for checking for inequality. Finally, +-- the channelAcceptType object allows for inversion of the +-- whole equation. 
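Put differently: a packet matches a channel if at least one associated filter accepts both its data and its status, and channelAcceptType (defined later in this group) decides whether the matching or the non-matching packets are accepted. A sketch of that outer combination, assuming per-filter match functions; the module and the fun-based representation are assumptions of the sketch.

    -module(rmon_channel_sketch).
    -export([accept/3]).

    %% Filters is a list of {DataMatchFun, StatusMatchFun}; each fun takes
    %% the packet and returns a boolean.
    %% AcceptType is acceptMatched | acceptFailed.
    accept(Packet, Filters, AcceptType) ->
        Matched = lists:any(fun({DataMatch, StatusMatch}) ->
                                    DataMatch(Packet) andalso StatusMatch(Packet)
                            end, Filters),
        case AcceptType of
            acceptMatched -> Matched;
            acceptFailed  -> not Matched
        end.

With no associated filters, Matched is false, so the channel accepts nothing in the acceptMatched case and everything in the acceptFailed case, as required by the channelAcceptType description.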
+-- +-- If a management station wishes to receive a trap to alert it +-- that new packets have been captured and are available for +-- download, it is recommended that it set up an alarm entry that +-- monitors the value of the relevant channelMatches instance. +-- +-- The channel can be turned on or off, and can also +-- generate events when packets pass through it. + +filterTable OBJECT-TYPE + SYNTAX SEQUENCE OF FilterEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of packet filter entries." + ::= { filter 1 } + +filterEntry OBJECT-TYPE + SYNTAX FilterEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of parameters for a packet filter applied on a + particular interface. As an example, an instance of the + filterPktData object might be named filterPktData.12" + INDEX { filterIndex } + ::= { filterTable 1 } + +FilterEntry ::= SEQUENCE { + filterIndex Integer32, + filterChannelIndex Integer32, + filterPktDataOffset Integer32, + filterPktData OCTET STRING, + filterPktDataMask OCTET STRING, + filterPktDataNotMask OCTET STRING, + filterPktStatus Integer32, + filterPktStatusMask Integer32, + filterPktStatusNotMask Integer32, + filterOwner OwnerString, + filterStatus EntryStatus +} + +filterIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry + in the filter table. Each such entry defines + one filter that is to be applied to every packet + received on an interface." + ::= { filterEntry 1 } + +filterChannelIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object identifies the channel of which this filter + is a part. The filters identified by a particular value + of this object are associated with the same channel as + identified by the same value of the channelIndex object." + ::= { filterEntry 2 } + +filterPktDataOffset OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The offset from the beginning of each packet where + a match of packet data will be attempted. This offset + is measured from the point in the physical layer + packet after the framing bits, if any. For example, + in an Ethernet frame, this point is at the beginning of + the destination MAC address. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + DEFVAL { 0 } + + ::= { filterEntry 3 } + +filterPktData OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The data that is to be matched with the input packet. + For each packet received, this filter and the accompanying + filterPktDataMask and filterPktDataNotMask will be + adjusted for the offset. The only bits relevant to this + match algorithm are those that have the corresponding + filterPktDataMask bit equal to one. The following three + rules are then applied to every packet: + + (1) If the packet is too short and does not have data + corresponding to part of the filterPktData, the packet + will fail this data match. + + (2) For each relevant bit from the packet with the + corresponding filterPktDataNotMask bit set to zero, if + the bit from the packet is not equal to the corresponding + bit from the filterPktData, then the packet will fail + this data match. 
+ + (3) If for every relevant bit from the packet with the + corresponding filterPktDataNotMask bit set to one, the + bit from the packet is equal to the corresponding bit + from the filterPktData, then the packet will fail this + data match. + + Any packets that have not failed any of the three matches + above have passed this data match. In particular, a zero + length filter will match any packet. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 4 } + +filterPktDataMask OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The mask that is applied to the match process. + After adjusting this mask for the offset, only those + bits in the received packet that correspond to bits set + in this mask are relevant for further processing by the + match algorithm. The offset is applied to filterPktDataMask + in the same way it is applied to the filter. For the + purposes of the matching algorithm, if the associated + filterPktData object is longer than this mask, this mask is + conceptually extended with '1' bits until it reaches the + length of the filterPktData object. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 5 } + +filterPktDataNotMask OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The inversion mask that is applied to the match + process. After adjusting this mask for the offset, + those relevant bits in the received packet that correspond + to bits cleared in this mask must all be equal to their + corresponding bits in the filterPktData object for the packet + to be accepted. In addition, at least one of those relevant + bits in the received packet that correspond to bits set in + this mask must be different to its corresponding bit in the + filterPktData object. + + For the purposes of the matching algorithm, if the associated + filterPktData object is longer than this mask, this mask is + conceptually extended with '0' bits until it reaches the + length of the filterPktData object. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 6 } + +filterPktStatus OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status that is to be matched with the input packet. + The only bits relevant to this match algorithm are those that + have the corresponding filterPktStatusMask bit equal to one. + The following two rules are then applied to every packet: + + (1) For each relevant bit from the packet status with the + corresponding filterPktStatusNotMask bit set to zero, if + the bit from the packet status is not equal to the + corresponding bit from the filterPktStatus, then the + packet will fail this status match. + + (2) If for every relevant bit from the packet status with the + corresponding filterPktStatusNotMask bit set to one, the + bit from the packet status is equal to the corresponding + bit from the filterPktStatus, then the packet will fail + this status match. + + Any packets that have not failed either of the two matches + above have passed this status match. In particular, a zero + length status filter will match any packet's status. + + The value of the packet status is a sum. This sum + initially takes the value zero. Then, for each + error, E, that has been discovered in this packet, + 2 raised to a value representing E is added to the sum. 
+ The errors and the bits that represent them are dependent + on the media type of the interface that this channel + is receiving packets from. + + The errors defined for a packet captured off of an + Ethernet interface are as follows: + + bit # Error + 0 Packet is longer than 1518 octets + 1 Packet is shorter than 64 octets + 2 Packet experienced a CRC or Alignment error + + For example, an Ethernet fragment would have a + value of 6 (2^1 + 2^2). + + As this MIB is expanded to new media types, this object + will have other media-specific errors defined. + + For the purposes of this status matching algorithm, if the + packet status is longer than this filterPktStatus object, + this object is conceptually extended with '0' bits until it + reaches the size of the packet status. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 7 } + +filterPktStatusMask OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The mask that is applied to the status match process. + Only those bits in the received packet that correspond to + bits set in this mask are relevant for further processing + by the status match algorithm. For the purposes + of the matching algorithm, if the associated filterPktStatus + object is longer than this mask, this mask is conceptually + extended with '1' bits until it reaches the size of the + filterPktStatus. In addition, if a packet status is longer + than this mask, this mask is conceptually extended with '0' + bits until it reaches the size of the packet status. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 8 } + +filterPktStatusNotMask OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The inversion mask that is applied to the status match + process. Those relevant bits in the received packet status + that correspond to bits cleared in this mask must all be + equal to their corresponding bits in the filterPktStatus + object for the packet to be accepted. In addition, at least + one of those relevant bits in the received packet status + that correspond to bits set in this mask must be different + to its corresponding bit in the filterPktStatus object for + the packet to be accepted. + + For the purposes of the matching algorithm, if the associated + filterPktStatus object or a packet status is longer than this + mask, this mask is conceptually extended with '0' bits until + it reaches the longer of the lengths of the filterPktStatus + object and the packet status. + + This object may not be modified if the associated + filterStatus object is equal to valid(1)." + ::= { filterEntry 9 } + +filterOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { filterEntry 10 } + +filterStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this filter entry." + ::= { filterEntry 11 } + +channelTable OBJECT-TYPE + SYNTAX SEQUENCE OF ChannelEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of packet channel entries." + ::= { filter 2 } + +channelEntry OBJECT-TYPE + SYNTAX ChannelEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of parameters for a packet channel applied on a + particular interface. 
As an example, an instance of the + channelMatches object might be named channelMatches.3" + INDEX { channelIndex } + ::= { channelTable 1 } + +ChannelEntry ::= SEQUENCE { + channelIndex Integer32, + channelIfIndex Integer32, + channelAcceptType INTEGER, + channelDataControl INTEGER, + channelTurnOnEventIndex Integer32, + channelTurnOffEventIndex Integer32, + channelEventIndex Integer32, + channelEventStatus INTEGER, + channelMatches Counter32, + channelDescription DisplayString, + channelOwner OwnerString, + channelStatus EntryStatus +} + +channelIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the channel + table. Each such entry defines one channel, a logical + data and event stream. + + It is suggested that before creating a channel, an + application should scan all instances of the + filterChannelIndex object to make sure that there are no + pre-existing filters that would be inadvertently be linked + to the channel." + ::= { channelEntry 1 } + +channelIfIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The value of this object uniquely identifies the + interface on this remote network monitoring device to which + the associated filters are applied to allow data into this + channel. The interface identified by a particular value + of this object is the same interface as identified by the + same value of the ifIndex object, defined in RFC 2233 [17]. + + The filters in this group are applied to all packets on + the local network segment attached to the identified + interface. + + An agent may or may not be able to tell if fundamental + changes to the media of the interface have occurred and + necessitate an invalidation of this entry. For example, a + hot-pluggable ethernet card could be pulled out and replaced + by a token-ring card. In such a case, if the agent has such + knowledge of the change, it is recommended that it + invalidate this entry. + + This object may not be modified if the associated + channelStatus object is equal to valid(1)." + ::= { channelEntry 2 } + +channelAcceptType OBJECT-TYPE + SYNTAX INTEGER { + acceptMatched(1), + acceptFailed(2) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object controls the action of the filters + associated with this channel. If this object is equal + to acceptMatched(1), packets will be accepted to this + channel if they are accepted by both the packet data and + packet status matches of an associated filter. If + this object is equal to acceptFailed(2), packets will + be accepted to this channel only if they fail either + the packet data match or the packet status match of + each of the associated filters. + + In particular, a channel with no associated filters will + match no packets if set to acceptMatched(1) case and will + match all packets in the acceptFailed(2) case. + + This object may not be modified if the associated + channelStatus object is equal to valid(1)." + ::= { channelEntry 3 } + +channelDataControl OBJECT-TYPE + SYNTAX INTEGER { + on(1), + off(2) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object controls the flow of data through this channel. + If this object is on(1), data, status and events flow + through this channel. If this object is off(2), data, + status and events will not flow through this channel." 
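The filterPktData / filterPktDataMask / filterPktDataNotMask rules (1)-(3) given earlier reduce to a few bitwise operations once both masks have been extended to the length of filterPktData. The sketch below follows the usual reading that rule (3) only applies when the NotMask selects at least one relevant bit; the status match is analogous, operating on the error-bit sum instead of the packet data. Module and function names are assumptions of the sketch.

    -module(rmon_filter_sketch).
    -export([data_match/5]).

    %% data_match(Packet, Offset, FilterData, Mask, NotMask) -> boolean()
    %% Mask is assumed already extended with 1-bits, and NotMask with
    %% 0-bits, to the length of FilterData.
    data_match(Packet, Offset, FilterData, Mask, NotMask) ->
        Len = byte_size(FilterData),
        case Packet of
            <<_:Offset/binary, Window:Len/binary, _/binary>> ->
                P = binary:decode_unsigned(Window),
                F = binary:decode_unsigned(FilterData),
                M = binary:decode_unsigned(Mask),
                N = binary:decode_unsigned(NotMask),
                Diff  = (P bxor F) band M,        %% relevant bits that differ
                EqOk  = Diff band (bnot N) =:= 0, %% rule (2): NotMask=0 bits equal
                NeqOk = (M band N =:= 0)          %% rule (3): some NotMask=1 bit
                        orelse Diff band N =/= 0, %%          must differ
                EqOk andalso NeqOk;
            _ ->
                false                             %% rule (1): packet too short
        end.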
+ DEFVAL { off } + ::= { channelEntry 4 } + +channelTurnOnEventIndex OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The value of this object identifies the event + that is configured to turn the associated + channelDataControl from off to on when the event is + generated. The event identified by a particular value + of this object is the same event as identified by the + same value of the eventIndex object. If there is no + corresponding entry in the eventTable, then no + association exists. In fact, if no event is intended + for this channel, channelTurnOnEventIndex must be + set to zero, a non-existent event index. + + This object may not be modified if the associated + channelStatus object is equal to valid(1)." + ::= { channelEntry 5 } + +channelTurnOffEventIndex OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The value of this object identifies the event + that is configured to turn the associated + channelDataControl from on to off when the event is + generated. The event identified by a particular value + of this object is the same event as identified by the + same value of the eventIndex object. If there is no + corresponding entry in the eventTable, then no + association exists. In fact, if no event is intended + for this channel, channelTurnOffEventIndex must be + set to zero, a non-existent event index. + + This object may not be modified if the associated + channelStatus object is equal to valid(1)." + ::= { channelEntry 6 } + +channelEventIndex OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The value of this object identifies the event + that is configured to be generated when the + associated channelDataControl is on and a packet + is matched. The event identified by a particular value + of this object is the same event as identified by the + same value of the eventIndex object. If there is no + corresponding entry in the eventTable, then no + association exists. In fact, if no event is intended + for this channel, channelEventIndex must be + set to zero, a non-existent event index. + + This object may not be modified if the associated + channelStatus object is equal to valid(1)." + ::= { channelEntry 7 } + +channelEventStatus OBJECT-TYPE + SYNTAX INTEGER { + eventReady(1), + eventFired(2), + eventAlwaysReady(3) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The event status of this channel. + + If this channel is configured to generate events + when packets are matched, a means of controlling + the flow of those events is often needed. When + this object is equal to eventReady(1), a single + event may be generated, after which this object + will be set by the probe to eventFired(2). While + in the eventFired(2) state, no events will be + generated until the object is modified to + eventReady(1) (or eventAlwaysReady(3)). The + management station can thus easily respond to a + notification of an event by re-enabling this object. + + If the management station wishes to disable this + flow control and allow events to be generated + at will, this object may be set to + eventAlwaysReady(3). Disabling the flow control + is discouraged as it can result in high network + traffic or other performance problems." 
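channelEventStatus is thus a one-shot gate that the management station re-arms after handling each notification, with eventAlwaysReady bypassing the gate. Expressed as a small transition function (illustrative only; the probe would apply it to each matched packet while channelDataControl is on):

    -module(rmon_event_gate_sketch).
    -export([on_match/1]).

    %% Returns {fire | suppress, NewChannelEventStatus} for one matched packet.
    on_match(eventReady)       -> {fire,     eventFired};
    on_match(eventFired)       -> {suppress, eventFired};
    on_match(eventAlwaysReady) -> {fire,     eventAlwaysReady}.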
+ DEFVAL { eventReady } + ::= { channelEntry 8 } + +channelMatches OBJECT-TYPE + SYNTAX Counter32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times this channel has matched a packet. + Note that this object is updated even when + channelDataControl is set to off." + ::= { channelEntry 9 } + +channelDescription OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..127)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "A comment describing this channel." + ::= { channelEntry 10 } + +channelOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { channelEntry 11 } + +channelStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this channel entry." + ::= { channelEntry 12 } + +-- The Packet Capture Group + +-- Implementation of the Packet Capture group is optional. The Packet +-- Capture Group requires implementation of the Filter Group. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Packet Capture group allows packets to be captured +-- upon a filter match. The bufferControlTable controls +-- the captured packets output from a channel that is +-- associated with it. The captured packets are placed +-- in entries in the captureBufferTable. These entries are +-- associated with the bufferControlEntry on whose behalf they +-- were stored. + +bufferControlTable OBJECT-TYPE + SYNTAX SEQUENCE OF BufferControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of buffers control entries." + ::= { capture 1 } + +bufferControlEntry OBJECT-TYPE + SYNTAX BufferControlEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of parameters that control the collection of a stream + of packets that have matched filters. As an example, an + instance of the bufferControlCaptureSliceSize object might + be named bufferControlCaptureSliceSize.3" + + INDEX { bufferControlIndex } + ::= { bufferControlTable 1 } + +BufferControlEntry ::= SEQUENCE { + bufferControlIndex Integer32, + bufferControlChannelIndex Integer32, + bufferControlFullStatus INTEGER, + bufferControlFullAction INTEGER, + bufferControlCaptureSliceSize Integer32, + bufferControlDownloadSliceSize Integer32, + bufferControlDownloadOffset Integer32, + bufferControlMaxOctetsRequested Integer32, + bufferControlMaxOctetsGranted Integer32, + bufferControlCapturedPackets Integer32, + bufferControlTurnOnTime TimeTicks, + bufferControlOwner OwnerString, + bufferControlStatus EntryStatus +} + +bufferControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry + in the bufferControl table. The value of this + index shall never be zero. Each such + entry defines one set of packets that is + captured and controlled by one or more filters." + ::= { bufferControlEntry 1 } + +bufferControlChannelIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "An index that identifies the channel that is the + source of packets for this bufferControl table. + The channel identified by a particular value of this + index is the same as identified by the same value of + the channelIndex object. 
+ + This object may not be modified if the associated + bufferControlStatus object is equal to valid(1)." + ::= { bufferControlEntry 2 } + +bufferControlFullStatus OBJECT-TYPE + SYNTAX INTEGER { + spaceAvailable(1), + full(2) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This object shows whether the buffer has room to + accept new packets or if it is full. + + If the status is spaceAvailable(1), the buffer is + accepting new packets normally. If the status is + full(2) and the associated bufferControlFullAction + object is wrapWhenFull, the buffer is accepting new + packets by deleting enough of the oldest packets + to make room for new ones as they arrive. Otherwise, + if the status is full(2) and the + bufferControlFullAction object is lockWhenFull, + then the buffer has stopped collecting packets. + + When this object is set to full(2) the probe must + not later set it to spaceAvailable(1) except in the + case of a significant gain in resources such as + an increase of bufferControlOctetsGranted. In + particular, the wrap-mode action of deleting old + packets to make room for newly arrived packets + must not affect the value of this object." + ::= { bufferControlEntry 3 } + +bufferControlFullAction OBJECT-TYPE + SYNTAX INTEGER { + lockWhenFull(1), + wrapWhenFull(2) -- FIFO + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "Controls the action of the buffer when it + reaches the full status. When in the lockWhenFull(1) + state and a packet is added to the buffer that + fills the buffer, the bufferControlFullStatus will + be set to full(2) and this buffer will stop capturing + packets." + ::= { bufferControlEntry 4 } + +bufferControlCaptureSliceSize OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The maximum number of octets of each packet + that will be saved in this capture buffer. + For example, if a 1500 octet packet is received by + the probe and this object is set to 500, then only + 500 octets of the packet will be stored in the + associated capture buffer. If this variable is set + to 0, the capture buffer will save as many octets + as is possible. + + This object may not be modified if the associated + bufferControlStatus object is equal to valid(1)." + DEFVAL { 100 } + ::= { bufferControlEntry 5 } + +bufferControlDownloadSliceSize OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The maximum number of octets of each packet + in this capture buffer that will be returned in + an SNMP retrieval of that packet. For example, + if 500 octets of a packet have been stored in the + associated capture buffer, the associated + bufferControlDownloadOffset is 0, and this + object is set to 100, then the captureBufferPacket + object that contains the packet will contain only + the first 100 octets of the packet. + + A prudent manager will take into account possible + interoperability or fragmentation problems that may + occur if the download slice size is set too large. + In particular, conformant SNMP implementations are not + required to accept messages whose length exceeds 484 + octets, although they are encouraged to support larger + datagrams whenever feasible." 
+ DEFVAL { 100 } + ::= { bufferControlEntry 6 } + +bufferControlDownloadOffset OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The offset of the first octet of each packet + in this capture buffer that will be returned in + an SNMP retrieval of that packet. For example, + if 500 octets of a packet have been stored in the + associated capture buffer and this object is set to + 100, then the captureBufferPacket object that + contains the packet will contain bytes starting + 100 octets into the packet." + DEFVAL { 0 } + ::= { bufferControlEntry 7 } + +bufferControlMaxOctetsRequested OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The requested maximum number of octets to be + saved in this captureBuffer, including any + implementation-specific overhead. If this variable + is set to -1, the capture buffer will save as many + octets as is possible. + + When this object is created or modified, the probe + should set bufferControlMaxOctetsGranted as closely + to this object as is possible for the particular probe + implementation and available resources. However, if + the object has the special value of -1, the probe + must set bufferControlMaxOctetsGranted to -1." + DEFVAL { -1 } + ::= { bufferControlEntry 8 } + +bufferControlMaxOctetsGranted OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum number of octets that can be + saved in this captureBuffer, including overhead. + If this variable is -1, the capture buffer will save + as many octets as possible. + + When the bufferControlMaxOctetsRequested object is + created or modified, the probe should set this object + as closely to the requested value as is possible for the + particular probe implementation and available resources. + However, if the request object has the special value + of -1, the probe must set this object to -1. + + The probe must not lower this value except as a result of + a modification to the associated + bufferControlMaxOctetsRequested object. + + When this maximum number of octets is reached + and a new packet is to be added to this + capture buffer and the corresponding + bufferControlFullAction is set to wrapWhenFull(2), + enough of the oldest packets associated with this + capture buffer shall be deleted by the agent so + that the new packet can be added. If the corresponding + bufferControlFullAction is set to lockWhenFull(1), + the new packet shall be discarded. In either case, + the probe must set bufferControlFullStatus to + full(2). + + When the value of this object changes to a value less + than the current value, entries are deleted from + the captureBufferTable associated with this + bufferControlEntry. Enough of the + oldest of these captureBufferEntries shall be + deleted by the agent so that the number of octets + used remains less than or equal to the new value of + this object. + + When the value of this object changes to a value greater + than the current value, the number of associated + captureBufferEntries may be allowed to grow." + ::= { bufferControlEntry 9 } + +bufferControlCapturedPackets OBJECT-TYPE + SYNTAX Integer32 + UNITS "Packets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of packets currently in this captureBuffer." 
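As an aside on the bufferControlFullAction and bufferControlMaxOctetsGranted semantics described above: the two full actions can be modelled with a FIFO queue of {Octets, Packet} entries. This is an illustrative sketch only, not part of the patch; names are invented, the latching of bufferControlFullStatus at full(2) and the "save as many octets as possible" special values are not modelled, and a new packet is assumed to fit within the granted maximum on its own.

    -module(rmon_capture_model).
    -export([add_packet/4]).

    %% Buf is a queue:queue() of {Octets, Packet}; Used is the number of octets in it.
    %% Returns {FullStatus, {NewBuf, NewUsed}}.
    add_packet({Buf, Used}, Packet, Octets, {MaxOctets, Action}) ->
        if
            Used + Octets =< MaxOctets ->
                {spaceAvailable, {queue:in({Octets, Packet}, Buf), Used + Octets}};
            Action =:= lockWhenFull ->
                {full, {Buf, Used}};                       %% new packet is discarded
            Action =:= wrapWhenFull ->
                {full, wrap(Buf, Used, Packet, Octets, MaxOctets)}
        end.

    %% wrapWhenFull: drop the oldest packets until the new one fits.
    wrap(Buf, Used, Packet, Octets, MaxOctets) when Used + Octets > MaxOctets ->
        {{value, {Old, _Pkt}}, Buf1} = queue:out(Buf),
        wrap(Buf1, Used - Old, Packet, Octets, MaxOctets);
    wrap(Buf, Used, Packet, Octets, _MaxOctets) ->
        {queue:in({Octets, Packet}, Buf), Used + Octets}.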
+ ::= { bufferControlEntry 10 } + +bufferControlTurnOnTime OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime when this capture buffer was + first turned on." + ::= { bufferControlEntry 11 } + +bufferControlOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it." + ::= { bufferControlEntry 12 } + +bufferControlStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this buffer Control Entry." + ::= { bufferControlEntry 13 } + +captureBufferTable OBJECT-TYPE + SYNTAX SEQUENCE OF CaptureBufferEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of packets captured off of a channel." + ::= { capture 2 } + +captureBufferEntry OBJECT-TYPE + SYNTAX CaptureBufferEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A packet captured off of an attached network. As an + example, an instance of the captureBufferPacketData + object might be named captureBufferPacketData.3.1783" + INDEX { captureBufferControlIndex, captureBufferIndex } + ::= { captureBufferTable 1 } + +CaptureBufferEntry ::= SEQUENCE { + captureBufferControlIndex Integer32, + captureBufferIndex Integer32, + captureBufferPacketID Integer32, + captureBufferPacketData OCTET STRING, + captureBufferPacketLength Integer32, + captureBufferPacketTime Integer32, + captureBufferPacketStatus Integer32 +} + +captureBufferControlIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The index of the bufferControlEntry with which + this packet is associated." + ::= { captureBufferEntry 1 } + +captureBufferIndex OBJECT-TYPE + SYNTAX Integer32 (1..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry + in the captureBuffer table associated with a + particular bufferControlEntry. This index will + start at 1 and increase by one for each new packet + added with the same captureBufferControlIndex. + + Should this value reach 2147483647, the next packet + added with the same captureBufferControlIndex shall + cause this value to wrap around to 1." + ::= { captureBufferEntry 2 } + +captureBufferPacketID OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that describes the order of packets + that are received on a particular interface. + The packetID of a packet captured on an + interface is defined to be greater than the + packetID's of all packets captured previously on + the same interface. As the captureBufferPacketID + object has a maximum positive value of 2^31 - 1, + any captureBufferPacketID object shall have the + value of the associated packet's packetID mod 2^31." + ::= { captureBufferEntry 3 } + +captureBufferPacketData OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The data inside the packet, starting at the beginning + of the packet plus any offset specified in the + associated bufferControlDownloadOffset, including any + link level headers. The length of the data in this object + is the minimum of the length of the captured packet minus + the offset, the length of the associated + bufferControlCaptureSliceSize minus the offset, and the + associated bufferControlDownloadSliceSize. If this minimum + is less than zero, this object shall have a length of zero." 
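Editorial note: the length rule quoted in the captureBufferPacketData DESCRIPTION above reduces to taking a minimum and clamping at zero. A sketch in Erlang, not part of the patch (names invented; the special 0/-1 "as much as possible" values of the control objects are ignored here):

    -module(rmon_capture_len).
    -export([download_length/4]).

    %% CapturedLen   - length of the captured packet
    %% CaptureSlice  - bufferControlCaptureSliceSize
    %% Offset        - bufferControlDownloadOffset
    %% DownloadSlice - bufferControlDownloadSliceSize
    download_length(CapturedLen, CaptureSlice, Offset, DownloadSlice) ->
        max(0, lists:min([CapturedLen - Offset,
                          CaptureSlice - Offset,
                          DownloadSlice])).

    %% download_length(1500, 500, 100, 100) -> 100
    %% download_length(1500, 500, 600, 100) -> 0   (offset beyond the stored slice)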
+ ::= { captureBufferEntry 4 } + +captureBufferPacketLength OBJECT-TYPE + SYNTAX Integer32 + UNITS "Octets" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The actual length (off the wire) of the packet stored + in this entry, including FCS octets." + ::= { captureBufferEntry 5 } + +captureBufferPacketTime OBJECT-TYPE + SYNTAX Integer32 + UNITS "Milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of milliseconds that had passed since + this capture buffer was first turned on when this + packet was captured." + ::= { captureBufferEntry 6 } + +captureBufferPacketStatus OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "A value which indicates the error status of this packet. + + The value of this object is defined in the same way as + filterPktStatus. The value is a sum. This sum + initially takes the value zero. Then, for each + error, E, that has been discovered in this packet, + 2 raised to a value representing E is added to the sum. + + The errors defined for a packet captured off of an + Ethernet interface are as follows: + + bit # Error + 0 Packet is longer than 1518 octets + 1 Packet is shorter than 64 octets + 2 Packet experienced a CRC or Alignment error + 3 First packet in this capture buffer after + it was detected that some packets were + not processed correctly. + 4 Packet's order in buffer is only approximate + (May only be set for packets sent from + the probe) + + For example, an Ethernet fragment would have a + value of 6 (2^1 + 2^2). + + As this MIB is expanded to new media types, this object + will have other media-specific errors defined." + ::= { captureBufferEntry 7 } + +-- The Event Group + +-- Implementation of the Event group is optional. +-- Consult the MODULE-COMPLIANCE macro for the authoritative +-- conformance information for this MIB. +-- +-- The Event group controls the generation and notification +-- of events from this device. Each entry in the eventTable +-- describes the parameters of the event that can be triggered. +-- Each event entry is fired by an associated condition located +-- elsewhere in the MIB. An event entry may also be associated +-- with a function elsewhere in the MIB that will be executed +-- when the event is generated. For example, a channel may +-- be turned on or off by the firing of an event. +-- +-- Each eventEntry may optionally specify that a log entry +-- be created on its behalf whenever the event occurs. +-- Each entry may also specify that notification should +-- occur by way of SNMP trap messages. In this case, the +-- community for the trap message is given in the associated +-- eventCommunity object. The enterprise and specific trap +-- fields of the trap are determined by the condition that +-- triggered the event. Two traps are defined: risingAlarm and +-- fallingAlarm. If the eventTable is triggered by a condition +-- specified elsewhere, the enterprise and specific trap fields +-- must be specified for traps generated for that condition. + +eventTable OBJECT-TYPE + SYNTAX SEQUENCE OF EventEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of events to be generated." + ::= { event 1 } + +eventEntry OBJECT-TYPE + SYNTAX EventEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of parameters that describe an event to be generated + when certain conditions are met. 
As an example, an instance + of the eventLastTimeSent object might be named + eventLastTimeSent.6" + INDEX { eventIndex } + ::= { eventTable 1 } + +EventEntry ::= SEQUENCE { + eventIndex Integer32, + eventDescription DisplayString, + eventType INTEGER, + eventCommunity OCTET STRING, + eventLastTimeSent TimeTicks, + eventOwner OwnerString, + eventStatus EntryStatus +} + +eventIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry in the + event table. Each such entry defines one event that + is to be generated when the appropriate conditions + occur." + ::= { eventEntry 1 } + +eventDescription OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..127)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "A comment describing this event entry." + ::= { eventEntry 2 } + +eventType OBJECT-TYPE + SYNTAX INTEGER { + none(1), + log(2), + snmptrap(3), -- send an SNMP trap + logandtrap(4) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The type of notification that the probe will make + about this event. In the case of log, an entry is + made in the log table for each event. In the case of + snmp-trap, an SNMP trap is sent to one or more + management stations." + ::= { eventEntry 3 } + +eventCommunity OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..127)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "If an SNMP trap is to be sent, it will be sent to + the SNMP community specified by this octet string." + ::= { eventEntry 4 } + +eventLastTimeSent OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime at the time this event + entry last generated an event. If this entry has + not generated any events, this value will be + zero." + ::= { eventEntry 5 } + +eventOwner OBJECT-TYPE + SYNTAX OwnerString + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The entity that configured this entry and is therefore + using the resources assigned to it. + + If this object contains a string starting with 'monitor' + and has associated entries in the log table, all connected + management stations should retrieve those log entries, + as they may have significance to all management stations + connected to this device" + ::= { eventEntry 6 } + +eventStatus OBJECT-TYPE + SYNTAX EntryStatus + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The status of this event entry. + + If this object is not equal to valid(1), all associated + log entries shall be deleted by the agent." + ::= { eventEntry 7 } + +-- +logTable OBJECT-TYPE + SYNTAX SEQUENCE OF LogEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of events that have been logged." + ::= { event 2 } + +logEntry OBJECT-TYPE + SYNTAX LogEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A set of data describing an event that has been + logged. For example, an instance of the logDescription + object might be named logDescription.6.47" + INDEX { logEventIndex, logIndex } + ::= { logTable 1 } + +LogEntry ::= SEQUENCE { + logEventIndex Integer32, + logIndex Integer32, + logTime TimeTicks, + logDescription DisplayString +} + +logEventIndex OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The event entry that generated this log + entry. The log identified by a particular + value of this index is associated with the same + eventEntry as identified by the same value + of eventIndex." 
+ ::= { logEntry 1 } + +logIndex OBJECT-TYPE + SYNTAX Integer32 (1..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An index that uniquely identifies an entry + in the log table amongst those generated by the + same eventEntries. These indexes are + assigned beginning with 1 and increase by one + with each new log entry. The association + between values of logIndex and logEntries + is fixed for the lifetime of each logEntry. + The agent may choose to delete the oldest + instances of logEntry as required because of + lack of memory. It is an implementation-specific + matter as to when this deletion may occur." + ::= { logEntry 2 } + +logTime OBJECT-TYPE + SYNTAX TimeTicks + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of sysUpTime when this log entry was created." + ::= { logEntry 3 } + +logDescription OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An implementation dependent description of the + event that activated this log entry." + ::= { logEntry 4 } + +-- Remote Network Monitoring Traps + +rmonEventsV2 OBJECT-IDENTITY + STATUS current + DESCRIPTION "Definition point for RMON notifications." + ::= { rmon 0 } + +risingAlarm NOTIFICATION-TYPE + OBJECTS { alarmIndex, alarmVariable, alarmSampleType, + alarmValue, alarmRisingThreshold } + STATUS current + DESCRIPTION + "The SNMP trap that is generated when an alarm + entry crosses its rising threshold and generates + an event that is configured for sending SNMP + traps." + ::= { rmonEventsV2 1 } + +fallingAlarm NOTIFICATION-TYPE + OBJECTS { alarmIndex, alarmVariable, alarmSampleType, + alarmValue, alarmFallingThreshold } + STATUS current + DESCRIPTION + "The SNMP trap that is generated when an alarm + entry crosses its falling threshold and generates + an event that is configured for sending SNMP + traps." + ::= { rmonEventsV2 2 } + +-- Conformance information + +rmonCompliances OBJECT IDENTIFIER ::= { rmonConformance 9 } +rmonGroups OBJECT IDENTIFIER ::= { rmonConformance 10 } + +-- Compliance Statements +rmonCompliance MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The requirements for conformance to the RMON MIB. At least + one of the groups in this module must be implemented to + conform to the RMON MIB. Implementations of this MIB + must also implement the system group of MIB-II [16] and the + IF-MIB [17]." + MODULE -- this module + + GROUP rmonEtherStatsGroup + DESCRIPTION + "The RMON Ethernet Statistics Group is optional." + + GROUP rmonHistoryControlGroup + DESCRIPTION + "The RMON History Control Group is optional." + + GROUP rmonEthernetHistoryGroup + DESCRIPTION + "The RMON Ethernet History Group is optional." + + GROUP rmonAlarmGroup + DESCRIPTION + "The RMON Alarm Group is optional." + + GROUP rmonHostGroup + DESCRIPTION + "The RMON Host Group is mandatory when the + rmonHostTopNGroup is implemented." + + GROUP rmonHostTopNGroup + DESCRIPTION + "The RMON Host Top N Group is optional." + + GROUP rmonMatrixGroup + DESCRIPTION + "The RMON Matrix Group is optional." + + GROUP rmonFilterGroup + DESCRIPTION + "The RMON Filter Group is mandatory when the + rmonPacketCaptureGroup is implemented." + + GROUP rmonPacketCaptureGroup + DESCRIPTION + "The RMON Packet Capture Group is optional." + + GROUP rmonEventGroup + DESCRIPTION + "The RMON Event Group is mandatory when the + rmonAlarmGroup is implemented." 
+ ::= { rmonCompliances 1 } + + rmonEtherStatsGroup OBJECT-GROUP + OBJECTS { + etherStatsIndex, etherStatsDataSource, + etherStatsDropEvents, etherStatsOctets, etherStatsPkts, + etherStatsBroadcastPkts, etherStatsMulticastPkts, + etherStatsCRCAlignErrors, etherStatsUndersizePkts, + etherStatsOversizePkts, etherStatsFragments, + etherStatsJabbers, etherStatsCollisions, + etherStatsPkts64Octets, etherStatsPkts65to127Octets, + etherStatsPkts128to255Octets, + etherStatsPkts256to511Octets, + etherStatsPkts512to1023Octets, + etherStatsPkts1024to1518Octets, + etherStatsOwner, etherStatsStatus + } + STATUS current + DESCRIPTION + "The RMON Ethernet Statistics Group." + ::= { rmonGroups 1 } + + rmonHistoryControlGroup OBJECT-GROUP + OBJECTS { + historyControlIndex, historyControlDataSource, + historyControlBucketsRequested, + historyControlBucketsGranted, historyControlInterval, + historyControlOwner, historyControlStatus + } + STATUS current + DESCRIPTION + "The RMON History Control Group." + ::= { rmonGroups 2 } + + rmonEthernetHistoryGroup OBJECT-GROUP + OBJECTS { + etherHistoryIndex, etherHistorySampleIndex, + etherHistoryIntervalStart, etherHistoryDropEvents, + etherHistoryOctets, etherHistoryPkts, + etherHistoryBroadcastPkts, etherHistoryMulticastPkts, + etherHistoryCRCAlignErrors, etherHistoryUndersizePkts, + etherHistoryOversizePkts, etherHistoryFragments, + etherHistoryJabbers, etherHistoryCollisions, + etherHistoryUtilization + } + STATUS current + DESCRIPTION + "The RMON Ethernet History Group." + ::= { rmonGroups 3 } + + rmonAlarmGroup OBJECT-GROUP + OBJECTS { + alarmIndex, alarmInterval, alarmVariable, + alarmSampleType, alarmValue, alarmStartupAlarm, + alarmRisingThreshold, alarmFallingThreshold, + alarmRisingEventIndex, alarmFallingEventIndex, + alarmOwner, alarmStatus + } + STATUS current + DESCRIPTION + "The RMON Alarm Group." + ::= { rmonGroups 4 } + + rmonHostGroup OBJECT-GROUP + OBJECTS { + hostControlIndex, hostControlDataSource, + hostControlTableSize, hostControlLastDeleteTime, + hostControlOwner, hostControlStatus, + hostAddress, hostCreationOrder, hostIndex, + hostInPkts, hostOutPkts, hostInOctets, + hostOutOctets, hostOutErrors, hostOutBroadcastPkts, + hostOutMulticastPkts, hostTimeAddress, + hostTimeCreationOrder, hostTimeIndex, + hostTimeInPkts, hostTimeOutPkts, hostTimeInOctets, + hostTimeOutOctets, hostTimeOutErrors, + hostTimeOutBroadcastPkts, hostTimeOutMulticastPkts + } + STATUS current + DESCRIPTION + "The RMON Host Group." + ::= { rmonGroups 5 } + + rmonHostTopNGroup OBJECT-GROUP + OBJECTS { + hostTopNControlIndex, hostTopNHostIndex, + hostTopNRateBase, hostTopNTimeRemaining, + hostTopNDuration, hostTopNRequestedSize, + hostTopNGrantedSize, hostTopNStartTime, + hostTopNOwner, hostTopNStatus, + hostTopNReport, hostTopNIndex, + hostTopNAddress, hostTopNRate + } + STATUS current + DESCRIPTION + "The RMON Host Top 'N' Group." + ::= { rmonGroups 6 } + + rmonMatrixGroup OBJECT-GROUP + OBJECTS { + matrixControlIndex, matrixControlDataSource, + matrixControlTableSize, matrixControlLastDeleteTime, + matrixControlOwner, matrixControlStatus, + matrixSDSourceAddress, matrixSDDestAddress, + matrixSDIndex, matrixSDPkts, + matrixSDOctets, matrixSDErrors, + matrixDSSourceAddress, matrixDSDestAddress, + matrixDSIndex, matrixDSPkts, + matrixDSOctets, matrixDSErrors + } + STATUS current + DESCRIPTION + "The RMON Matrix Group." 
+ ::= { rmonGroups 7 } + + rmonFilterGroup OBJECT-GROUP + OBJECTS { + filterIndex, filterChannelIndex, filterPktDataOffset, + filterPktData, filterPktDataMask, + filterPktDataNotMask, filterPktStatus, + filterPktStatusMask, filterPktStatusNotMask, + filterOwner, filterStatus, + channelIndex, channelIfIndex, channelAcceptType, + channelDataControl, channelTurnOnEventIndex, + channelTurnOffEventIndex, channelEventIndex, + channelEventStatus, channelMatches, + channelDescription, channelOwner, channelStatus + } + STATUS current + DESCRIPTION + "The RMON Filter Group." + ::= { rmonGroups 8 } + + rmonPacketCaptureGroup OBJECT-GROUP + OBJECTS { + bufferControlIndex, bufferControlChannelIndex, + bufferControlFullStatus, bufferControlFullAction, + bufferControlCaptureSliceSize, + bufferControlDownloadSliceSize, + bufferControlDownloadOffset, + bufferControlMaxOctetsRequested, + bufferControlMaxOctetsGranted, + bufferControlCapturedPackets, + bufferControlTurnOnTime, + bufferControlOwner, bufferControlStatus, + captureBufferControlIndex, captureBufferIndex, + captureBufferPacketID, captureBufferPacketData, + captureBufferPacketLength, captureBufferPacketTime, + captureBufferPacketStatus + } + STATUS current + DESCRIPTION + "The RMON Packet Capture Group." + ::= { rmonGroups 9 } + + rmonEventGroup OBJECT-GROUP + OBJECTS { + eventIndex, eventDescription, eventType, + eventCommunity, eventLastTimeSent, + eventOwner, eventStatus, + logEventIndex, logIndex, logTime, + logDescription + } + STATUS current + DESCRIPTION + "The RMON Event Group." + ::= { rmonGroups 10 } + + rmonNotificationGroup NOTIFICATION-GROUP + NOTIFICATIONS { risingAlarm, fallingAlarm } + STATUS current + DESCRIPTION + "The RMON Notification Group." + ::= { rmonGroups 11 } +END diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk index fb1dcb6c41..36b9764bc8 100644 --- a/lib/snmp/vsn.mk +++ b/lib/snmp/vsn.mk @@ -18,6 +18,6 @@ # %CopyrightEnd% APPLICATION = snmp -SNMP_VSN = 4.21.7 +SNMP_VSN = 4.22 PRE_VSN = APP_VSN = "$(APPLICATION)-$(SNMP_VSN)$(PRE_VSN)" diff --git a/lib/ssl/doc/src/ssl.xml b/lib/ssl/doc/src/ssl.xml index 50268ae206..4910a6f1b8 100644 --- a/lib/ssl/doc/src/ssl.xml +++ b/lib/ssl/doc/src/ssl.xml @@ -62,8 +62,8 @@ </c></p> <p>For valid options - see <seealso marker="kernel:inet">inet(3) </seealso> and - <seealso marker="kernel:gen_tcp">gen_tcp(3) </seealso>. + see <seealso marker="kernel:inet">inet(3)</seealso> and + <seealso marker="kernel:gen_tcp">gen_tcp(3)</seealso>. </p> <p> <c>ssloption() = {verify, verify_type()} | diff --git a/lib/stdlib/doc/src/ets.xml b/lib/stdlib/doc/src/ets.xml index efd9514db6..0486090e92 100644 --- a/lib/stdlib/doc/src/ets.xml +++ b/lib/stdlib/doc/src/ets.xml @@ -671,7 +671,7 @@ ets:is_compiled_ms(Broken).</code> <c>duplicate_bag</c>, the function returns a list of arbitrary length.</p> <p>Note that the time order of object insertions is preserved; - The first object inserted with the given key will be first + the first object inserted with the given key will be first in the resulting list, and so on.</p> <p>Insert and look-up times in tables of type <c>set</c>, <c>bag</c> and <c>duplicate_bag</c> are constant, regardless diff --git a/lib/stdlib/doc/src/lists.xml b/lib/stdlib/doc/src/lists.xml index 7042c84437..96a0942710 100644 --- a/lib/stdlib/doc/src/lists.xml +++ b/lib/stdlib/doc/src/lists.xml @@ -663,7 +663,7 @@ splitwith(Pred, List) -> <desc> <p>Returns the sub-list of <c><anno>List1</anno></c> starting at position 1 and with (max) <c><anno>Len</anno></c> elements. 
It is not an error for - <c><anno>Len</anno></c> to exceed the length of the list -- in that case + <c><anno>Len</anno></c> to exceed the length of the list, in that case the whole list is returned.</p> </desc> </func> diff --git a/lib/stdlib/doc/src/supervisor.xml b/lib/stdlib/doc/src/supervisor.xml index 35f4f82264..9b41672bf1 100644 --- a/lib/stdlib/doc/src/supervisor.xml +++ b/lib/stdlib/doc/src/supervisor.xml @@ -55,7 +55,8 @@ monitoring its child processes. The basic idea of a supervisor is that it should keep its child processes alive by restarting them when necessary.</p> - <p>The children of a supervisor is defined as a list of <em>child specifications</em>. When the supervisor is started, the child + <p>The children of a supervisor is defined as a list of + <em>child specifications</em>. When the supervisor is started, the child processes are started in order from left to right according to this list. When the supervisor terminates, it first terminates its child processes in reversed start order, from right to left.</p> @@ -100,7 +101,8 @@ </item> </list> <p>To prevent a supervisor from getting into an infinite loop of - child process terminations and restarts, a <em>maximum restart frequency</em> is defined using two integer values <c>MaxR</c> + child process terminations and restarts, a <em>maximum restart frequency</em> + is defined using two integer values <c>MaxR</c> and <c>MaxT</c>. If more than <c>MaxR</c> restarts occur within <c>MaxT</c> seconds, the supervisor terminates all child processes and then itself. @@ -114,7 +116,7 @@ child_spec() = {Id,StartFunc,Restart,Shutdown,Type,Modules} M = F = atom() A = [term()] Restart = permanent | transient | temporary - Shutdown = brutal_kill | int()>=0 | infinity + Shutdown = brutal_kill | int()>0 | infinity Type = worker | supervisor Modules = [Module] | dynamic Module = atom()</pre> @@ -197,7 +199,8 @@ child_spec() = {Id,StartFunc,Restart,Shutdown,Type,Modules} the callback module, if the child process is a supervisor, gen_server or gen_fsm. If the child process is an event manager (gen_event) with a dynamic set of callback modules, - <c>Modules</c> should be <c>dynamic</c>. See <em>OTP Design Principles</em> for more information about release handling.</p> + <c>Modules</c> should be <c>dynamic</c>. 
See <em>OTP Design Principles</em> + for more information about release handling.</p> </item> <item> <p>Internally, the supervisor also keeps track of the pid @@ -443,7 +446,8 @@ child_spec() = {Id,StartFunc,Restart,Shutdown,Type,Modules} </func> <func> <name name="which_children" arity="1"/> - <fsummary>Return information about all children specifications and child processes belonging to a supervisor.</fsummary> + <fsummary>Return information about all children specifications and + child processes belonging to a supervisor.</fsummary> <desc> <p>Returns a newly created list with information about all child specifications and child processes belonging to @@ -477,7 +481,8 @@ child_spec() = {Id,StartFunc,Restart,Shutdown,Type,Modules} </func> <func> <name name="count_children" arity="1"/> - <fsummary>Return counts for the number of childspecs, active children, supervisors and workers.</fsummary> + <fsummary>Return counts for the number of childspecs, active children, + supervisors and workers.</fsummary> <desc> <p>Returns a property list (see <c>proplists</c>) containing the counts for each of the following elements of the supervisor's @@ -527,7 +532,8 @@ child_spec() = {Id,StartFunc,Restart,Shutdown,Type,Modules} <v>Args = term()</v> <v>Result = {ok,{{RestartStrategy,MaxR,MaxT},[ChildSpec]}} | ignore</v> <v> RestartStrategy = <seealso marker="#type-strategy">strategy()</seealso></v> - <v> MaxR = MaxT = integer()>=0</v> + <v> MaxR = integer()>=0</v> + <v> MaxT = integer()>0</v> <v> ChildSpec = <seealso marker="#type-child_spec">child_spec()</seealso></v> </type> <desc> diff --git a/lib/stdlib/src/supervisor.erl b/lib/stdlib/src/supervisor.erl index f315064b03..c10da1989c 100644 --- a/lib/stdlib/src/supervisor.erl +++ b/lib/stdlib/src/supervisor.erl @@ -624,13 +624,12 @@ check_flags(What) -> {bad_flags, What}. update_childspec(State, StartSpec) when ?is_simple(State) -> - case check_startspec(StartSpec) of - {ok, [Child]} -> - {ok, State#state{children = [Child]}}; - Error -> - {error, Error} - end; - + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; update_childspec(State, StartSpec) -> case check_startspec(StartSpec) of {ok, Children} -> @@ -650,7 +649,7 @@ update_childspec1([Child|OldC], Children, KeepOld) -> end; update_childspec1([], Children, KeepOld) -> %% Return them in (kept) reverse start order. - lists:reverse(Children ++ KeepOld). + lists:reverse(Children ++ KeepOld). update_chsp(OldCh, Children) -> case lists:map(fun(Ch) when OldCh#child.name =:= Ch#child.name -> @@ -1148,9 +1147,9 @@ remove_child(Child, State) -> %% Args: SupName = {local, atom()} | {global, atom()} | self %% Type = {Strategy, MaxIntensity, Period} %% Strategy = one_for_one | one_for_all | simple_one_for_one | -%% rest_for_one -%% MaxIntensity = integer() -%% Period = integer() +%% rest_for_one +%% MaxIntensity = integer() >= 0 +%% Period = integer() > 0 %% Mod :== atom() %% Args :== term() %% Purpose: Check that Type is of correct type (!) @@ -1201,7 +1200,7 @@ supname(N, _) -> N. 
%%% where Name is an atom %%% Func is {Mod, Fun, Args} == {atom(), atom(), list()} %%% RestartType is permanent | temporary | transient -%%% Shutdown = integer() | infinity | brutal_kill +%%% Shutdown = integer() > 0 | infinity | brutal_kill %%% ChildType = supervisor | worker %%% Modules = [atom()] | dynamic %%% Returns: {ok, [child_rec()]} | Error diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl index 08197ee36e..d7ce432786 100644 --- a/lib/test_server/src/test_server.erl +++ b/lib/test_server/src/test_server.erl @@ -44,7 +44,7 @@ -export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]). -export([app_test/1, app_test/2]). -export([is_native/1]). --export([comment/1]). +-export([comment/1, make_priv_dir/0]). -export([os_type/0]). -export([run_on_shielded_node/2]). -export([is_cover/0,is_debug/0,is_commercial/0]). @@ -754,6 +754,25 @@ run_test_case_msgloop(Ref, Pid, CaptureStdout, Terminate, Comment, CurrConf) -> {set_curr_conf,From,NewCurrConf} -> From ! {self(),set_curr_conf,ok}, run_test_case_msgloop(Ref,Pid,CaptureStdout,Terminate,Comment,NewCurrConf); + {make_priv_dir,From} when CurrConf == undefined -> + From ! {self(),make_priv_dir,{error,no_priv_dir_in_config}}; + {make_priv_dir,From} -> + Result = + case proplists:get_value(priv_dir, element(2, CurrConf)) of + undefined -> + {error,no_priv_dir_in_config}; + PrivDir -> + case file:make_dir(PrivDir) of + ok -> + ok; + {error, eexist} -> + ok; + MkDirError -> + {error,{MkDirError,PrivDir}} + end + end, + From ! {self(),make_priv_dir,Result}, + run_test_case_msgloop(Ref,Pid,CaptureStdout,Terminate,Comment,CurrConf); {'EXIT',Pid,{Ref,Time,Value,Loc,Opts}} -> RetVal = {Time/1000000,Value,mod_loc(Loc),Opts,Comment}, run_test_case_msgloop(Ref,Pid,CaptureStdout,{true,RetVal},Comment,undefined); @@ -948,17 +967,20 @@ output(Msg,Sender) -> local_or_remote_apply({test_server_ctrl,output,[Msg,Sender]}). call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) -> + %% Starter is also the group leader process Starter = self(), Data = {Mod,Func,TCPid,TCExitReason,Loc}, EndConfProc = fun() -> + group_leader(Starter, self()), Supervisor = self(), EndConfApply = fun() -> case catch apply(Mod,end_per_testcase,[Func,Conf]) of {'EXIT',Why} -> + timer:sleep(1), group_leader() ! {printout,12, - "ERROR! ~p:end_per_testcase(~p, ~p)" + "WARNING! ~p:end_per_testcase(~p, ~p)" " crashed!\n\tReason: ~p\n", [Mod,Func,Conf,Why]}; _ -> @@ -973,6 +995,11 @@ call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) -> {'EXIT',Pid,Reason} -> Starter ! {self(),{call_end_conf,Data,{error,Reason}}} after TVal -> + exit(Pid, kill), + group_leader() ! {printout,12, + "WARNING! ~p:end_per_testcase(~p, ~p)" + " failed!\n\tReason: timetrap timeout" + " after ~w ms!\n", [Mod,Func,Conf,TVal]}, Starter ! {self(),{call_end_conf,Data,{error,timeout}}} end end, @@ -1027,6 +1054,10 @@ spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid, E = {failed,{Mod,end_per_testcase,Why}}, {Result,E} end, + group_leader() ! {printout,12, + "WARNING! ~p:end_per_testcase(~p, ~p)" + " failed!\n\tReason: timetrap timeout" + " after ~w ms!\n", [Mod,Func,EndConf,TVal]}, FailLoc = proplists:get_value(tc_fail_loc, EndConf1), case catch do_end_tc_call(Mod,Func, FailLoc, {Pid,Report,[EndConf1]}, Why) of @@ -1176,6 +1207,9 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit, exit({Ref,Time,Value,Loc,Opts}). 
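Stepping back to the supervisor documentation and code comments above, which tighten the types to Shutdown = brutal_kill | int()>0 | infinity, MaxR = integer()>=0 and MaxT = integer()>0: a minimal init/1 that satisfies the stricter values could look as follows. This is an editorial sketch only, not part of the patch; the module and child names are invented and my_worker is assumed to exist.

    -module(my_sup).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    init([]) ->
        MaxR = 3,                                  %% restarts allowed ...
        MaxT = 10,                                 %% ... within this many seconds (> 0)
        Child = {my_worker,                        %% Id
                 {my_worker, start_link, []},      %% StartFunc = {M,F,A}
                 permanent,                        %% Restart
                 5000,                             %% Shutdown: int() > 0 milliseconds
                 worker,                           %% Type
                 [my_worker]},                     %% Modules
        {ok, {{one_for_one, MaxR, MaxT}, [Child]}}.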
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) -> + %% save current state in controller loop + sync_send(group_leader(),set_curr_conf,{{Mod,Func},hd(Args)}, + 5000, fun() -> exit(no_answer_from_group_leader) end), case RunInit of run_init -> put(test_server_init_or_end_conf,{init_per_testcase,Func}), @@ -1234,8 +1268,8 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) -> %% call user callback function if defined EndConf1 = user_callback(TCCallback, Mod, Func, 'end', EndConf), %% update current state in controller loop - sync_send(group_leader(),set_curr_conf,EndConf1, - 5000, fun() -> exit(no_answer_from_group_leader) end), + sync_send(group_leader(),set_curr_conf,EndConf1, 5000, + fun() -> exit(no_answer_from_group_leader) end), {FWReturn1,TSReturn1,EndConf2} = case end_per_testcase(Mod, Func, EndConf1) of SaveCfg1={save_config,_} -> @@ -2575,11 +2609,23 @@ read_comment() -> MsgLooper = group_leader(), MsgLooper ! {read_comment,self()}, receive - {MsgLooper,read_comment,Comment} -> - Comment + {MsgLooper,read_comment,Comment} -> Comment + after + 5000 -> "" + end. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% make_priv_dir() -> ok +%% +%% Order test server to create the private directory +%% for the current test case. +make_priv_dir() -> + MsgLooper = group_leader(), + group_leader() ! {make_priv_dir,self()}, + receive + {MsgLooper,make_priv_dir,Result} -> Result after - 5000 -> - "" + 5000 -> error end. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl index 4b649c3ec5..79c45de071 100644 --- a/lib/test_server/src/test_server_ctrl.erl +++ b/lib/test_server/src/test_server_ctrl.erl @@ -164,6 +164,7 @@ -export([start_get_totals/1, stop_get_totals/0]). -export([get_levels/0, set_levels/3]). -export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]). +-export([create_priv_dir/1]). -export([cover/2, cover/3, cover/7, cross_cover_analyse/1, cross_cover_analyse/2, trc/1, stop_trace/0]). -export([testcase_callback/1]). @@ -219,8 +220,8 @@ -define(user_skip_color, "#FF8000"). -record(state,{jobs=[],levels={1,19,10}, - multiply_timetraps=1,scale_timetraps=true, - finish=false, + multiply_timetraps=1, scale_timetraps=true, + create_priv_dir=auto_per_run, finish=false, target_info, trc=false, cover=false, wait_for_node=[], testcase_callback=undefined, idle_notify=[], get_totals=false, random_seed=undefined}). @@ -506,6 +507,9 @@ scale_timetraps(Bool) -> get_timetrap_parameters() -> controller_call(get_timetrap_parameters). +create_priv_dir(Value) -> + controller_call({create_priv_dir,Value}). + trc(TraceFile) -> controller_call({trace,TraceFile}, 2*?ACCEPT_TIMEOUT). @@ -646,8 +650,8 @@ init([Param]) -> contact_main_target(local) -> %% When used by a general framework, global registration of %% test_server should not be required. - case os:getenv("TEST_SERVER_FRAMEWORK") of - FW when FW =:= false; FW =:= "undefined" -> + case get_fw_mod(undefined) of + undefined -> %% Local target! The global test_server process implemented by %% test_server.erl will not be started, so we simulate it by %% globally registering this process instead. 
@@ -811,6 +815,7 @@ handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) -> [SpecName,{State#state.multiply_timetraps, State#state.scale_timetraps}], LogDir, Name, State#state.levels, + State#state.create_priv_dir, State#state.testcase_callback, ExtraTools1), NewJobs = [{Name,Pid}|State#state.jobs], {reply, ok, State#state{jobs=NewJobs}}; @@ -820,6 +825,7 @@ handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) -> [SpecList,{State#state.multiply_timetraps, State#state.scale_timetraps}], LogDir, Name, State#state.levels, + State#state.create_priv_dir, State#state.testcase_callback, ExtraTools1), NewJobs = [{Name,Pid}|State#state.jobs], {reply, ok, State#state{jobs=NewJobs}}; @@ -837,6 +843,7 @@ handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) -> {State#state.multiply_timetraps, State#state.scale_timetraps}], LogDir, Name, State#state.levels, + State#state.create_priv_dir, State#state.testcase_callback, ExtraTools1), NewJobs = [{Name,Pid}|State#state.jobs], {reply, ok, State#state{jobs=NewJobs}} @@ -1045,6 +1052,18 @@ handle_call({cover,App,Analyse}, _From, State) -> {reply,ok,State#state{cover={App,Analyse}}}; %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% handle_call({create_priv_dir,Value}, _, State) -> ok | {error,Reason} +%% +%% Set create_priv_dir to either auto_per_run (create common priv dir once +%% per test run), manual_per_tc (the priv dir name will be unique for each +%% test case, but the user has to call test_server:make_priv_dir/0 to create +%% it), or auto_per_tc (unique priv dir created automatically for each test +%% case). + +handle_call({create_priv_dir,Value}, _From, State) -> + {reply,ok,State#state{create_priv_dir=Value}}; + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% handle_call({testcase_callback,{Mod,Func}}, _, State) -> ok | {error,Reason} %% %% Add a callback function that will be called before and after every @@ -1321,7 +1340,7 @@ kill_all_jobs([]) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% spawn_tester(Mod, Func, Args, Dir, Name, Levels, +%% spawn_tester(Mod, Func, Args, Dir, Name, Levels, CreatePrivDir, %% TestCaseCallback, ExtraTools) -> Pid %% Mod = atom() %% Func = atom() @@ -1329,6 +1348,7 @@ kill_all_jobs([]) -> %% Dir = string() %% Name = string() %% Levels = {integer(),integer(),integer()} +%% CreatePrivDir = auto_per_run | manual_per_tc | auto_per_tc %% TestCaseCallback = {CBMod,CBFunc} | undefined %% ExtraTools = [ExtraTool,...] %% ExtraTool = CoverInfo | TraceInfo | RandomSeed @@ -1339,14 +1359,15 @@ kill_all_jobs([]) -> %% When the named function is done executing, a summary of the results %% is printed to the log files. -spawn_tester(Mod, Func, Args, Dir, Name, Levels, TCCallback, ExtraTools) -> +spawn_tester(Mod, Func, Args, Dir, Name, Levels, + CreatePrivDir, TCCallback, ExtraTools) -> spawn_link( fun() -> init_tester(Mod, Func, Args, Dir, Name, Levels, - TCCallback, ExtraTools) + CreatePrivDir, TCCallback, ExtraTools) end). 
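The create_priv_dir comment above introduces three modes; the manual_per_tc mode pairs with the new test_server:make_priv_dir/0 exported earlier in this patch. A rough usage sketch, not part of the patch (suite and case names invented): the framework selects the mode with test_server_ctrl:create_priv_dir(manual_per_tc), and a test case then creates its private directory only when it actually needs one.

    %% In a test suite; Config is the per-case configuration list.
    my_case(Config) when is_list(Config) ->
        ok = test_server:make_priv_dir(),                 %% create this case's priv dir
        PrivDir = proplists:get_value(priv_dir, Config),  %% unique path for this case
        ok = file:write_file(filename:join(PrivDir, "scratch.txt"), <<"data">>),
        ok.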
init_tester(Mod, Func, Args, Dir, Name, {SumLev,MajLev,MinLev}, - TCCallback, ExtraTools) -> + CreatePrivDir, TCCallback, ExtraTools) -> process_flag(trap_exit, true), put(test_server_name, Name), put(test_server_dir, Dir), @@ -1357,8 +1378,21 @@ init_tester(Mod, Func, Args, Dir, Name, {SumLev,MajLev,MinLev}, put(test_server_summary_level, SumLev), put(test_server_major_level, MajLev), put(test_server_minor_level, MinLev), + put(test_server_create_priv_dir, CreatePrivDir), put(test_server_random_seed, proplists:get_value(random_seed, ExtraTools)), put(test_server_testcase_callback, TCCallback), + case os:getenv("TEST_SERVER_FRAMEWORK") of + FW when FW =:= false; FW =:= "undefined" -> + put(test_server_framework, '$none'); + FW -> + put(test_server_framework, list_to_atom(FW)), + case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of + FWName when FWName =:= false; FWName =:= "undefined" -> + put(test_server_framework_name, '$none'); + FWName -> + put(test_server_framework_name, list_to_atom(FWName)) + end + end, %% before first print, read and set logging options LogOpts = test_server_sup:framework_call(get_logopts, [], []), put(test_server_logopts, LogOpts), @@ -1678,11 +1712,7 @@ do_test_cases(TopCases, SkipCases, Config, TimetrapData) when is_list(TopCases), is_tuple(TimetrapData) -> {ok,TestDir} = start_log_file(), - FwMod = - case os:getenv("TEST_SERVER_FRAMEWORK") of - FW when FW =:= false; FW =:= "undefined" -> ?MODULE; - FW -> list_to_atom(FW) - end, + FwMod = get_fw_mod(?MODULE), case collect_all_cases(TopCases, SkipCases) of {error,Why} -> print(1, "Error starting: ~p", [Why]), @@ -1818,7 +1848,7 @@ do_test_cases(TopCase, SkipCases, Config, TimetrapSpec) -> %% Creates the log directories, the major log file and the html log file. %% The log files are initialized with some header information. %% -%% The name of the log directory will be <Name>.LOGS/run.<Date>/ where +%% The name of the log directory will be <Name>.logs/run.<Date>/ where %% Name is the test suite name and Date is the current date and time.
start_log_file() -> @@ -2107,17 +2137,17 @@ add_init_and_end_per_suite([{make,_,_}=Case|Cases], LastMod, LastRef, FwMod) -> add_init_and_end_per_suite([{skip_case,{{Mod,all},_}}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod -> {PreCases, NextMod, NextRef} = - do_add_end_per_suite_and_skip(LastMod, LastRef, Mod), + do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([{skip_case,{{Mod,_},_}}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod), + do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_}}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod), + do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([{skip_case,_}=Case|Cases], LastMod, LastRef, FwMod) -> [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; @@ -2129,7 +2159,7 @@ add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod, case proplists:get_value(suite, Props) of Suite when Suite =/= undefined, Suite =/= LastMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Suite), + do_add_init_and_end_per_suite(LastMod, LastRef, Suite, FwMod), Case1 = {conf,Ref,proplists:delete(suite,Props),{FwMod,Func}}, PreCases ++ [Case1|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; @@ -2139,19 +2169,19 @@ add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod, add_init_and_end_per_suite([{conf,_,_,{Mod,_}}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod, Mod =/= FwMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod), + do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([{conf,_,_,_}=Case|Cases], LastMod, LastRef, FwMod) -> [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; add_init_and_end_per_suite([{Mod,_}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod, Mod =/= FwMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod), + do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([{Mod,_,_}=Case|Cases], LastMod, LastRef, FwMod) when Mod =/= LastMod, Mod =/= FwMod -> {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod), + do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)]; add_init_and_end_per_suite([Case|Cases], LastMod, LastRef, FwMod)-> [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; @@ -2159,10 +2189,23 @@ add_init_and_end_per_suite([], _LastMod, undefined, _FwMod) -> []; add_init_and_end_per_suite([], _LastMod, skipped_suite, _FwMod) -> []; -add_init_and_end_per_suite([], LastMod, LastRef, _FwMod) -> - [{conf,LastRef,[],{LastMod,end_per_suite}}]. 
+add_init_and_end_per_suite([], LastMod, LastRef, FwMod) -> + %% we'll add end_per_suite here even if it's not exported + %% (and simply let the call fail if it's missing) + case erlang:function_exported(LastMod, end_per_suite, 1) of + true -> + [{conf,LastRef,[],{LastMod,end_per_suite}}]; + false -> + %% let's call a "fake" end_per_suite if it exists + case erlang:function_exported(FwMod, end_per_suite, 1) of + true -> + [{conf,LastRef,[{suite,LastMod}],{FwMod,end_per_suite}}]; + false -> + [{conf,LastRef,[],{LastMod,end_per_suite}}] + end + end. -do_add_init_and_end_per_suite(LastMod, LastRef, Mod) -> +do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod) -> case code:is_loaded(Mod) of false -> code:load_file(Mod); _ -> ok @@ -2173,7 +2216,16 @@ do_add_init_and_end_per_suite(LastMod, LastRef, Mod) -> Ref = make_ref(), {[{conf,Ref,[],{Mod,init_per_suite}}],Mod,Ref}; false -> - {[],Mod,undefined} + %% let's call a "fake" init_per_suite if it exists + case erlang:function_exported(FwMod, init_per_suite, 1) of + true -> + Ref = make_ref(), + {[{conf,Ref,[{suite,Mod}], + {FwMod,init_per_suite}}],Mod,Ref}; + false -> + {[],Mod,undefined} + end + end, Cases = if LastRef==undefined -> @@ -2181,20 +2233,44 @@ do_add_init_and_end_per_suite(LastMod, LastRef, Mod) -> LastRef==skipped_suite -> Init; true -> - %% Adding end_per_suite here without checking if the - %% function is actually exported. This is because a - %% conf case must have an end case - so if it doesn't - %% exist, it will only fail... - [{conf,LastRef,[],{LastMod,end_per_suite}}|Init] + %% we'll add end_per_suite here even if it's not exported + %% (and simply let the call fail if it's missing) + case erlang:function_exported(LastMod, end_per_suite, 1) of + true -> + [{conf,LastRef,[],{LastMod,end_per_suite}}|Init]; + false -> + %% let's call a "fake" end_per_suite if it exists + case erlang:function_exported(FwMod, end_per_suite, 1) of + true -> + [{conf,LastRef,[{suite,Mod}], + {FwMod,end_per_suite}}|Init]; + false -> + [{conf,LastRef,[],{LastMod,end_per_suite}}|Init] + end + end end, {Cases,NextMod,NextRef}. -do_add_end_per_suite_and_skip(LastMod, LastRef, Mod) -> +do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod) -> case LastRef of No when No==undefined ; No==skipped_suite -> {[],Mod,skipped_suite}; _Ref -> - {[{conf,LastRef,[],{LastMod,end_per_suite}}],Mod,skipped_suite} + case erlang:function_exported(LastMod, end_per_suite, 1) of + true -> + {[{conf,LastRef,[],{LastMod,end_per_suite}}], + Mod,skipped_suite}; + false -> + case erlang:function_exported(FwMod, end_per_suite, 1) of + true -> + %% let's call "fake" end_per_suite if it exists + {[{conf,LastRef,[],{FwMod,end_per_suite}}], + Mod,skipped_suite}; + false -> + {[{conf,LastRef,[],{LastMod,end_per_suite}}], + Mod,skipped_suite} + end + end end. 
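The hunks above make test_server fall back to FwMod:init_per_suite/1 and FwMod:end_per_suite/1 when a suite does not export them, tagging the generated conf case with {suite,Mod}. A framework module that provides such fallbacks could be as simple as the sketch below; this is an editorial illustration, not part of the patch, the module name is invented, and how the framework makes use of the suite information is not shown.

    -module(my_framework).
    -export([init_per_suite/1, end_per_suite/1]).

    %% Used in place of Mod:init_per_suite/1 when the suite has none.
    init_per_suite(Config) ->
        Config.

    %% Used in place of Mod:end_per_suite/1 when the suite has none.
    end_per_suite(_Config) ->
        ok.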
@@ -2754,7 +2830,15 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0, {skipped,TcSkip}, {failed,TcFail}]}] end, - TSDirs = [{priv_dir,get(test_server_priv_dir)},{data_dir,get_data_dir(Mod)}], + + case get(test_server_create_priv_dir) of + auto_per_run -> % use common priv_dir + TSDirs = [{priv_dir,get(test_server_priv_dir)}, + {data_dir,get_data_dir(Mod)}]; + _ -> + TSDirs = [{data_dir,get_data_dir(Mod)}] + end, + ActualCfg = if not StartConf -> update_config(hd(Config), TSDirs ++ CfgProps); @@ -2764,7 +2848,8 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0, end, Mode0), update_config(hd(Config), TSDirs ++ [{tc_group_path,GroupPath} | CfgProps]) - end, + end, + CurrMode = curr_mode(Ref, Mode0, Mode), ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init, target, TimetrapData, CurrMode), @@ -2916,8 +3001,13 @@ run_test_cases_loop([{conf,_Ref,_Props,_X}=Conf|_Cases0], run_test_cases_loop([{Mod,Case}|Cases], Config, TimetrapData, Mode, Status) -> ActualCfg = - update_config(hd(Config), [{priv_dir,get(test_server_priv_dir)}, - {data_dir,get_data_dir(Mod)}]), + case get(test_server_create_priv_dir) of + auto_per_run -> + update_config(hd(Config), [{priv_dir,get(test_server_priv_dir)}, + {data_dir,get_data_dir(Mod)}]); + _ -> + update_config(hd(Config), [{data_dir,get_data_dir(Mod)}]) + end, run_test_cases_loop([{Mod,Case,[ActualCfg]}|Cases], Config, TimetrapData, Mode, Status); @@ -3254,7 +3344,7 @@ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) -> print(major, "=started ~s", [lists:flatten(timestamp_get(""))]), print(major, "=result skipped: ~s", [Comment1]), print(2,"*** Skipping test case #~w ~p ***", [CaseNum,{Mod,Func}]), - TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]), + TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]), GroupName = case get_name(Mode) of undefined -> ""; Name -> cast_to_list(Name) @@ -3268,7 +3358,7 @@ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) -> "<td>" ++ Col0 ++ "0.000s" ++ Col1 ++ "</td>" "<td><font color=\"~s\">SKIPPED</font></td>" "<td>~s</td></tr>\n", - [num2str(CaseNum),Mod,GroupName,Func,ResultCol,Comment1]), + [num2str(CaseNum),fw_name(Mod),GroupName,Func,ResultCol,Comment1]), if CaseNum > 0 -> {US,AS} = get(test_server_skipped), case Type of @@ -3638,9 +3728,14 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where, %% if this runs on a parallel test case process, %% copy the dictionary from the main process do_if_parallel(Main, fun() -> process_flag(trap_exit, true) end, ok), - CopyDict = fun() -> lists:foreach(fun({Key,Val}) -> put(Key, Val) end, State) end, + CopyDict = fun() -> lists:foreach(fun({Key,Val}) -> + put(Key, Val) + end, State) + end, do_if_parallel(Main, CopyDict, ok), - do_if_parallel(Main, fun() -> put(test_server_common_io_handler, {tc,Main}) end, ok), + do_if_parallel(Main, fun() -> + put(test_server_common_io_handler, {tc,Main}) + end, ok), %% if io is being buffered, send start io session message %% (no matter if case runs on parallel or main process) case get(test_server_common_io_handler) of @@ -3660,8 +3755,35 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where, MinorBase = filename:basename(MinorName), print(major, "=logfile ~s", [filename:basename(MinorName)]), - Args1 = [[{tc_logfile,MinorName} | proplists:delete(tc_logfile,hd(Args))]], - test_server_sup:framework_call(report, [tc_start,{{?pl2a(Mod),Func},MinorName}]), + UpdatedArgs = + %% maybe create unique private directory for test case or config func + 
case get(test_server_create_priv_dir) of + auto_per_run -> + update_config(hd(Args), [{tc_logfile,MinorName}]); + PrivDirMode -> + RunDir = filename:dirname(MinorName), + Ext = + if Num == 0 -> + {_,S,Us} = now(), + lists:flatten(io_lib:format(".~w.~w", [S,Us])); + true -> + %% create unique private directory for test case + RunDir = filename:dirname(MinorName), + lists:flatten(io_lib:format(".~w", [Num])) + end, + PrivDir = filename:join(RunDir, ?priv_dir) ++ Ext, + if PrivDirMode == auto_per_tc -> + ok = file:make_dir(PrivDir); + PrivDirMode == manual_per_tc -> + ok + end, + update_config(hd(Args), [{priv_dir,PrivDir++"/"}, + {tc_logfile,MinorName}]) + end, + + test_server_sup:framework_call(report, + [tc_start,{{?pl2a(Mod),Func},MinorName}]), + print_props((RunInit==skip_init), get_props(Mode)), GroupName = case get_name(Mode) of undefined -> ""; @@ -3675,12 +3797,13 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where, "<td>" ++ Col0 ++ "~s" ++ Col1 ++ "</td>" "<td><a href=\"~s\">~p</a></td>" "<td><a href=\"~s#top\"><</a> <a href=\"~s#end\">></a></td>", - [num2str(Num),Mod,GroupName,MinorBase,Func,MinorBase,MinorBase]), + [num2str(Num),fw_name(Mod),GroupName,MinorBase,Func, + MinorBase,MinorBase]), do_if_parallel(Main, ok, fun erlang:yield/0), %% run the test case {Result,DetectedFail,ProcsBefore,ProcsAfter} = - run_test_case_apply(Num, Mod, Func, Args1, get_name(Mode), + run_test_case_apply(Num, Mod, Func, [UpdatedArgs], get_name(Mode), RunInit, Where, TimetrapData), {Time,RetVal,Loc,Opts,Comment} = case Result of @@ -4122,6 +4245,46 @@ progress(ok, _CaseNum, Mod, Func, _Loc, RetVal, Time, %%-------------------------------------------------------------------- %% various help functions +get_fw_mod(Mod) -> + case get(test_server_framework) of + undefined -> + case os:getenv("TEST_SERVER_FRAMEWORK") of + FW when FW =:= false; FW =:= "undefined" -> + Mod; + FW -> + list_to_atom(FW) + end; + '$none' -> Mod; + FW -> FW + end. + +fw_name(?MODULE) -> + test_server; +fw_name(Mod) -> + case get(test_server_framework_name) of + undefined -> + case get_fw_mod(undefined) of + undefined -> + Mod; + Mod -> + case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of + FWName when FWName =:= false; FWName =:= "undefined" -> + Mod; + FWName -> + list_to_atom(FWName) + end; + _ -> + Mod + end; + '$none' -> + Mod; + FWName -> + case get_fw_mod(Mod) of + Mod -> FWName; + _ -> Mod + end + end. + if_auto_skip(Reason={failed,{_,init_per_testcase,_}}, True, _False) -> {Reason,True()}; if_auto_skip({_T,{skip,Reason={failed,{_,init_per_testcase,_}}},_Opts}, True, _False) -> @@ -4225,8 +4388,8 @@ get_font_style1(default) -> %% set to false. format_exception(Reason={_Error,Stack}) when is_list(Stack) -> - case os:getenv("TEST_SERVER_FRAMEWORK") of - FW when FW =:= false; FW =:= "undefined" -> + case get_fw_mod(undefined) of + undefined -> case application:get_env(test_server, format_exception) of {ok,false} -> {"~p",Reason}; @@ -4234,7 +4397,7 @@ format_exception(Reason={_Error,Stack}) when is_list(Stack) -> do_format_exception(Reason) end; FW -> - case application:get_env(list_to_atom(FW), format_exception) of + case application:get_env(FW, format_exception) of {ok,false} -> {"~p",Reason}; _ -> @@ -4830,8 +4993,8 @@ collect_case([Case | Cases], St, Acc) -> collect_case(Cases, NewSt, Acc ++ FlatCases). 
collect_case_invoke(Mod, Case, MFA, St) -> - case os:getenv("TEST_SERVER_FRAMEWORK") of - FW when FW =:= false; FW =:= "undefined" -> + case get_fw_mod(undefined) of + undefined -> case catch apply(Mod, Case, [suite]) of {'EXIT',_} -> {ok,[MFA],St}; @@ -4839,7 +5002,9 @@ collect_case_invoke(Mod, Case, MFA, St) -> collect_subcases(Mod, Case, MFA, St, Suite) end; _ -> - Suite = test_server_sup:framework_call(get_suite, [?pl2a(Mod),Case], []), + Suite = test_server_sup:framework_call(get_suite, + [?pl2a(Mod),Case], + []), collect_subcases(Mod, Case, MFA, St, Suite) end. diff --git a/system/doc/design_principles/sup_princ.xml b/system/doc/design_principles/sup_princ.xml index c473e257fa..353430752c 100644 --- a/system/doc/design_principles/sup_princ.xml +++ b/system/doc/design_principles/sup_princ.xml @@ -136,7 +136,7 @@ init(...) -> M = F = atom() A = [term()] Restart = permanent | transient | temporary - Shutdown = brutal_kill | integer()>=0 | infinity + Shutdown = brutal_kill | integer()>0 | infinity Type = worker | supervisor Modules = [Module] | dynamic Module = atom()]]></code> diff --git a/system/doc/efficiency_guide/profiling.xml b/system/doc/efficiency_guide/profiling.xml index 65ba4b3369..230aa3504b 100644 --- a/system/doc/efficiency_guide/profiling.xml +++ b/system/doc/efficiency_guide/profiling.xml @@ -116,7 +116,7 @@ minimize runtime performance impact. Using fprof is just a matter of calling a few library functions, see <seealso marker="tools:fprof">fprof</seealso> - manual page under the application tools.<c>fprof</c> was introduced in + manual page under the application tools.<c> fprof</c> was introduced in version R8 of Erlang/OTP. </p> </section> diff --git a/system/doc/tutorial/c_portdriver.xmlsrc b/system/doc/tutorial/c_portdriver.xmlsrc index f875fa80d2..e6887a25d1 100644 --- a/system/doc/tutorial/c_portdriver.xmlsrc +++ b/system/doc/tutorial/c_portdriver.xmlsrc @@ -68,8 +68,8 @@ start(SharedLib) -> case erl_ddll:load_driver(".", SharedLib) of ok -> ok; -\011{error, already_loaded} -> ok; -\011_ -> exit({error, could_not_load_driver}) + {error, already_loaded} -> ok; + _ -> exit({error, could_not_load_driver}) end, spawn(?MODULE, init, [SharedLib]). @@ -102,7 +102,7 @@ loop(Port) -> {call, Caller, Msg} -> Port ! {self(), {command, encode(Msg)}}, receive -\011 {Port, {data, Data}} -> + {Port, {data, Data}} -> Caller ! {complex, decode(Data)} end, loop(Port) |