Skip site navigation (1)Skip section navigation (2)

FreeBSD Manual Pages


home | help
et_collector(3)		   Erlang Module Definition	       et_collector(3)

       et_collector  -	Collect	trace events and provide a backing storage ap-
       propriate for iteration

       Interface module	for the	Event Trace (ET) application

       start_link(Options) -> {ok, CollectorPid} | {error, Reason}


		 Options = [option()]
		 option() = {parent_pid, pid()}	| {event_order,	event_order()}
		 |   {dict_insert,  {filter,  collector},  collector_fun()}  |
		 {dict_insert,	 {filter,   event_filter_name()},   event_fil-
		 ter_fun()} | {dict_insert, {subscriber, pid()}, dict_val()} |
		 {dict_insert,	 dict_key(),   dict_val()}   |	 {dict_delete,
		 dict_key()} | {trace_client, trace_client()} |	{trace_global,
		 boolean()} | {trace_pattern, trace_pattern()} |  {trace_port,
		 integer()} | {trace_max_queue,	integer()}
		 event_order() = trace_ts | event_ts
		 trace_pattern()	 =	  {report_module(),	   ex-
		 tended_dbg_match_spec()} | undefined
		 report_module() = atom() | undefined
		 extended_dbg_match_spec() = detail_level() | dbg_match_spec()
		 detail_level() = min | max | integer(X) when X >= 0, X =< 100
		 trace_client()	    =	  {event_file,	   file_name()}	     |
		 {dbg_trace_type(), dbg_trace_parameters()}
		 file_name() = string()
		 collector_fun() = trace_filter_fun() |	event_filter_fun()
		 trace_filter_fun() = fun(TraceData) -> false | true | {true, NewEvent}
		 event_filter_fun() = fun(Event) -> false | true | {true, NewEvent}
		 event_filter_name() = atom()
		 TraceData = erlang_trace_data()
		 Event = NewEvent = record(event)
		 dict_key() = term()
		 dict_val() = term()
		 CollectorPid =	pid()
		 Reason	= term()

	      Start a collector	process.

	      The  collector  collects	trace events and keeps them ordered by
	      their timestamp. The timestamp may either	reflect	the time  when
	      the actual trace data was	generated (trace_ts) or	when the trace
	      data was transformed into	an event  record  (event_ts).  If  the
	      time stamp is missing in the trace data (missing timestamp op-
	      tion to erlang:trace/4) the trace_ts will be set to the event_ts.

	      Events  are  reported  to	the collector directly with the	report
	      function or indirectly via one or	more trace  clients.  All  re-
	      ported  events  are first	filtered thru the collector filter be-
	      fore they are stored by the collector. By replacing the default
	      collector filter with a customized one it is possible to allow
	      any trace data as input. The collector filter is a dictionary
	      entry  with the predefined key {filter, collector} and the value
	      is a fun of arity 1. See et_selector:make_event/1 for interface
	      details, such as which erlang:trace/1 tuples are accepted.

	      The collector has	a built-in dictionary service. Any term	may be
	      stored as	value in the dictionary	and bound  to  a  unique  key.
	      When  new	values are inserted with an existing key, the new val-
	      ues will overwrite the existing ones. Processes may subscribe on
	      dictionary  updates  by  using {subscriber, pid()} as dictionary
	      key. All dictionary updates will be propagated to	the subscriber
	      processes	 matching  the	pattern	{{subscriber, '_'}, '_'} where
	      the first	'_' is interpreted as a	pid().

	      In global	trace mode, the	 collector  will  automatically	 start
	      tracing  on  all connected Erlang	nodes. When a node connects, a
	      port tracer will be started on that  node	 and  a	 corresponding
	      trace client on the collector node.

	      Default values:

		* parent_pid - self().

		* event_order -	trace_ts.

		* trace_global - false.

		* trace_pattern	- undefined.

		* trace_port - 4711.

		* trace_max_queue - 50.

       stop(CollectorPid) -> ok


		 CollectorPid =	pid()

	      Stop a collector process.

       save_event_file(CollectorPid, FileName, Options) -> ok | {error, Reason}


		 CollectorPid =	pid()
		 FileName = string()
		 Options = [option()]
		 Reason	= term()
		 option() = event_option() | file_option() | table_option()
		 event_option()	= existing
		 file_option() = write | append
		 table_option()	= keep | clear

	      Save the events to a file.

	      By default the currently stored events (existing)	are written to
	      a	 brand	new  file (write) and the events are kept (keep) after
	      they have	been written to	the file.

	      Instead of keeping the events after writing them to file, it is
	      possible to remove all stored events after they have success-
	      fully been written to the file (clear).

	      The options defaults to existing,	write and keep.

       report(Handle, TraceOrEvent) -> {ok, Continuation} | exit(Reason)
       report_event(Handle, DetailLevel, FromTo, Label,	Contents) -> {ok, Con-
       tinuation} | exit(Reason)
       report_event(Handle,  DetailLevel,  From,  To, Label, Contents) -> {ok,
       Continuation} | exit(Reason)


		 Handle	= Initial | Continuation
		 Initial = collector_pid()
		 collector_pid() = pid()
		 Continuation =	record(table_handle)
		 TraceOrEvent = record(event) | dbg_trace_tuple() | end_of_trace
		 Reason = term()
		 DetailLevel = integer(X) when X >= 0, X =< 100
		 From =	actor()
		 To = actor()
		 FromTo	= actor()
		 Label = atom()	| string() | term()
		 Contents = [{Key, Value}] | term()
		 actor() = term()

	      Report an	event to the collector.

	      All events are filtered thru the collector filter, which option-
	      ally may transform or discard the	event. The first  call	should
	      use  the	pid  of	 the collector process as report handle, while
	      subsequent calls should use the table handle.

       make_key(Type, Stuff) ->	Key


		 Type =	record(table_handle) | trace_ts	| event_ts
		 Stuff = record(event) | Key
		 Key = record(event_ts)	| record(trace_ts)

	      Make a key out of	an event record	or an old key.

       get_global_pid()	-> CollectorPid	| exit(Reason)


		 CollectorPid =	pid()
		 Reason	= term()

	      Return the identity of the globally registered collector if
	      there is any.

       change_pattern(CollectorPid, RawPattern)	-> {old_pattern, TracePattern}


		 CollectorPid =	pid()
		 RawPattern = {report_module(),	extended_dbg_match_spec()}
		 report_module() = atom() | undefined
		 extended_dbg_match_spec() = detail_level() | dbg_match_spec()
		 RawPattern = detail_level()
		 detail_level() = min | max | integer(X) when X >= 0, X =< 100
		 TracePattern = {report_module(), dbg_match_spec()}

	      Change active trace pattern globally on all trace	nodes.

       dict_insert(CollectorPid, {filter, collector}, FilterFun) -> ok
       dict_insert(CollectorPid, {subscriber, SubscriberPid}, Void) -> ok
       dict_insert(CollectorPid, Key, Val) -> ok


		 CollectorPid =	pid()
		 FilterFun = filter_fun()
		 SubscriberPid = pid()
		 Void =	term()
		 Key = term()
		 Val = term()

	      Insert  a	 dictionary  entry  and	send a {et, {dict_insert, Key,
	      Val}} tuple to all registered subscribers.

	      If the entry is a	new subscriber,	it will	 imply	that  the  new
	      subscriber  process  first will get one message for each already
	      stored dictionary	entry, before it and all old subscribers  will
	      get  this	 particular  entry. The	collector process links	to and
	      then  supervises	the  subscriber	 process.  If  the  subscriber
	      process  dies  it	will imply that	it gets	unregistered as	with a
	      normal dict_delete/2.

       dict_lookup(CollectorPid, Key) -> [Val]


		 CollectorPid =	pid()
		 FilterFun = filter_fun()
		 CollectorPid =	pid()
		 Key = term()
		 Val = term()

	      Lookup a dictionary entry	and return zero	or one value.

       dict_delete(CollectorPid, Key) -> ok


		 CollectorPid =	pid()
		 SubscriberPid = pid()
		 Key = {subscriber, SubscriberPid} | term()

	      Delete a dictionary entry	and send a  {et,  {dict_delete,	 Key}}
	      tuple to all registered subscribers.

	      If the deleted entry is a registered subscriber, it will imply
	      that the subscriber process gets unregistered as subscriber,
	      as well as it gets its final message.

       dict_match(CollectorPid,	Pattern) -> [Match]


		 CollectorPid =	pid()
		 Pattern = '_' | {key_pattern(), val_pattern()}
		 key_pattern() = ets_match_object_pattern()
		 val_pattern() = ets_match_object_pattern()
		 Match = {key(), val()}
		 key() = term()
		 val() = term()

	      Match some dictionary entries

       multicast(CollectorPid, Msg) -> ok


		 CollectorPid = pid()
		 Msg = term()

	      Sends a message to all registered	subscribers.

       start_trace_client(CollectorPid,	 Type,	Parameters)  ->	 file_loaded |
       {trace_client_pid, pid()} | exit(Reason)


		 Type =	dbg_trace_client_type()
		 Parameters = dbg_trace_client_parameters()
		 Pid = dbg_trace_client_pid()

	      Load raw Erlang trace from a file, port or process.

       iterate(Handle, Prev, Limit) -> NewAcc

	      Short for iterate(Handle, Prev, Limit, undefined, Prev) -> NewAcc.

       iterate(Handle, Prev, Limit, Fun, Acc) -> NewAcc


		 Handle	= collector_pid() | table_handle()
		 Prev =	first |	last | event_key()
		 Limit = done()	| forward() | backward()
		 collector_pid() = pid()
		 table_handle()	= record(table_handle)
		 event_key() = record(event) | record(event_ts) | record(trace_ts)
		 done() = 0
		 forward() = infinity |	integer(X) where X > 0
		 backward() = '-infinity' | integer(X) where X < 0
		 Fun = fun(Event, Acc) -> NewAcc
		 Acc = NewAcc =	term()

	      Iterate over the currently stored	events.

	      Iterates over the currently stored events and applies a function
	      for each event. The iteration may be performed forwards or back-
	      wards and may be limited to a maximum number of events (Limit).

       clear_table(Handle) -> ok


		 Handle	= collector_pid() | table_handle()
		 collector_pid() = pid()
		 table_handle()	= record(table_handle)

	      Clear the	event table.

Ericsson AB			   et 1.6.5		       et_collector(3)


Want to link to this manual page? Use this URL:

home | help