From 66643fd1687f7c8316e374f0780c9c51cc437e71 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Wed, 24 Apr 2024 10:30:02 +0300 Subject: [PATCH 01/31] Reverts event-sink retirement --- apps/machinegun/src/mg_configurator.erl | 77 ++++- .../test/mg_prometheus_metric_SUITE.erl | 4 + apps/machinegun/test/mg_tests_SUITE.erl | 8 + .../src/mg_core_events_sink_machine.erl | 299 ++++++++++++++++++ .../mg_core_events_sink_machine_SUITE.erl | 169 ++++++++++ apps/mg_cth/src/mg_cth_configurator.erl | 85 ++++- apps/mg_woody/src/mg_woody.erl | 7 +- apps/mg_woody/src/mg_woody_event_sink.erl | 49 ++- apps/mg_woody/test/mg_event_sink_client.erl | 51 +++ .../test/mg_modernizer_tests_SUITE.erl | 4 + apps/mg_woody/test/mg_stress_SUITE.erl | 10 +- apps/mg_woody/test/mg_woody_tests_SUITE.erl | 151 ++++++++- config/config.yaml | 3 + rel_scripts/configurator.escript | 15 + 14 files changed, 889 insertions(+), 43 deletions(-) create mode 100644 apps/mg_core/src/mg_core_events_sink_machine.erl create mode 100644 apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl create mode 100644 apps/mg_woody/test/mg_event_sink_client.erl diff --git a/apps/machinegun/src/mg_configurator.erl b/apps/machinegun/src/mg_configurator.erl index 94e8a79f..f77746a8 100644 --- a/apps/machinegun/src/mg_configurator.erl +++ b/apps/machinegun/src/mg_configurator.erl @@ -21,10 +21,17 @@ event_stash_size := non_neg_integer() }. +-type event_sink_ns() :: #{ + default_processing_timeout := timeout(), + storage => mg_core_storage:options(), + worker => mg_core_worker:options() +}. + -type namespaces() :: #{mg_core:ns() => events_machines()}. 
-type config() :: #{ woody_server := mg_woody:woody_server(), + event_sink_ns := event_sink_ns(), namespaces := namespaces(), pulse := pulse(), quotas => [mg_core_quota_worker:options()], @@ -39,6 +46,7 @@ construct_child_specs( #{ woody_server := WoodyServer, + event_sink_ns := EventSinkNS, namespaces := Namespaces, pulse := Pulse } = Config @@ -48,12 +56,14 @@ construct_child_specs( ClusterOpts = maps:get(cluster, Config, #{}), QuotasChildSpec = quotas_child_specs(Quotas, quota), - EventMachinesChildSpec = events_machines_child_specs(Namespaces, Pulse), + EventSinkChildSpec = event_sink_ns_child_spec(EventSinkNS, event_sink, Pulse), + EventMachinesChildSpec = events_machines_child_specs(Namespaces, EventSinkNS, Pulse), WoodyServerChildSpec = mg_woody:child_spec( woody_server, #{ pulse => Pulse, - automaton => api_automaton_options(Namespaces, Pulse), + automaton => api_automaton_options(Namespaces, EventSinkNS, Pulse), + event_sink => api_event_sink_options(Namespaces, EventSinkNS, Pulse), woody_server => WoodyServer, additional_routes => [ get_startup_route(), @@ -66,6 +76,7 @@ construct_child_specs( lists:flatten([ QuotasChildSpec, + EventSinkChildSpec, EventMachinesChildSpec, ClusterSpec, WoodyServerChildSpec @@ -101,16 +112,22 @@ quotas_child_specs(Quotas, ChildID) -> || Options <- Quotas ]. --spec events_machines_child_specs(namespaces(), pulse()) -> supervisor:child_spec(). -events_machines_child_specs(NSs, Pulse) -> - NsOptions = [events_machine_options(NS, NSs, Pulse) || NS <- maps:keys(NSs)], +-spec events_machines_child_specs(namespaces(), event_sink_ns(), pulse()) -> supervisor:child_spec(). +events_machines_child_specs(NSs, EventSinkNS, Pulse) -> + NsOptions = [ + events_machine_options(NS, NSs, EventSinkNS, Pulse) + || NS <- maps:keys(NSs) + ], mg_namespace_sup:child_spec(NsOptions, namespaces_sup). --spec events_machine_options(mg_core:ns(), namespaces(), pulse()) -> mg_core_events_machine:options(). 
-events_machine_options(NS, NSs, Pulse) -> +-spec events_machine_options(mg_core:ns(), namespaces(), event_sink_ns(), pulse()) -> mg_core_events_machine:options(). +events_machine_options(NS, NSs, EventSinkNS, Pulse) -> NSConfigs = maps:get(NS, NSs), #{processor := ProcessorConfig, storage := Storage} = NSConfigs, - EventSinks = [event_sink_options(SinkConfig, Pulse) || SinkConfig <- maps:get(event_sinks, NSConfigs, [])], + EventSinks = [ + event_sink_options(SinkConfig, EventSinkNS, Pulse) + || SinkConfig <- maps:get(event_sinks, NSConfigs, []) + ], EventsStorage = sub_storage_options(<<"events">>, Storage), #{ namespace => NS, @@ -150,14 +167,14 @@ machine_options(NS, Config, Pulse) -> suicide_probability => maps:get(suicide_probability, Config, undefined) }. --spec api_automaton_options(namespaces(), pulse()) -> mg_woody_automaton:options(). -api_automaton_options(NSs, Pulse) -> +-spec api_automaton_options(namespaces(), event_sink_ns(), pulse()) -> mg_woody_automaton:options(). +api_automaton_options(NSs, EventSinkNS, Pulse) -> maps:fold( fun(NS, ConfigNS, Options) -> Options#{ NS => maps:merge( #{ - machine => events_machine_options(NS, NSs, Pulse) + machine => events_machine_options(NS, NSs, EventSinkNS, Pulse) }, modernizer_options(maps:get(modernizer, ConfigNS, undefined), Pulse) ) @@ -167,13 +184,47 @@ api_automaton_options(NSs, Pulse) -> NSs ). --spec event_sink_options(mg_core_events_sink:handler(), pulse()) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, Pulse) -> +-spec event_sink_options(mg_core_events_sink:handler(), event_sink_ns(), pulse()) -> mg_core_events_sink:handler(). 
+event_sink_options({mg_core_events_sink_machine, EventSinkConfig}, EvSinks, Pulse) -> + EventSinkNS = event_sink_namespace_options(EvSinks, Pulse), + {mg_core_events_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; +event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, _Config, Pulse) -> {mg_core_events_sink_kafka, EventSinkConfig#{ pulse => Pulse, encoder => fun mg_woody_event_sink:serialize/3 }}. +-spec event_sink_ns_child_spec(event_sink_ns(), atom(), pulse()) -> supervisor:child_spec(). +event_sink_ns_child_spec(EventSinkNS, ChildID, Pulse) -> + mg_core_events_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS, Pulse), ChildID). + +-spec api_event_sink_options(namespaces(), event_sink_ns(), pulse()) -> mg_woody_event_sink:options(). +api_event_sink_options(NSs, EventSinkNS, Pulse) -> + EventSinkMachines = collect_event_sink_machines(NSs), + {EventSinkMachines, event_sink_namespace_options(EventSinkNS, Pulse)}. + +-spec collect_event_sink_machines(namespaces()) -> [mg_core:id()]. +collect_event_sink_machines(NSs) -> + NSConfigs = maps:values(NSs), + EventSinks = ordsets:from_list([ + maps:get(machine_id, SinkConfig) + || NSConfig <- NSConfigs, {mg_core_events_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) + ]), + ordsets:to_list(EventSinks). + +-spec event_sink_namespace_options(event_sink_ns(), pulse()) -> mg_core_events_sink_machine:ns_options(). +event_sink_namespace_options(#{storage := Storage} = EventSinkNS, Pulse) -> + NS = <<"_event_sinks">>, + MachinesStorage = sub_storage_options(<<"machines">>, Storage), + EventsStorage = sub_storage_options(<<"events">>, Storage), + EventSinkNS#{ + namespace => NS, + pulse => Pulse, + storage => MachinesStorage, + events_storage => EventsStorage, + worker => worker_manager_options(EventSinkNS) + }. + -spec worker_manager_options(map()) -> mg_core_workers_manager:ns_options(). 
worker_manager_options(Config) -> maps:merge( diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 6cb0587c..c198c123 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -839,6 +839,10 @@ mg_config() -> [ {woody_server, #{ip => {0, 0, 0, 0}, port => 8022}}, {namespaces, #{}}, + {event_sink_ns, #{ + storage => mg_core_storage_memory, + registry => mg_core_procreg_global + }}, {pulse, {mg_pulse, #{}}} ]. diff --git a/apps/machinegun/test/mg_tests_SUITE.erl b/apps/machinegun/test/mg_tests_SUITE.erl index 35b1d66b..a43fa106 100644 --- a/apps/machinegun/test/mg_tests_SUITE.erl +++ b/apps/machinegun/test/mg_tests_SUITE.erl @@ -247,6 +247,10 @@ mg_config(#{endpoint := {IP, Port}}, C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ + {mg_core_events_sink_machine, #{ + name => machine, + machine_id => ?ES_ID + }}, {mg_core_events_sink_kafka, #{ name => kafka, topic => ?ES_ID, @@ -255,6 +259,10 @@ mg_config(#{endpoint := {IP, Port}}, C) -> ] } }}, + {event_sink_ns, #{ + storage => mg_core_storage_memory, + default_processing_timeout => 5000 + }}, {pulse, {mg_pulse, #{}}} ]. diff --git a/apps/mg_core/src/mg_core_events_sink_machine.erl b/apps/mg_core/src/mg_core_events_sink_machine.erl new file mode 100644 index 00000000..7232691b --- /dev/null +++ b/apps/mg_core/src/mg_core_events_sink_machine.erl @@ -0,0 +1,299 @@ +%%% +%%% Copyright 2017 RBKmoney +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. 
+%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_core_events_sink_machine). + +%% API +-export_type([event_body/0]). +-export_type([options/0]). +-export_type([storage_options/0]). +-export_type([ns_options/0]). +-export([child_spec/2]). +-export([start_link/1]). +-export([get_history/3]). +-export([repair/4]). + +%% mg_core_events_sink handler +-behaviour(mg_core_events_sink). +-export([add_events/6]). + +%% mg_core_machine handler +-behaviour(mg_core_machine). +-export([process_machine/7]). + +%% +%% API +%% +-type event_body() :: #{ + source_ns => mg_core:ns(), + source_id => mg_core:id(), + event => mg_core_events:event() +}. +-type event() :: mg_core_events:event(event_body()). +-type options() :: #{ + name := atom(), + namespace := mg_core:ns(), + machine_id := mg_core:id(), + storage := storage_options(), + worker := mg_core_workers_manager:ns_options(), + pulse := mg_core_pulse:handler(), + events_storage := mg_core_storage:options(), + default_processing_timeout := timeout() +}. +-type ns_options() :: #{ + namespace := mg_core:ns(), + storage := storage_options(), + worker := mg_core_workers_manager:ns_options(), + pulse := mg_core_pulse:handler(), + events_storage := storage_options(), + default_processing_timeout := timeout() +}. +% like mg_core_storage:options() except `name` +-type storage_options() :: mg_core_utils:mod_opts(map()). + +-spec child_spec(ns_options(), atom()) -> supervisor:child_spec(). +child_spec(Options, ChildID) -> + #{ + id => ChildID, + start => {?MODULE, start_link, [Options]}, + restart => permanent, + type => supervisor + }. 
+ +-spec start_link(ns_options()) -> mg_core_utils:gen_start_ret(). +start_link(Options) -> + genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + mg_core_utils:lists_compact([ + mg_core_machine:child_spec(machine_options(Options), automaton), + mg_core_storage:child_spec(events_storage_options(Options), events_storage) + ]) + ). + +-spec add_events( + options(), + mg_core:ns(), + mg_core:id(), + [mg_core_events:event()], + ReqCtx, + Deadline +) -> ok when + ReqCtx :: mg_core:request_context(), + Deadline :: mg_core_deadline:deadline(). +add_events( + #{machine_id := EventSinkID} = Options, + SourceNS, + SourceMachineID, + Events, + ReqCtx, + Deadline +) -> + NSOptions = maps:without([machine_id, name], Options), + ok = mg_core_machine:call_with_lazy_start( + machine_options(NSOptions), + EventSinkID, + {add_events, SourceNS, SourceMachineID, Events}, + ReqCtx, + Deadline, + undefined + ). + +-spec get_history(ns_options(), mg_core:id(), mg_core_events:history_range()) -> [event()]. +get_history(Options, EventSinkID, HistoryRange) -> + #{events_range := EventsRange} = get_state(Options, EventSinkID), + StorageOptions = events_storage_options(Options), + Batch = mg_core_dirange:fold( + fun(EventID, Batch) -> + Key = mg_core_events:add_machine_id( + EventSinkID, + mg_core_events:event_id_to_key(EventID) + ), + mg_core_storage:add_batch_request({get, Key}, Batch) + end, + mg_core_storage:new_batch(), + mg_core_events:intersect_range(EventsRange, HistoryRange) + ), + BatchResults = mg_core_storage:run_batch(StorageOptions, Batch), + lists:map( + fun({{get, Key}, {_Context, Value}}) -> + kv_to_sink_event(EventSinkID, {Key, Value}) + end, + BatchResults + ). + +-spec repair(ns_options(), mg_core:id(), mg_core:request_context(), mg_core_deadline:deadline()) -> + ok. +repair(Options, EventSinkID, ReqCtx, Deadline) -> + mg_core_machine:repair(machine_options(Options), EventSinkID, undefined, ReqCtx, Deadline). 
+ +%% +%% mg_core_processor handler +%% +-type state() :: #{ + events_range => mg_core_events:events_range() +}. + +-spec process_machine(Options, EventSinkID, Impact, PCtx, ReqCtx, Deadline, PackedState) -> + Result +when + Options :: ns_options(), + EventSinkID :: mg_core:id(), + Impact :: mg_core_machine:processor_impact(), + PCtx :: mg_core_machine:processing_context(), + ReqCtx :: mg_core:request_context(), + Deadline :: mg_core_deadline:deadline(), + PackedState :: mg_core_machine:machine_state(), + Result :: mg_core_machine:processor_result(). +process_machine(Options, EventSinkID, Impact, _PCtx, _ReqCtx, _Deadline, PackedState) -> + State = + case {Impact, PackedState} of + {{init, _}, null} -> new_state(); + {_, _} -> opaque_to_state(PackedState) + end, + NewState = process_machine_(Options, EventSinkID, Impact, State), + {{reply, ok}, sleep, state_to_opaque(NewState)}. + +-spec process_machine_(ns_options(), mg_core:id(), mg_core_machine:processor_impact(), state()) -> + state(). +process_machine_(_, _, {init, undefined}, State) -> + State; +process_machine_(_, _, {repair, undefined}, State) -> + State; +process_machine_( + Options, + EventSinkID, + {call, {add_events, SourceNS, SourceMachineID, Events}}, + State +) -> + {SinkEvents, NewState} = generate_sink_events(SourceNS, SourceMachineID, Events, State), + ok = store_sink_events(Options, EventSinkID, SinkEvents), + NewState. + +%% + +-spec store_sink_events(ns_options(), mg_core:id(), [event()]) -> ok. +store_sink_events(Options, EventSinkID, SinkEvents) -> + lists:foreach( + fun(SinkEvent) -> + store_event(Options, EventSinkID, SinkEvent) + end, + SinkEvents + ). + +-spec store_event(ns_options(), mg_core:id(), event()) -> ok. +store_event(Options, EventSinkID, SinkEvent) -> + {Key, Value} = sink_event_to_kv(EventSinkID, SinkEvent), + _ = mg_core_storage:put( + events_storage_options(Options), + Key, + undefined, + Value, + [] + ), + ok. + +-spec get_state(ns_options(), mg_core:id()) -> state(). 
+get_state(Options, EventSinkID) -> + try + #{state := State} = mg_core_machine:get(machine_options(Options), EventSinkID), + opaque_to_state(State) + catch + throw:{logic, machine_not_found} -> + new_state() + end. + +-spec new_state() -> state(). +new_state() -> + #{events_range => undefined}. + +-spec machine_options(ns_options()) -> mg_core_machine:options(). +machine_options( + Options = #{ + namespace := Namespace, storage := Storage, worker := Worker, pulse := Pulse + } +) -> + #{ + namespace => mg_core_utils:concatenate_namespaces(Namespace, <<"machines">>), + processor => {?MODULE, Options}, + storage => Storage, + worker => Worker, + pulse => Pulse + }. + +-spec events_storage_options(ns_options()) -> mg_core_storage:options(). +events_storage_options(#{namespace := NS, events_storage := StorageOptions, pulse := Handler}) -> + {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options#{name => {NS, ?MODULE, events}, pulse => Handler}}. + +%% + +-spec generate_sink_events(mg_core:ns(), mg_core:id(), [mg_core_events:event()], state()) -> + {[event()], state()}. +generate_sink_events(SourceNS, SourceMachineID, Events, State = #{events_range := EventsRange}) -> + Bodies = [generate_sink_event_body(SourceNS, SourceMachineID, Event) || Event <- Events], + {SinkEvents, NewEventsRange} = mg_core_events:generate_events_with_range(Bodies, EventsRange), + {SinkEvents, State#{events_range := NewEventsRange}}. + +-spec generate_sink_event_body(mg_core:ns(), mg_core:id(), mg_core_events:event()) -> event_body(). +generate_sink_event_body(SourceNS, SourceMachineID, Event) -> + #{ + source_ns => SourceNS, + source_id => SourceMachineID, + event => Event + }. + +%% +%% packer to opaque +%% +-spec state_to_opaque(state()) -> mg_core_storage:opaque(). +state_to_opaque(#{events_range := EventsRange}) -> + [1, mg_core_events:events_range_to_opaque(EventsRange)]. + +-spec opaque_to_state(mg_core_storage:opaque()) -> state(). 
+opaque_to_state([1, EventsRange]) -> + #{ + events_range => mg_core_events:opaque_to_events_range(EventsRange) + }. + +-spec sink_event_body_to_opaque(Vsn :: integer(), event_body()) -> mg_core_storage:opaque(). +sink_event_body_to_opaque(_Vsn, #{ + source_ns := SourceNS, + source_id := SourceMachineID, + event := Event +}) -> + [1, SourceNS, SourceMachineID, mg_core_events:event_to_opaque(Event)]. + +-spec opaque_to_sink_event_body(Vsn :: integer(), mg_core_storage:opaque()) -> event_body(). +opaque_to_sink_event_body(_Vsn, [1, SourceNS, SourceMachineID, Event]) -> + #{ + source_ns => SourceNS, + source_id => SourceMachineID, + event => mg_core_events:opaque_to_event(Event) + }. + +-spec sink_event_to_kv(mg_core:id(), event()) -> mg_core_storage:kv(). +sink_event_to_kv(EventSinkID, Event) -> + mg_core_events:add_machine_id( + EventSinkID, + mg_core_events:event_to_kv(Event, fun sink_event_body_to_opaque/2) + ). + +-spec kv_to_sink_event(mg_core:id(), mg_core_storage:kv()) -> event(). +kv_to_sink_event(EventSinkID, Kvs) -> + mg_core_events:kv_to_event( + mg_core_events:remove_machine_id(EventSinkID, Kvs), + fun opaque_to_sink_event_body/2 + ). diff --git a/apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl b/apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl new file mode 100644 index 00000000..249d5866 --- /dev/null +++ b/apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl @@ -0,0 +1,169 @@ +%%% +%%% Copyright 2017 RBKmoney +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_core_events_sink_machine_SUITE). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("common_test/include/ct.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). + +%% tests +-export([add_events_test/1]). +-export([get_unexisted_event_test/1]). +-export([not_idempotent_add_get_events_test/1]). + +%% Pulse +-export([handle_beat/2]). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, main} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {main, [sequence], [ + add_events_test, + get_unexisted_event_test, + not_idempotent_add_get_events_test + ]} + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + % dbg:tracer(), dbg:p(all, c), + % dbg:tpl({mg_core_events_sink_machine, '_', '_'}, x), + Apps = mg_cth:start_applications([mg_core]), + Pid = start_event_sink(event_sink_ns_options()), + true = erlang:unlink(Pid), + {Events, _} = mg_core_events:generate_events_with_range( + [{#{}, Body} || Body <- [1, 2, 3]], + undefined + ), + [{apps, Apps}, {pid, Pid}, {events, Events} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + ok = proc_lib:stop(?config(pid, C)), + mg_cth:stop_applications(?config(apps, C)). + +%% +%% tests +%% +-define(ES_ID, <<"event_sink_id">>). +-define(SOURCE_NS, <<"source_ns">>). +-define(SOURCE_ID, <<"source_id">>). + +-spec add_events_test(config()) -> _. +add_events_test(C) -> + ?assertEqual(ok, add_events(C)). + +-spec get_unexisted_event_test(config()) -> _. 
+get_unexisted_event_test(_C) -> + [] = mg_core_events_sink_machine:get_history( + event_sink_ns_options(), + ?ES_ID, + {42, undefined, forward} + ). + +-spec not_idempotent_add_get_events_test(config()) -> _. +not_idempotent_add_get_events_test(C) -> + ?assertEqual(ok, add_events(C)), + ConfigEvents = [ + #{event => Event, source_ns => ?SOURCE_NS, source_id => ?SOURCE_ID} + || Event <- ?config(events, C) + ], + ExpectedEvents = lists:zip( + lists:seq(1, erlang:length(?config(events, C)) * 2), + ConfigEvents ++ ConfigEvents + ), + ?assertEqual(ExpectedEvents, get_history(C)). + +%% +%% utils +%% + +-spec add_events(config()) -> _. +add_events(C) -> + mg_core_events_sink_machine:add_events( + event_sink_options(), + ?SOURCE_NS, + ?SOURCE_ID, + ?config(events, C), + null, + mg_core_deadline:default() + ). + +-spec get_history(config()) -> _. +get_history(_C) -> + HRange = {undefined, undefined, forward}, + % _ = ct:pal("~p", [PreparedEvents]), + EventsSinkEvents = mg_core_events_sink_machine:get_history( + event_sink_ns_options(), + ?ES_ID, + HRange + ), + [{ID, Body} || #{id := ID, body := Body} <- EventsSinkEvents]. + +-spec start_event_sink(mg_core_events_sink_machine:ns_options()) -> pid(). +start_event_sink(Options) -> + mg_core_utils:throw_if_error( + genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + [mg_core_events_sink_machine:child_spec(Options, event_sink)] + ) + ). + +-spec event_sink_ns_options() -> mg_core_events_sink_machine:ns_options(). +event_sink_ns_options() -> + #{ + namespace => ?ES_ID, + storage => mg_core_storage_memory, + worker => #{ + registry => mg_core_procreg_global + }, + pulse => ?MODULE, + default_processing_timeout => 1000, + events_storage => mg_core_storage_memory + }. + +-spec event_sink_options() -> mg_core_events_sink_machine:options(). +event_sink_options() -> + NSOptions = event_sink_ns_options(), + NSOptions#{ + name => machine, + machine_id => ?ES_ID + }. + +-spec handle_beat(_, mg_core_pulse:beat()) -> ok. 
+handle_beat(_, Beat) -> + ct:pal("~p", [Beat]). diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_configurator.erl index 8e8a4ba8..397ba738 100644 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ b/apps/mg_cth/src/mg_cth_configurator.erl @@ -21,8 +21,15 @@ event_stash_size := non_neg_integer() }. +-type event_sink_ns() :: #{ + default_processing_timeout := timeout(), + storage => mg_core_storage:options(), + worker => mg_core_worker:options() +}. + -type config() :: #{ woody_server := mg_woody:woody_server(), + event_sink_ns := event_sink_ns(), namespaces := #{mg_core:ns() => events_machines()}, quotas => [mg_core_quota_worker:options()] }. @@ -32,21 +39,30 @@ -spec construct_child_specs(config() | undefined) -> _. construct_child_specs(undefined) -> []; -construct_child_specs(#{woody_server := WoodyServer, namespaces := Namespaces} = Config) -> +construct_child_specs( + #{ + woody_server := WoodyServer, + event_sink_ns := EventSinkNS, + namespaces := Namespaces + } = Config +) -> Quotas = maps:get(quotas, Config, []), QuotasChSpec = quotas_child_specs(Quotas, quota), - EventMachinesChSpec = events_machines_child_specs(Namespaces), + EventSinkChSpec = event_sink_ns_child_spec(EventSinkNS, event_sink), + EventMachinesChSpec = events_machines_child_specs(Namespaces, EventSinkNS), WoodyServerChSpec = mg_woody:child_spec( woody_server, #{ woody_server => WoodyServer, - automaton => api_automaton_options(Namespaces), + automaton => api_automaton_options(Namespaces, EventSinkNS), + event_sink => api_event_sink_options(Namespaces, EventSinkNS), pulse => mg_cth_pulse } ), lists:flatten([ + EventSinkChSpec, WoodyServerChSpec, QuotasChSpec, EventMachinesChSpec @@ -61,18 +77,24 @@ quotas_child_specs(Quotas, ChildID) -> || Options <- Quotas ]. --spec events_machines_child_specs(_) -> [supervisor:child_spec()]. -events_machines_child_specs(NSs) -> +-spec events_machines_child_specs(_, _) -> [supervisor:child_spec()]. 
+events_machines_child_specs(NSs, EventSinkNS) -> [ - mg_core_events_machine:child_spec(events_machine_options(NS, NSs), binary_to_atom(NS, utf8)) + mg_core_events_machine:child_spec( + events_machine_options(NS, NSs, EventSinkNS), + binary_to_atom(NS, utf8) + ) || NS <- maps:keys(NSs) ]. --spec events_machine_options(mg_core:ns(), _) -> mg_core_events_machine:options(). -events_machine_options(NS, NSs) -> +-spec events_machine_options(mg_core:ns(), _, event_sink_ns()) -> mg_core_events_machine:options(). +events_machine_options(NS, NSs, EventSinkNS) -> NSConfigs = maps:get(NS, NSs), #{processor := ProcessorConfig, storage := Storage} = NSConfigs, - EventSinks = [event_sink_options(SinkConfig) || SinkConfig <- maps:get(event_sinks, NSConfigs, [])], + EventSinks = [ + event_sink_options(SinkConfig, EventSinkNS) + || SinkConfig <- maps:get(event_sinks, NSConfigs, []) + ], EventsStorage = sub_storage_options(<<"events">>, Storage), #{ namespace => NS, @@ -112,14 +134,14 @@ machine_options(NS, Config) -> suicide_probability => maps:get(suicide_probability, Config, undefined) }. --spec api_automaton_options(_) -> mg_woody_automaton:options(). -api_automaton_options(NSs) -> +-spec api_automaton_options(_, event_sink_ns()) -> mg_woody_automaton:options(). +api_automaton_options(NSs, EventSinkNS) -> maps:fold( fun(NS, ConfigNS, Options) -> Options#{ NS => maps:merge( #{ - machine => events_machine_options(NS, NSs) + machine => events_machine_options(NS, NSs, EventSinkNS) }, modernizer_options(maps:get(modernizer, ConfigNS, undefined)) ) @@ -129,13 +151,48 @@ api_automaton_options(NSs) -> NSs ). --spec event_sink_options(mg_core_events_sink:handler()) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}) -> +-spec event_sink_options(mg_core_events_sink:handler(), _) -> mg_core_events_sink:handler(). 
+event_sink_options({mg_core_events_sink_machine, EventSinkConfig}, EvSinks) -> + EventSinkNS = event_sink_namespace_options(EvSinks), + {mg_core_events_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; +event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, _Config) -> {mg_core_events_sink_kafka, EventSinkConfig#{ pulse => pulse(), encoder => fun mg_woody_event_sink:serialize/3 }}. +-spec event_sink_ns_child_spec(_, atom()) -> supervisor:child_spec(). +event_sink_ns_child_spec(EventSinkNS, ChildID) -> + mg_core_events_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS), ChildID). + +-spec api_event_sink_options(_, _) -> mg_woody_event_sink:options(). +api_event_sink_options(NSs, EventSinkNS) -> + EventSinkMachines = collect_event_sink_machines(NSs), + {EventSinkMachines, event_sink_namespace_options(EventSinkNS)}. + +-spec collect_event_sink_machines(_) -> [mg_core:id()]. +collect_event_sink_machines(NSs) -> + NSConfigs = maps:values(NSs), + EventSinks = ordsets:from_list([ + maps:get(machine_id, SinkConfig) + || NSConfig <- NSConfigs, + {mg_core_events_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) + ]), + ordsets:to_list(EventSinks). + +-spec event_sink_namespace_options(_) -> mg_core_events_sink_machine:ns_options(). +event_sink_namespace_options(#{storage := Storage} = EventSinkNS) -> + NS = <<"_event_sinks">>, + MachinesStorage = sub_storage_options(<<"machines">>, Storage), + EventsStorage = sub_storage_options(<<"events">>, Storage), + EventSinkNS#{ + namespace => NS, + pulse => pulse(), + storage => MachinesStorage, + events_storage => EventsStorage, + worker => worker_manager_options(EventSinkNS) + }. + -spec worker_manager_options(map()) -> mg_core_workers_manager:ns_options(). 
worker_manager_options(Config) -> maps:merge( diff --git a/apps/mg_woody/src/mg_woody.erl b/apps/mg_woody/src/mg_woody.erl index f3373b44..7d3eb8db 100644 --- a/apps/mg_woody/src/mg_woody.erl +++ b/apps/mg_woody/src/mg_woody.erl @@ -40,9 +40,12 @@ -type automaton() :: mg_woody_automaton:options(). +-type event_sink() :: mg_woody_event_sink:options(). + -type options() :: #{ pulse := module(), automaton := automaton(), + event_sink := event_sink(), woody_server := woody_server(), additional_routes => [woody_server_thrift_http_handler:route(any())] }. @@ -52,6 +55,7 @@ child_spec(ID, Options) -> #{ woody_server := WoodyConfig, automaton := Automaton, + event_sink := EventSink, pulse := PulseHandler } = Options, WoodyOptions = maps:merge( @@ -62,7 +66,8 @@ child_spec(ID, Options) -> port => maps:get(port, WoodyConfig), event_handler => {mg_woody_event_handler, PulseHandler}, handlers => [ - mg_woody_automaton:handler(Automaton) + mg_woody_automaton:handler(Automaton), + mg_woody_event_sink:handler(EventSink) ] }, genlib_map:compact(#{ diff --git a/apps/mg_woody/src/mg_woody_event_sink.erl b/apps/mg_woody/src/mg_woody_event_sink.erl index f2b09674..e2303de1 100644 --- a/apps/mg_woody/src/mg_woody_event_sink.erl +++ b/apps/mg_woody/src/mg_woody_event_sink.erl @@ -17,7 +17,6 @@ -module(mg_woody_event_sink). -include_lib("mg_proto/include/mg_proto_event_sink_thrift.hrl"). --include_lib("mg_proto/include/mg_proto_state_processing_thrift.hrl"). %% API -export([handler/1]). @@ -31,7 +30,7 @@ %% %% API %% --type options() :: {[mg_core:id()], _NSOptions}. +-type options() :: {[mg_core:id()], mg_core_events_sink_machine:ns_options()}. -spec handler(options()) -> mg_woody_utils:woody_handler(). handler(Options) -> @@ -40,10 +39,33 @@ handler(Options) -> %% %% woody handler %% --spec handle_function(woody:func(), woody:args(), woody_context:ctx(), options()) -> no_return(). 
+-spec handle_function(woody:func(), woody:args(), woody_context:ctx(), options()) -> + {ok, _Result} | no_return(). -handle_function('GetHistory', {_EventSinkID, _Range}, _WoodyContext, {_AvaliableEventSinks, _Options}) -> - erlang:throw(#mg_stateproc_EventSinkNotFound{}). +handle_function('GetHistory', {EventSinkID, Range}, WoodyContext, {AvaliableEventSinks, Options}) -> + ReqCtx = mg_woody_utils:woody_context_to_opaque(WoodyContext), + DefaultTimeout = maps:get(default_processing_timeout, Options), + DefaultDeadline = mg_core_deadline:from_timeout(DefaultTimeout), + Deadline = mg_woody_utils:get_deadline(WoodyContext, DefaultDeadline), + SinkHistory = + mg_woody_utils:handle_error( + #{ + namespace => undefined, + machine_id => EventSinkID, + request_context => ReqCtx, + deadline => Deadline + }, + fun() -> + _ = check_event_sink(AvaliableEventSinks, EventSinkID), + mg_core_events_sink_machine:get_history( + Options, + EventSinkID, + mg_woody_packer:unpack(history_range, Range) + ) + end, + pulse(Options) + ), + {ok, mg_woody_packer:pack(sink_history, SinkHistory)}. %% %% events_sink events encoder @@ -74,3 +96,20 @@ serialize(SourceNS, SourceID, Event) -> {error, Reason} -> erlang:error({?MODULE, Reason}) end. + +%% +%% Internals +%% + +-spec check_event_sink([mg_core:id()], mg_core:id()) -> ok | no_return(). +check_event_sink(AvaliableEventSinks, EventSinkID) -> + case lists:member(EventSinkID, AvaliableEventSinks) of + true -> + ok; + false -> + throw({logic, event_sink_not_found}) + end. + +-spec pulse(mg_core_events_sink_machine:ns_options()) -> mg_core_pulse:handler(). +pulse(#{pulse := Pulse}) -> + Pulse. 
diff --git a/apps/mg_woody/test/mg_event_sink_client.erl b/apps/mg_woody/test/mg_event_sink_client.erl new file mode 100644 index 00000000..469f5b68 --- /dev/null +++ b/apps/mg_woody/test/mg_event_sink_client.erl @@ -0,0 +1,51 @@ +%%% +%%% Copyright 2020 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_event_sink_client). + +%% API +-export_type([options/0]). +-export([get_history/3]). + +%% +%% API +%% +-type options() :: URL :: string(). + +-spec get_history(options(), mg_core:id(), mg_proto_state_processing_thrift:'HistoryRange'()) -> + mg_proto_state_processing_thrift:'SinkHistory'(). +get_history(BaseURL, EventSinkID, Range) -> + call_service(BaseURL, 'GetHistory', {EventSinkID, Range}). + +%% +%% local +%% +-spec call_service(_BaseURL, atom(), woody:args()) -> _. +call_service(BaseURL, Function, Args) -> + WR = woody_client:call( + {{mg_proto_state_processing_thrift, 'EventSink'}, Function, Args}, + #{ + url => BaseURL ++ "/v1/event_sink", + event_handler => {mg_woody_event_handler, mg_cth_pulse} + }, + woody_context:new() + ), + case WR of + {ok, R} -> + R; + {exception, Exception} -> + erlang:throw(Exception) + end. 
diff --git a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl index f08743fd..58fee70f 100644 --- a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl @@ -190,6 +190,10 @@ mg_woody_config(Name, C) -> } end ) + }, + event_sink_ns => #{ + storage => mg_core_storage_memory, + default_processing_timeout => 5000 } }. diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index 29bf3aa0..6d77187c 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -26,6 +26,7 @@ -export([stress_test/1]). -define(NS, <<"NS">>). +-define(ES_ID, <<"test_event_sink">>). -type test_name() :: atom(). -type config() :: [{atom(), _}]. @@ -82,6 +83,7 @@ init_per_suite(C) -> ns => ?NS, retry_strategy => genlib_retry:new_strategy({exponential, 5, 2, 1000}) }}, + {event_sink_options, "http://localhost:8022"}, {processor_pid, ProcessorPid} | C ]. @@ -120,9 +122,15 @@ mg_woody_config(_C) -> timers => #{} }, retries => #{}, - event_sinks => [], + event_sinks => [ + {mg_core_events_sink_machine, #{name => default, machine_id => ?ES_ID}} + ], event_stash_size => 10 } + }, + event_sink_ns => #{ + storage => mg_core_storage_memory, + default_processing_timeout => 5000 } }. diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index f317aab2..7c088ad5 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -70,6 +70,14 @@ -export([success_call_with_deadline/1]). -export([timeout_call_with_deadline/1]). +%% event_sink group tests +-export([event_sink_get_empty_history/1]). +-export([event_sink_get_not_empty_history/1]). +-export([event_sink_get_last_event/1]). +-export([event_sink_incorrect_event_id/1]). +-export([event_sink_incorrect_sink_id/1]). +-export([event_sink_lots_events_ordering/1]). 
+ %% -export([config_with_multiple_event_sinks/1]). @@ -88,6 +96,7 @@ all() -> {group, history}, {group, repair}, {group, timers}, + {group, event_sink}, {group, deadline}, config_with_multiple_event_sinks ]. @@ -96,6 +105,7 @@ all() -> groups() -> [ % TODO проверить отмену таймера + % TODO проверить отдельно get_history {base, [sequence], [ namespace_not_found, machine_id_not_found, @@ -150,6 +160,16 @@ groups() -> machine_start, success_call_with_deadline, timeout_call_with_deadline + ]}, + + {event_sink, [sequence], [ + event_sink_get_empty_history, + event_sink_get_not_empty_history, + event_sink_get_last_event, + % TODO event_not_found + % event_sink_incorrect_event_id, + event_sink_incorrect_sink_id, + event_sink_lots_events_ordering ]} ]. @@ -215,6 +235,7 @@ init_per_group(C) -> ns => ?NS, retry_strategy => genlib_retry:linear(3, 1) }}, + {event_sink_options, "http://localhost:8022"}, {processor_pid, ProcessorPid} | C ]. @@ -332,6 +353,10 @@ mg_woody_config(C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ + {mg_core_events_sink_machine, #{ + name => machine, + machine_id => ?ES_ID + }}, {mg_core_events_sink_kafka, #{ name => kafka, topic => ?ES_ID, @@ -339,6 +364,10 @@ mg_woody_config(C) -> }} ] } + }, + event_sink_ns => #{ + storage => mg_core_storage_memory, + default_processing_timeout => 5000 } }. @@ -594,6 +623,95 @@ success_call_with_deadline(C) -> <<"sleep">> = mg_cth_automaton_client:call(Options, ?ID, <<"sleep">>, Deadline). %% +%% event_sink group test +%% +-spec event_sink_get_empty_history(config()) -> _. +event_sink_get_empty_history(C) -> + [] = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ + direction = forward + }). + +-spec event_sink_get_not_empty_history(config()) -> _. 
+event_sink_get_not_empty_history(C) -> + ok = start_machine(C, ?ID), + + _ = create_events(3, C, ?ID), + + AllEvents = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ + direction = forward + }), + GeneratedEvents = [ + E + || E = #mg_stateproc_SinkEvent{ + source_id = ?ID, + source_ns = ?NS, + event = #mg_stateproc_Event{} + } <- AllEvents + ], + ?assert(erlang:length(GeneratedEvents) >= 3). + +-spec event_sink_get_last_event(config()) -> _. +event_sink_get_last_event(C) -> + [ + #mg_stateproc_SinkEvent{ + id = 3, + source_id = _ID, + source_ns = _NS, + event = #mg_stateproc_Event{} + } + ] = + mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ + direction = backward, + limit = 1 + }). + +-spec event_sink_incorrect_event_id(config()) -> _. +event_sink_incorrect_event_id(C) -> + #mg_stateproc_EventNotFound{} = + (catch mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ + 'after' = 42 + })). + +-spec event_sink_incorrect_sink_id(config()) -> _. +event_sink_incorrect_sink_id(C) -> + HRange = #mg_stateproc_HistoryRange{}, + #mg_stateproc_EventSinkNotFound{} = + (catch mg_event_sink_client:get_history(es_opts(C), <<"incorrect_event_sink_id">>, HRange)). + +-spec event_sink_lots_events_ordering(config()) -> _. +event_sink_lots_events_ordering(C) -> + MachineID = genlib:unique(), + ok = start_machine(C, MachineID), + N = 20, + _ = create_events(N, C, MachineID), + + HRange = #mg_stateproc_HistoryRange{direction = forward}, + Events = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, HRange), + % event_sink не гарантирует отсутствия дублей событий, но гарантирует + % сохранения порядка событий отдельной машины. 
+ lists:foldl( + fun(Ev, LastEvIDMap) -> + #mg_stateproc_SinkEvent{ + source_id = Machine, + source_ns = NS, + event = Body + } = Ev, + Key = {NS, Machine}, + LastID = maps:get(Key, LastEvIDMap, 0), + case Body#mg_stateproc_Event.id of + ID when ID =:= LastID + 1 -> + LastEvIDMap#{Key => ID}; + ID when ID =< LastID -> + % Дубликат одного из уже известных событий + LastEvIDMap; + ID -> + % Нарушен порядок событий, получился пропуск + erlang:error({invalid_order, ID, LastID}, [Ev, LastEvIDMap]) + end + end, + #{}, + Events + ). -spec config_with_multiple_event_sinks(config()) -> _. config_with_multiple_event_sinks(_C) -> @@ -613,11 +731,7 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_kafka, #{ - name => kafka, - topic => <<"mg_core_event_sink">>, - client => mg_cth:config(kafka_client_name) - }} + {mg_core_events_sink_machine, #{name => default, machine_id => <<"SingleES">>}} ] }, <<"2">> => #{ @@ -633,10 +747,9 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_kafka, #{ - name => kafka_other, - topic => <<"mg_core_event_sink_2">>, - client => mg_cth:config(kafka_client_name) + {mg_core_events_sink_machine, #{ + name => machine, + machine_id => <<"SingleES">> }}, {mg_core_events_sink_kafka, #{ name => kafka, @@ -645,6 +758,10 @@ config_with_multiple_event_sinks(_C) -> }} ] } + }, + event_sink_ns => #{ + storage => mg_core_storage_memory, + default_processing_timeout => 5000 } }, Apps = mg_cth:start_applications([ @@ -674,9 +791,25 @@ start_machine(C, ID, Args) -> ok end. +-spec create_event(mg_core_storage:opaque(), config(), mg_core:id()) -> _. +create_event(Event, C, ID) -> + mg_cth_automaton_client:call(automaton_options(C), ID, Event). + +-spec create_events(integer(), config(), mg_core:id()) -> _. +create_events(N, C, ID) -> + lists:foreach( + fun(I) -> + I = create_event([<<"event">>, I], C, ID) + end, + lists:seq(1, N) + ). 
+ -spec automaton_options(config()) -> _. automaton_options(C) -> ?config(automaton_options, C). +-spec es_opts(config()) -> _. +es_opts(C) -> ?config(event_sink_options, C). + -spec no_timeout_automaton_options(config()) -> _. no_timeout_automaton_options(C) -> Options0 = automaton_options(C), diff --git a/config/config.yaml b/config/config.yaml index 8458dc2e..9d9d1e56 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -175,6 +175,9 @@ namespaces: # only for testing, default 0 # suicide_probability: 0.1 event_sinks: + machine: + type: machine + machine_id: main_event_sink kafka: type: kafka client: default_kafka_client diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index e79a67e8..7e570da6 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -216,6 +216,7 @@ machinegun(YamlConfig) -> {health_check, health_check(YamlConfig)}, {quotas, quotas(YamlConfig)}, {namespaces, namespaces(YamlConfig)}, + {event_sink_ns, event_sink_ns(YamlConfig)}, {pulse, pulse(YamlConfig)}, {cluster, cluster(YamlConfig)} ]. @@ -539,9 +540,23 @@ notification_scheduler(Share, Config) -> timeout(Name, Config, Default, Unit) -> ?C:time_interval(?C:conf([Name], Config, Default), Unit). +event_sink_ns(YamlConfig) -> + #{ + registry => procreg(YamlConfig), + storage => storage(<<"_event_sinks">>, YamlConfig), + worker => #{registry => procreg(YamlConfig)}, + duplicate_search_batch => 1000, + default_processing_timeout => ?C:milliseconds(<<"30s">>) + }. + event_sink({Name, ESYamlConfig}) -> event_sink(?C:atom(?C:conf([type], ESYamlConfig)), Name, ESYamlConfig). 
+event_sink(machine, Name, ESYamlConfig) -> + {mg_core_events_sink_machine, #{ + name => ?C:atom(Name), + machine_id => ?C:conf([machine_id], ESYamlConfig) + }}; event_sink(kafka, Name, ESYamlConfig) -> {mg_core_events_sink_kafka, #{ name => ?C:atom(Name), From 50c2939cb33d87140279e7f206332f098fa0bdf3 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 26 Apr 2024 18:21:29 +0300 Subject: [PATCH 02/31] Extracts main impl modules and testsuites --- apps/mg_es_kafka/rebar.config | 4 ++++ apps/mg_es_kafka/src/mg_es_kafka.app.src | 17 +++++++++++++++++ .../src/mg_event_sink_kafka.erl} | 0 .../test/mg_event_sink_kafka_SUITE.erl} | 0 .../test/mg_event_sink_kafka_errors_SUITE.erl} | 0 apps/mg_es_machine/rebar.config | 3 +++ apps/mg_es_machine/src/mg_es_machine.app.src | 16 ++++++++++++++++ .../src/mg_event_sink_machine.erl} | 0 .../test/mg_event_sink_machine_SUITE.erl} | 0 9 files changed, 40 insertions(+) create mode 100644 apps/mg_es_kafka/rebar.config create mode 100644 apps/mg_es_kafka/src/mg_es_kafka.app.src rename apps/{mg_core/src/mg_core_events_sink_kafka.erl => mg_es_kafka/src/mg_event_sink_kafka.erl} (100%) rename apps/{mg_core/test/mg_core_events_sink_kafka_SUITE.erl => mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl} (100%) rename apps/{mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl => mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl} (100%) create mode 100644 apps/mg_es_machine/rebar.config create mode 100644 apps/mg_es_machine/src/mg_es_machine.app.src rename apps/{mg_core/src/mg_core_events_sink_machine.erl => mg_es_machine/src/mg_event_sink_machine.erl} (100%) rename apps/{mg_core/test/mg_core_events_sink_machine_SUITE.erl => mg_es_machine/test/mg_event_sink_machine_SUITE.erl} (100%) diff --git a/apps/mg_es_kafka/rebar.config b/apps/mg_es_kafka/rebar.config new file mode 100644 index 00000000..4853489b --- /dev/null +++ b/apps/mg_es_kafka/rebar.config @@ -0,0 +1,4 @@ +{deps, [ + {brod, "3.16.1"}, + {genlib, {git, 
"https://github.com/valitydev/genlib", {branch, master}}} +]}. diff --git a/apps/mg_es_kafka/src/mg_es_kafka.app.src b/apps/mg_es_kafka/src/mg_es_kafka.app.src new file mode 100644 index 00000000..0cfac286 --- /dev/null +++ b/apps/mg_es_kafka/src/mg_es_kafka.app.src @@ -0,0 +1,17 @@ +{application, mg_core , [ + {description, "Event sink kafka implementation"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + mg_core, + brod + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_events_sink_kafka.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl similarity index 100% rename from apps/mg_core/src/mg_core_events_sink_kafka.erl rename to apps/mg_es_kafka/src/mg_event_sink_kafka.erl diff --git a/apps/mg_core/test/mg_core_events_sink_kafka_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl similarity index 100% rename from apps/mg_core/test/mg_core_events_sink_kafka_SUITE.erl rename to apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl diff --git a/apps/mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl similarity index 100% rename from apps/mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl rename to apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl diff --git a/apps/mg_es_machine/rebar.config b/apps/mg_es_machine/rebar.config new file mode 100644 index 00000000..02763852 --- /dev/null +++ b/apps/mg_es_machine/rebar.config @@ -0,0 +1,3 @@ +{deps, [ + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. 
diff --git a/apps/mg_es_machine/src/mg_es_machine.app.src b/apps/mg_es_machine/src/mg_es_machine.app.src new file mode 100644 index 00000000..bdafc3da --- /dev/null +++ b/apps/mg_es_machine/src/mg_es_machine.app.src @@ -0,0 +1,16 @@ +{application, mg_es_machine , [ + {description, "Event sink machine implementation"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + mg_core + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_events_sink_machine.erl b/apps/mg_es_machine/src/mg_event_sink_machine.erl similarity index 100% rename from apps/mg_core/src/mg_core_events_sink_machine.erl rename to apps/mg_es_machine/src/mg_event_sink_machine.erl diff --git a/apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl similarity index 100% rename from apps/mg_core/test/mg_core_events_sink_machine_SUITE.erl rename to apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl From d1d5b1c9d6b59c66047dc2148d36cb053bab2eef Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 26 Apr 2024 19:37:40 +0300 Subject: [PATCH 03/31] Moves and renames event sink modules --- apps/machinegun/src/machinegun.app.src | 2 + apps/machinegun/src/mg_configurator.erl | 18 +-- apps/machinegun/src/mg_pulse.erl | 7 +- apps/machinegun/src/mg_pulse_prometheus.erl | 27 +---- .../test/mg_prometheus_metric_SUITE.erl | 37 +----- apps/machinegun/test/mg_tests_SUITE.erl | 4 +- apps/mg_core/include/pulse.hrl | 18 --- ...events_sink.erl => mg_core_event_sink.erl} | 2 +- apps/mg_core/src/mg_core_events_machine.erl | 4 +- apps/mg_core/src/mg_core_pulse.erl | 2 - .../test/mg_core_events_machine_SUITE.erl | 4 +- .../test/mg_core_events_stash_SUITE.erl | 4 +- apps/mg_cth/src/mg_cth_configurator.erl | 18 +-- apps/mg_es_kafka/include/pulse.hrl | 17 +++ apps/mg_es_kafka/src/mg_es_kafka.app.src | 2 +- apps/mg_es_kafka/src/mg_event_sink_kafka.erl | 
10 +- .../mg_event_sink_kafka_prometheus_pulse.erl | 95 ++++++++++++++++ .../test/mg_event_sink_kafka_SUITE.erl | 8 +- .../test/mg_event_sink_kafka_errors_SUITE.erl | 10 +- .../mg_event_sink_kafka_prometheus_SUITE.erl | 106 ++++++++++++++++++ .../src/mg_event_sink_machine.erl | 8 +- .../test/mg_event_sink_machine_SUITE.erl | 18 +-- apps/mg_woody/src/mg_woody.app.src | 1 + apps/mg_woody/src/mg_woody_event_sink.erl | 8 +- apps/mg_woody/test/mg_stress_SUITE.erl | 2 +- apps/mg_woody/test/mg_woody_tests_SUITE.erl | 10 +- elvis.config | 2 +- rel_scripts/configurator.escript | 4 +- 28 files changed, 297 insertions(+), 151 deletions(-) rename apps/mg_core/src/{mg_core_events_sink.erl => mg_core_event_sink.erl} (97%) create mode 100644 apps/mg_es_kafka/include/pulse.hrl create mode 100644 apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl create mode 100644 apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index 19f7a2d8..f2117076 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -27,6 +27,8 @@ prometheus, prometheus_cowboy, mg_core, + mg_es_kafka, + mg_es_machine, mg_woody, opentelemetry_api, opentelemetry_exporter, diff --git a/apps/machinegun/src/mg_configurator.erl b/apps/machinegun/src/mg_configurator.erl index f77746a8..3eb998fb 100644 --- a/apps/machinegun/src/mg_configurator.erl +++ b/apps/machinegun/src/mg_configurator.erl @@ -13,7 +13,7 @@ % all but `worker_options.worker` option worker => mg_core_workers_manager:options(), storage := mg_core_machine:storage_options(), - event_sinks => [mg_core_events_sink:handler()], + event_sinks => [mg_core_event_sink:handler()], retries := mg_core_machine:retry_opt(), schedulers := mg_core_machine:schedulers_opt(), default_processing_timeout := timeout(), @@ -184,19 +184,19 @@ api_automaton_options(NSs, EventSinkNS, Pulse) -> NSs ). 
--spec event_sink_options(mg_core_events_sink:handler(), event_sink_ns(), pulse()) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_machine, EventSinkConfig}, EvSinks, Pulse) -> +-spec event_sink_options(mg_core_event_sink:handler(), event_sink_ns(), pulse()) -> mg_core_event_sink:handler(). +event_sink_options({mg_event_sink_machine, EventSinkConfig}, EvSinks, Pulse) -> EventSinkNS = event_sink_namespace_options(EvSinks, Pulse), - {mg_core_events_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, _Config, Pulse) -> - {mg_core_events_sink_kafka, EventSinkConfig#{ + {mg_event_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; +event_sink_options({mg_event_sink_kafka, EventSinkConfig}, _Config, Pulse) -> + {mg_event_sink_kafka, EventSinkConfig#{ pulse => Pulse, encoder => fun mg_woody_event_sink:serialize/3 }}. -spec event_sink_ns_child_spec(event_sink_ns(), atom(), pulse()) -> supervisor:child_spec(). event_sink_ns_child_spec(EventSinkNS, ChildID, Pulse) -> - mg_core_events_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS, Pulse), ChildID). + mg_event_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS, Pulse), ChildID). -spec api_event_sink_options(namespaces(), event_sink_ns(), pulse()) -> mg_woody_event_sink:options(). api_event_sink_options(NSs, EventSinkNS, Pulse) -> @@ -208,11 +208,11 @@ collect_event_sink_machines(NSs) -> NSConfigs = maps:values(NSs), EventSinks = ordsets:from_list([ maps:get(machine_id, SinkConfig) - || NSConfig <- NSConfigs, {mg_core_events_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) + || NSConfig <- NSConfigs, {mg_event_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) ]), ordsets:to_list(EventSinks). --spec event_sink_namespace_options(event_sink_ns(), pulse()) -> mg_core_events_sink_machine:ns_options(). 
+-spec event_sink_namespace_options(event_sink_ns(), pulse()) -> mg_event_sink_machine:ns_options(). event_sink_namespace_options(#{storage := Storage} = EventSinkNS, Pulse) -> NS = <<"_event_sinks">>, MachinesStorage = sub_storage_options(<<"machines">>, Storage), diff --git a/apps/machinegun/src/mg_pulse.erl b/apps/machinegun/src/mg_pulse.erl index 4ad13f62..f3c194e7 100644 --- a/apps/machinegun/src/mg_pulse.erl +++ b/apps/machinegun/src/mg_pulse.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ -module(mg_pulse). -include_lib("mg_woody/include/pulse.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). %% mg_pulse handler -behaviour(mg_core_pulse). @@ -28,7 +29,8 @@ mg_core_pulse:beat() | mg_core_queue_scanner:beat() | #woody_event{} - | #woody_request_handle_error{}. + | #woody_request_handle_error{} + | #mg_event_sink_kafka_sent{}. -type options() :: #{ woody_event_handler_options => woody_event_handler:options(), @@ -48,6 +50,7 @@ handle_beat(Options, Beat) -> ok = mg_core_pulse_otel:handle_beat(Options, Beat), ok = mg_pulse_log:handle_beat(maps:get(woody_event_handler_options, Options, #{}), Beat), ok = mg_pulse_prometheus:handle_beat(#{}, Beat), + ok = mg_event_sink_kafka_prometheus_pulse:handle_beat(#{}, Beat), ok = maybe_handle_lifecycle_kafka(Options, Beat). %% diff --git a/apps/machinegun/src/mg_pulse_prometheus.erl b/apps/machinegun/src/mg_pulse_prometheus.erl index 87abb4e0..71bd1514 100644 --- a/apps/machinegun/src/mg_pulse_prometheus.erl +++ b/apps/machinegun/src/mg_pulse_prometheus.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. 
@@ -240,21 +240,6 @@ setup() -> {labels, [namespace, name]}, {help, "Total number of killed used Machinegun riak pool connections."} ]), - %% Event sink / kafka - true = prometheus_counter:declare([ - {name, mg_events_sink_produced_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of Machinegun event sink events."} - ]), - true = prometheus_histogram:declare([ - {name, mg_events_sink_kafka_produced_duration_seconds}, - {registry, registry()}, - {labels, [namespace, name, action]}, - {buckets, duration_buckets()}, - {duration_unit, seconds}, - {help, "Machinegun event sink addition duration."} - ]), ok. %% Internals @@ -416,16 +401,6 @@ dispatch_metrics(#mg_core_riak_connection_pool_connection_killed{name = {NS, _Ca ok = inc(mg_riak_pool_killed_in_use_connections_total, [NS, Type]); dispatch_metrics(#mg_core_riak_connection_pool_error{name = {NS, _Caller, Type}, reason = connect_timeout}) -> ok = inc(mg_riak_pool_connect_timeout_errors_total, [NS, Type]); -% Event sink operations -dispatch_metrics(#mg_core_events_sink_kafka_sent{ - name = Name, - namespace = NS, - encode_duration = EncodeDuration, - send_duration = SendDuration -}) -> - ok = inc(mg_events_sink_produced_total, [NS, Name]), - ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), - ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); % Unknown dispatch_metrics(_Beat) -> ok. diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index c198c123..5ae741fa 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -19,6 +19,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("stdlib/include/assert.hrl"). -include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). 
-include_lib("prometheus/include/prometheus_model.hrl"). %% tests descriptions @@ -75,7 +76,6 @@ -export([riak_pool_killed_free_connections_test/1]). -export([riak_pool_killed_in_use_connections_test/1]). -export([riak_pool_connect_timeout_errors_test/1]). --export([events_sink_kafka_sent_test/1]). -export([riak_pool_collector_test/1]). @@ -144,8 +144,7 @@ groups() -> riak_pool_queue_limit_reached_errors_test, riak_pool_killed_free_connections_test, riak_pool_killed_in_use_connections_test, - riak_pool_connect_timeout_errors_test, - events_sink_kafka_sent_test + riak_pool_connect_timeout_errors_test ]}, {collectors, [], [ riak_pool_collector_test @@ -729,38 +728,6 @@ riak_pool_connect_timeout_errors_test(_C) -> prometheus_counter:value(mg_riak_pool_connect_timeout_errors_total, [?NS, type]) ). --spec events_sink_kafka_sent_test(config()) -> _. -events_sink_kafka_sent_test(_C) -> - Buckets = test_millisecond_buckets(), - Name = kafka, - _ = maps:fold( - fun(DurationMs, BucketIdx, {Counter, BucketAcc}) -> - ok = test_beat(#mg_core_events_sink_kafka_sent{ - name = Name, - namespace = ?NS, - machine_id = <<"ID">>, - request_context = null, - deadline = undefined, - encode_duration = erlang:convert_time_unit(DurationMs, millisecond, native), - send_duration = erlang:convert_time_unit(DurationMs, millisecond, native), - data_size = 0, - partition = 0, - offset = 0 - }), - ?assertEqual(prometheus_counter:value(mg_events_sink_produced_total, [?NS, Name]), Counter), - {BucketsHits, _} = - prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), - {BucketsHits, _} = - prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, send]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, BucketAcc, 0) + 1, BucketHit), - {Counter + 1, BucketAcc#{BucketIdx => BucketHit}} - end, - {1, #{}}, - Buckets - ). 
- %% -spec riak_pool_collector_test(config()) -> _. diff --git a/apps/machinegun/test/mg_tests_SUITE.erl b/apps/machinegun/test/mg_tests_SUITE.erl index a43fa106..3f2c6175 100644 --- a/apps/machinegun/test/mg_tests_SUITE.erl +++ b/apps/machinegun/test/mg_tests_SUITE.erl @@ -247,11 +247,11 @@ mg_config(#{endpoint := {IP, Port}}, C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_core_events_sink_machine, #{ + {mg_event_sink_machine, #{ name => machine, machine_id => ?ES_ID }}, - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, client => mg_cth:config(kafka_client_name) diff --git a/apps/mg_core/include/pulse.hrl b/apps/mg_core/include/pulse.hrl index c604ebe6..d60be5e9 100644 --- a/apps/mg_core/include/pulse.hrl +++ b/apps/mg_core/include/pulse.hrl @@ -341,21 +341,3 @@ msg_queue_len :: non_neg_integer(), msg_queue_limit :: mg_core_workers_manager:queue_limit() }). - -%% Events sink operations - --record(mg_core_events_sink_kafka_sent, { - name :: atom(), - namespace :: mg_core:ns(), - machine_id :: mg_core:id(), - request_context :: mg_core:request_context(), - deadline :: mg_core_deadline:deadline(), - % in native units - encode_duration :: non_neg_integer(), - % in native units - send_duration :: non_neg_integer(), - % in bytes - data_size :: non_neg_integer(), - partition :: brod:partition(), - offset :: brod:offset() -}). diff --git a/apps/mg_core/src/mg_core_events_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl similarity index 97% rename from apps/mg_core/src/mg_core_events_sink.erl rename to apps/mg_core/src/mg_core_event_sink.erl index 9f30e64b..5ebdd29a 100644 --- a/apps/mg_core/src/mg_core_events_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink). +-module(mg_core_event_sink). -export([add_events/6]). 
diff --git a/apps/mg_core/src/mg_core_events_machine.erl b/apps/mg_core/src/mg_core_events_machine.erl index 6cfcf57f..0742a1ea 100644 --- a/apps/mg_core/src/mg_core_events_machine.erl +++ b/apps/mg_core/src/mg_core_events_machine.erl @@ -123,7 +123,7 @@ machines => mg_core_machine:options(), retries => #{_Subject => genlib_retry:policy()}, pulse => mg_core_pulse:handler(), - event_sinks => [mg_core_events_sink:handler()], + event_sinks => [mg_core_event_sink:handler()], default_processing_timeout => timeout(), event_stash_size => non_neg_integer() }. @@ -433,7 +433,7 @@ push_events_to_event_sinks(Options, ID, ReqCtx, Deadline, Events) -> EventSinks = maps:get(event_sinks, Options, []), lists:foreach( fun(EventSinkHandler) -> - ok = mg_core_events_sink:add_events( + ok = mg_core_event_sink:add_events( EventSinkHandler, Namespace, ID, diff --git a/apps/mg_core/src/mg_core_pulse.erl b/apps/mg_core/src/mg_core_pulse.erl index b25ded4a..3d6e33e0 100644 --- a/apps/mg_core/src/mg_core_pulse.erl +++ b/apps/mg_core/src/mg_core_pulse.erl @@ -75,8 +75,6 @@ | #mg_core_storage_search_finish{} | #mg_core_storage_delete_start{} | #mg_core_storage_delete_finish{} - % Event sink operations - | #mg_core_events_sink_kafka_sent{} % Riak client call handling | #mg_core_riak_client_get_start{} | #mg_core_riak_client_get_finish{} diff --git a/apps/mg_core/test/mg_core_events_machine_SUITE.erl b/apps/mg_core/test/mg_core_events_machine_SUITE.erl index 1e43fa67..1be41886 100644 --- a/apps/mg_core/test/mg_core_events_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_machine_SUITE.erl @@ -35,8 +35,8 @@ -export_type([options/0]). -export([process_signal/4, process_call/4, process_repair/4]). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). 
%% mg_core_storage callbacks diff --git a/apps/mg_core/test/mg_core_events_stash_SUITE.erl b/apps/mg_core/test/mg_core_events_stash_SUITE.erl index bd685e26..0cf1dc47 100644 --- a/apps/mg_core/test/mg_core_events_stash_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_stash_SUITE.erl @@ -16,8 +16,8 @@ -behaviour(mg_core_events_machine). -export([process_signal/4, process_call/4, process_repair/4]). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). %% Pulse diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_configurator.erl index 397ba738..84b71149 100644 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ b/apps/mg_cth/src/mg_cth_configurator.erl @@ -13,7 +13,7 @@ % all but `worker_options.worker` option worker => mg_core_workers_manager:ns_options(), storage := mg_core_machine:storage_options(), - event_sinks => [mg_core_events_sink:handler()], + event_sinks => [mg_core_event_sink:handler()], retries := mg_core_machine:retry_opt(), schedulers := mg_core_machine:schedulers_opt(), default_processing_timeout := timeout(), @@ -151,19 +151,19 @@ api_automaton_options(NSs, EventSinkNS) -> NSs ). --spec event_sink_options(mg_core_events_sink:handler(), _) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_machine, EventSinkConfig}, EvSinks) -> +-spec event_sink_options(mg_core_event_sink:handler(), _) -> mg_core_event_sink:handler(). 
+event_sink_options({mg_event_sink_machine, EventSinkConfig}, EvSinks) -> EventSinkNS = event_sink_namespace_options(EvSinks), - {mg_core_events_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, _Config) -> - {mg_core_events_sink_kafka, EventSinkConfig#{ + {mg_event_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; +event_sink_options({mg_event_sink_kafka, EventSinkConfig}, _Config) -> + {mg_event_sink_kafka, EventSinkConfig#{ pulse => pulse(), encoder => fun mg_woody_event_sink:serialize/3 }}. -spec event_sink_ns_child_spec(_, atom()) -> supervisor:child_spec(). event_sink_ns_child_spec(EventSinkNS, ChildID) -> - mg_core_events_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS), ChildID). + mg_event_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS), ChildID). -spec api_event_sink_options(_, _) -> mg_woody_event_sink:options(). api_event_sink_options(NSs, EventSinkNS) -> @@ -176,11 +176,11 @@ collect_event_sink_machines(NSs) -> EventSinks = ordsets:from_list([ maps:get(machine_id, SinkConfig) || NSConfig <- NSConfigs, - {mg_core_events_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) + {mg_event_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) ]), ordsets:to_list(EventSinks). --spec event_sink_namespace_options(_) -> mg_core_events_sink_machine:ns_options(). +-spec event_sink_namespace_options(_) -> mg_event_sink_machine:ns_options(). 
event_sink_namespace_options(#{storage := Storage} = EventSinkNS) -> NS = <<"_event_sinks">>, MachinesStorage = sub_storage_options(<<"machines">>, Storage), diff --git a/apps/mg_es_kafka/include/pulse.hrl b/apps/mg_es_kafka/include/pulse.hrl new file mode 100644 index 00000000..e13e291c --- /dev/null +++ b/apps/mg_es_kafka/include/pulse.hrl @@ -0,0 +1,17 @@ +%% Events sink operations + +-record(mg_event_sink_kafka_sent, { + name :: atom(), + namespace :: mg_core:ns(), + machine_id :: mg_core:id(), + request_context :: mg_core:request_context(), + deadline :: mg_core_deadline:deadline(), + % in native units + encode_duration :: non_neg_integer(), + % in native units + send_duration :: non_neg_integer(), + % in bytes + data_size :: non_neg_integer(), + partition :: brod:partition(), + offset :: brod:offset() +}). diff --git a/apps/mg_es_kafka/src/mg_es_kafka.app.src b/apps/mg_es_kafka/src/mg_es_kafka.app.src index 0cfac286..59b50e78 100644 --- a/apps/mg_es_kafka/src/mg_es_kafka.app.src +++ b/apps/mg_es_kafka/src/mg_es_kafka.app.src @@ -1,4 +1,4 @@ -{application, mg_core , [ +{application, mg_es_kafka , [ {description, "Event sink kafka implementation"}, {vsn, "1"}, {registered, []}, diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl index 3df114b3..8ba0c1da 100644 --- a/apps/mg_es_kafka/src/mg_event_sink_kafka.erl +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl @@ -14,12 +14,12 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka). +-module(mg_event_sink_kafka). --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). 
%% Types @@ -53,7 +53,7 @@ add_events(Options, NS, MachineID, Events, ReqCtx, Deadline) -> EncodeTimestamp = erlang:monotonic_time(), {ok, Partition, Offset} = produce(Client, Topic, event_key(NS, MachineID), Batch), FinishTimestamp = erlang:monotonic_time(), - ok = mg_core_pulse:handle_beat(Pulse, #mg_core_events_sink_kafka_sent{ + ok = mg_core_pulse:handle_beat(Pulse, #mg_event_sink_kafka_sent{ name = Name, namespace = NS, machine_id = MachineID, diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl new file mode 100644 index 00000000..feadea91 --- /dev/null +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl @@ -0,0 +1,95 @@ +-module(mg_event_sink_kafka_prometheus_pulse). + +-include_lib("mg_es_kafka/include/pulse.hrl"). + +-export([setup/0]). + +%% mg_pulse handler +-behaviour(mg_core_pulse). +-export([handle_beat/2]). + +%% internal types +-type beat() :: #mg_event_sink_kafka_sent{} | mg_core_pulse:beat(). +-type options() :: #{}. +-type metric_name() :: prometheus_metric:name(). +-type metric_label_value() :: term(). + +%% +%% mg_pulse handler +%% + +-spec handle_beat(options(), beat()) -> ok. +handle_beat(_Options, Beat) -> + ok = dispatch_metrics(Beat). + +%% +%% management API +%% + +%% Sets all metrics up. Call this when the app starts. +-spec setup() -> ok. +setup() -> + %% Event sink / kafka + true = prometheus_counter:declare([ + {name, mg_event_sink_produced_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of Machinegun event sink events."} + ]), + true = prometheus_histogram:declare([ + {name, mg_event_sink_kafka_produced_duration_seconds}, + {registry, registry()}, + {labels, [namespace, name, action]}, + {buckets, duration_buckets()}, + {duration_unit, seconds}, + {help, "Machinegun event sink addition duration."} + ]), + ok. + +%% Internals + +-spec dispatch_metrics(beat()) -> ok. 
+% Event sink operations +dispatch_metrics(#mg_event_sink_kafka_sent{ + name = Name, + namespace = NS, + encode_duration = EncodeDuration, + send_duration = SendDuration +}) -> + ok = inc(mg_event_sink_produced_total, [NS, Name]), + ok = observe(mg_event_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), + ok = observe(mg_event_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); +% Unknown +dispatch_metrics(_Beat) -> + ok. + +-spec inc(metric_name(), [metric_label_value()]) -> ok. +inc(Name, Labels) -> + _ = prometheus_counter:inc(registry(), Name, Labels, 1), + ok. + +-spec observe(metric_name(), [metric_label_value()], number()) -> ok. +observe(Name, Labels, Value) -> + _ = prometheus_histogram:observe(registry(), Name, Labels, Value), + ok. + +-spec registry() -> prometheus_registry:registry(). +registry() -> + default. + +-spec duration_buckets() -> [number()]. +duration_buckets() -> + [ + 0.001, + 0.005, + 0.010, + 0.025, + 0.050, + 0.100, + 0.250, + 0.500, + 1, + 2.5, + 5, + 10 + ]. diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl index 9fbb2f68..2082985d 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka_SUITE). +-module(mg_event_sink_kafka_SUITE). -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("kafka_protocol/include/kpro_public.hrl"). @@ -63,7 +63,7 @@ groups() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_core_events_sink_kafka, '_', '_'}, x), + % dbg:tpl({mg_event_sink_kafka, '_', '_'}, x), AppSpecs = [ {brod, [ {clients, [ @@ -105,7 +105,7 @@ add_events_test(C) -> -spec add_events(config()) -> ok. 
add_events(C) -> F = fun() -> - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, @@ -138,7 +138,7 @@ do_read_all(Hosts, Topic, Partition, Offset, Result) -> do_read_all(Hosts, Topic, Partition, NewOffset, NewRecords ++ Result) end. --spec event_sink_options() -> mg_core_events_sink_kafka:options(). +-spec event_sink_options() -> mg_event_sink_kafka:options(). event_sink_options() -> #{ name => kafka, diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl index f24f56fe..ac6f3899 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka_errors_SUITE). +-module(mg_event_sink_kafka_errors_SUITE). -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("mg_cth/include/mg_cth.hrl"). @@ -78,7 +78,7 @@ groups() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_core_events_sink_kafka, '_', '_'}, x), + % dbg:tpl({mg_event_sink_kafka, '_', '_'}, x), {Events, _} = mg_core_events:generate_events_with_range( [{#{}, Body} || Body <- [1, 2, 3]], undefined @@ -143,7 +143,7 @@ add_events_ssl_failed_test(C) -> _ = ?assertException( throw, {transient, {event_sink_unavailable, {connect_failed, [{_, {{failed_to_upgrade_to_ssl, _}, _ST}}]}}}, - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, @@ -287,7 +287,7 @@ add_events_nxdomain_test(C) -> _ = mg_cth:stop_applications(Apps) end. --spec event_sink_options() -> mg_core_events_sink_kafka:options(). +-spec event_sink_options() -> mg_event_sink_kafka:options(). 
event_sink_options() -> #{ name => kafka, @@ -315,7 +315,7 @@ change_proxy_mode(ModeWas, Mode, Proxy, C) -> -spec add_events(config()) -> ok. add_events(C) -> - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl new file mode 100644 index 00000000..154a4fef --- /dev/null +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl @@ -0,0 +1,106 @@ +-module(mg_event_sink_kafka_prometheus_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). + +-export([event_sink_kafka_sent_test/1]). + +-define(NS, <<"NS">>). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, beats} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {beats, [parallel], [ + event_sink_kafka_sent_test + ]} + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + C. + +-spec end_per_suite(config()) -> ok. +end_per_suite(_C) -> + ok. + +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(_, C) -> + C. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% Tests + +-spec event_sink_kafka_sent_test(config()) -> _. 
+event_sink_kafka_sent_test(_C) -> + Buckets = test_millisecond_buckets(), + Name = kafka, + _ = maps:fold( + fun(DurationMs, BucketIdx, {Counter, BucketAcc}) -> + ok = test_beat(#mg_event_sink_kafka_sent{ + name = Name, + namespace = ?NS, + machine_id = <<"ID">>, + request_context = null, + deadline = undefined, + encode_duration = erlang:convert_time_unit(DurationMs, millisecond, native), + send_duration = erlang:convert_time_unit(DurationMs, millisecond, native), + data_size = 0, + partition = 0, + offset = 0 + }), + ?assertEqual(prometheus_counter:value(mg_event_sink_produced_total, [?NS, Name]), Counter), + {BucketsHits, _} = + prometheus_histogram:value(mg_event_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), + {BucketsHits, _} = + prometheus_histogram:value(mg_event_sink_kafka_produced_duration_seconds, [?NS, Name, send]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, BucketAcc, 0) + 1, BucketHit), + {Counter + 1, BucketAcc#{BucketIdx => BucketHit}} + end, + {1, #{}}, + Buckets + ). + +%% Metrics utils + +-spec test_beat(term()) -> ok. +test_beat(Beat) -> + mg_event_sink_kafka_prometheus_pulse:handle_beat(#{}, Beat). + +-spec test_millisecond_buckets() -> #{non_neg_integer() => pos_integer()}. +test_millisecond_buckets() -> + #{ + 0 => 1, + 1 => 1, + 5 => 2, + 10 => 3 + }. diff --git a/apps/mg_es_machine/src/mg_event_sink_machine.erl b/apps/mg_es_machine/src/mg_event_sink_machine.erl index 7232691b..3a760b50 100644 --- a/apps/mg_es_machine/src/mg_event_sink_machine.erl +++ b/apps/mg_es_machine/src/mg_event_sink_machine.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2017 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_machine). 
+-module(mg_event_sink_machine). %% API -export_type([event_body/0]). @@ -26,8 +26,8 @@ -export([get_history/3]). -export([repair/4]). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). %% mg_core_machine handler diff --git a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl index 249d5866..ddac1ce4 100644 --- a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl +++ b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_machine_SUITE). +-module(mg_event_sink_machine_SUITE). -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -61,7 +61,7 @@ groups() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_core_events_sink_machine, '_', '_'}, x), + % dbg:tpl({mg_event_sink_machine, '_', '_'}, x), Apps = mg_cth:start_applications([mg_core]), Pid = start_event_sink(event_sink_ns_options()), true = erlang:unlink(Pid), @@ -89,7 +89,7 @@ add_events_test(C) -> -spec get_unexisted_event_test(config()) -> _. get_unexisted_event_test(_C) -> - [] = mg_core_events_sink_machine:get_history( + [] = mg_event_sink_machine:get_history( event_sink_ns_options(), ?ES_ID, {42, undefined, forward} @@ -114,7 +114,7 @@ not_idempotent_add_get_events_test(C) -> -spec add_events(config()) -> _. 
add_events(C) -> - mg_core_events_sink_machine:add_events( + mg_event_sink_machine:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, @@ -127,23 +127,23 @@ add_events(C) -> get_history(_C) -> HRange = {undefined, undefined, forward}, % _ = ct:pal("~p", [PreparedEvents]), - EventsSinkEvents = mg_core_events_sink_machine:get_history( + EventsSinkEvents = mg_event_sink_machine:get_history( event_sink_ns_options(), ?ES_ID, HRange ), [{ID, Body} || #{id := ID, body := Body} <- EventsSinkEvents]. --spec start_event_sink(mg_core_events_sink_machine:ns_options()) -> pid(). +-spec start_event_sink(mg_event_sink_machine:ns_options()) -> pid(). start_event_sink(Options) -> mg_core_utils:throw_if_error( genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - [mg_core_events_sink_machine:child_spec(Options, event_sink)] + [mg_event_sink_machine:child_spec(Options, event_sink)] ) ). --spec event_sink_ns_options() -> mg_core_events_sink_machine:ns_options(). +-spec event_sink_ns_options() -> mg_event_sink_machine:ns_options(). event_sink_ns_options() -> #{ namespace => ?ES_ID, @@ -156,7 +156,7 @@ event_sink_ns_options() -> events_storage => mg_core_storage_memory }. --spec event_sink_options() -> mg_core_events_sink_machine:options(). +-spec event_sink_options() -> mg_event_sink_machine:options(). 
event_sink_options() -> NSOptions = event_sink_ns_options(), NSOptions#{ diff --git a/apps/mg_woody/src/mg_woody.app.src b/apps/mg_woody/src/mg_woody.app.src index ff8e11ce..dfce7df8 100644 --- a/apps/mg_woody/src/mg_woody.app.src +++ b/apps/mg_woody/src/mg_woody.app.src @@ -25,6 +25,7 @@ mg_proto, woody, mg_core, + mg_es_machine, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_woody/src/mg_woody_event_sink.erl b/apps/mg_woody/src/mg_woody_event_sink.erl index e2303de1..71b0970e 100644 --- a/apps/mg_woody/src/mg_woody_event_sink.erl +++ b/apps/mg_woody/src/mg_woody_event_sink.erl @@ -30,7 +30,7 @@ %% %% API %% --type options() :: {[mg_core:id()], mg_core_events_sink_machine:ns_options()}. +-type options() :: {[mg_core:id()], mg_event_sink_machine:ns_options()}. -spec handler(options()) -> mg_woody_utils:woody_handler(). handler(Options) -> @@ -57,7 +57,7 @@ handle_function('GetHistory', {EventSinkID, Range}, WoodyContext, {AvaliableEven }, fun() -> _ = check_event_sink(AvaliableEventSinks, EventSinkID), - mg_core_events_sink_machine:get_history( + mg_event_sink_machine:get_history( Options, EventSinkID, mg_woody_packer:unpack(history_range, Range) @@ -68,7 +68,7 @@ handle_function('GetHistory', {EventSinkID, Range}, WoodyContext, {AvaliableEven {ok, mg_woody_packer:pack(sink_history, SinkHistory)}. %% -%% events_sink events encoder +%% event_sink events encoder %% -spec serialize(mg_core:ns(), mg_core:id(), mg_core_events:event()) -> iodata(). @@ -110,6 +110,6 @@ check_event_sink(AvaliableEventSinks, EventSinkID) -> throw({logic, event_sink_not_found}) end. --spec pulse(mg_core_events_sink_machine:ns_options()) -> mg_core_pulse:handler(). +-spec pulse(mg_event_sink_machine:ns_options()) -> mg_core_pulse:handler(). pulse(#{pulse := Pulse}) -> Pulse. 
diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index 6d77187c..da3c6f09 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -123,7 +123,7 @@ mg_woody_config(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_machine, #{name => default, machine_id => ?ES_ID}} + {mg_event_sink_machine, #{name => default, machine_id => ?ES_ID}} ], event_stash_size => 10 } diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index 7c088ad5..273f815c 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -353,11 +353,11 @@ mg_woody_config(C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_core_events_sink_machine, #{ + {mg_event_sink_machine, #{ name => machine, machine_id => ?ES_ID }}, - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, client => mg_cth:config(kafka_client_name) @@ -731,7 +731,7 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_machine, #{name => default, machine_id => <<"SingleES">>}} + {mg_event_sink_machine, #{name => default, machine_id => <<"SingleES">>}} ] }, <<"2">> => #{ @@ -747,11 +747,11 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_machine, #{ + {mg_event_sink_machine, #{ name => machine, machine_id => <<"SingleES">> }}, - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => <<"mg_core_event_sink">>, client => mg_cth:config(kafka_client_name) diff --git a/elvis.config b/elvis.config index ae0fa37d..91ccff3a 100644 --- a/elvis.config +++ b/elvis.config @@ -101,7 +101,7 @@ }}, {elvis_style, used_ignored_variable, #{ ignore => [ - mg_core_events_sink_kafka_errors_SUITE, + mg_event_sink_kafka_errors_SUITE, 
mg_core_workers_SUITE ] }}, diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index 7e570da6..12a2306c 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -553,12 +553,12 @@ event_sink({Name, ESYamlConfig}) -> event_sink(?C:atom(?C:conf([type], ESYamlConfig)), Name, ESYamlConfig). event_sink(machine, Name, ESYamlConfig) -> - {mg_core_events_sink_machine, #{ + {mg_event_sink_machine, #{ name => ?C:atom(Name), machine_id => ?C:conf([machine_id], ESYamlConfig) }}; event_sink(kafka, Name, ESYamlConfig) -> - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => ?C:atom(Name), client => ?C:atom(?C:conf([client], ESYamlConfig)), topic => ?C:conf([topic], ESYamlConfig) From 303d7c3d4b2002151b4bb0f540b9540f1d8b42ff Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Sat, 27 Apr 2024 14:46:20 +0300 Subject: [PATCH 04/31] Fixes kafka prometheus testsuite --- apps/machinegun/src/machinegun.erl | 1 + apps/mg_es_kafka/src/mg_es_kafka.app.src | 3 ++- .../test/mg_event_sink_kafka_prometheus_SUITE.erl | 8 +++++--- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/apps/machinegun/src/machinegun.erl b/apps/machinegun/src/machinegun.erl index 0d2308b1..e4478812 100644 --- a/apps/machinegun/src/machinegun.erl +++ b/apps/machinegun/src/machinegun.erl @@ -40,4 +40,5 @@ setup_metrics() -> ok = woody_ranch_prometheus_collector:setup(), ok = woody_hackney_prometheus_collector:setup(), ok = mg_pulse_prometheus:setup(), + ok = mg_event_sink_kafka_prometheus_pulse:setup(), ok = mg_riak_prometheus:setup(). 
diff --git a/apps/mg_es_kafka/src/mg_es_kafka.app.src b/apps/mg_es_kafka/src/mg_es_kafka.app.src index 59b50e78..9168b3fc 100644 --- a/apps/mg_es_kafka/src/mg_es_kafka.app.src +++ b/apps/mg_es_kafka/src/mg_es_kafka.app.src @@ -6,8 +6,9 @@ kernel, stdlib, genlib, + brod, mg_core, - brod + prometheus ]}, {env, []}, {modules, []}, diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl index 154a4fef..690d38d3 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl @@ -42,11 +42,13 @@ groups() -> %% -spec init_per_suite(config()) -> config(). init_per_suite(C) -> - C. + Apps = mg_cth:start_applications([mg_es_kafka]), + ok = mg_event_sink_kafka_prometheus_pulse:setup(), + [{apps, Apps} | C]. -spec end_per_suite(config()) -> ok. -end_per_suite(_C) -> - ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). -spec init_per_group(group_name(), config()) -> config(). 
init_per_group(_, C) -> From 5ffc406c1f83bd61b20425bafe8f2b1bf2c97766 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Sat, 27 Apr 2024 16:02:23 +0300 Subject: [PATCH 05/31] Moves child specs configurator into separate umbrella app --- apps/machinegun/src/machinegun.app.src | 3 +- apps/machinegun/src/machinegun.erl | 40 ++- apps/mg_conf/rebar.config | 3 + apps/mg_conf/src/mg_conf.app.src | 18 ++ .../src/mg_conf.erl} | 46 +--- .../src/mg_conf_namespace_sup.erl} | 4 +- apps/mg_cth/src/mg_cth.app.src | 3 + apps/mg_cth/src/mg_cth_configurator.erl | 233 +----------------- .../test/mg_modernizer_tests_SUITE.erl | 6 +- apps/mg_woody/test/mg_stress_SUITE.erl | 6 +- apps/mg_woody/test/mg_woody_tests_SUITE.erl | 18 +- 11 files changed, 104 insertions(+), 276 deletions(-) create mode 100644 apps/mg_conf/rebar.config create mode 100644 apps/mg_conf/src/mg_conf.app.src rename apps/{machinegun/src/mg_configurator.erl => mg_conf/src/mg_conf.erl} (87%) rename apps/{machinegun/src/mg_namespace_sup.erl => mg_conf/src/mg_conf_namespace_sup.erl} (96%) diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index f2117076..d9c1adac 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -32,7 +32,8 @@ mg_woody, opentelemetry_api, opentelemetry_exporter, - opentelemetry + opentelemetry, + mg_conf ]}, {env, []}, {modules, []}, diff --git a/apps/machinegun/src/machinegun.erl b/apps/machinegun/src/machinegun.erl index e4478812..6b0379bd 100644 --- a/apps/machinegun/src/machinegun.erl +++ b/apps/machinegun/src/machinegun.erl @@ -22,12 +22,8 @@ stop() -> start(_StartType, _StartArgs) -> Config = maps:from_list(genlib_app:env(?MODULE)), ok = setup_metrics(), - ChildSpecs = mg_configurator:construct_child_specs(Config), - genlib_adhoc_supervisor:start_link( - {local, ?MODULE}, - #{strategy => rest_for_one}, - ChildSpecs - ). 
+ ChildSpecs = mg_conf:construct_child_specs(Config, additional_routes(Config)), + genlib_adhoc_supervisor:start_link({local, ?MODULE}, #{strategy => rest_for_one}, ChildSpecs). -spec stop(any()) -> ok. stop(_State) -> @@ -42,3 +38,35 @@ setup_metrics() -> ok = mg_pulse_prometheus:setup(), ok = mg_event_sink_kafka_prometheus_pulse:setup(), ok = mg_riak_prometheus:setup(). + +%% TODO Maybe move those to `mg_conf'. + +-spec additional_routes(mg_conf:config()) -> [woody_server_thrift_http_handler:route(any())]. +additional_routes(Config) -> + HealthChecks = maps:get(health_check, Config, #{}), + [ + get_startup_route(), + get_health_route(HealthChecks), + get_prometheus_route() + ]. + +-spec get_startup_route() -> {iodata(), module(), _Opts :: any()}. +get_startup_route() -> + EvHandler = {erl_health_event_handler, []}, + Check = #{ + startup => #{ + runner => {mg_health_check, startup, []}, + event_handler => EvHandler + } + }, + erl_health_handle:get_startup_route(Check). + +-spec get_health_route(erl_health:check()) -> {iodata(), module(), _Opts :: any()}. +get_health_route(Check0) -> + EvHandler = {erl_health_event_handler, []}, + Check = maps:map(fun(_, V = {_, _, _}) -> #{runner => V, event_handler => EvHandler} end, Check0), + erl_health_handle:get_route(Check). + +-spec get_prometheus_route() -> {iodata(), module(), _Opts :: any()}. +get_prometheus_route() -> + {"/metrics/[:registry]", prometheus_cowboy2_handler, []}. diff --git a/apps/mg_conf/rebar.config b/apps/mg_conf/rebar.config new file mode 100644 index 00000000..02763852 --- /dev/null +++ b/apps/mg_conf/rebar.config @@ -0,0 +1,3 @@ +{deps, [ + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. 
diff --git a/apps/mg_conf/src/mg_conf.app.src b/apps/mg_conf/src/mg_conf.app.src new file mode 100644 index 00000000..aa4b5056 --- /dev/null +++ b/apps/mg_conf/src/mg_conf.app.src @@ -0,0 +1,18 @@ +{application, mg_conf, [ + {description, "Machinegun configuration"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + mg_core, + mg_es_kafka, + mg_es_machine, + mg_woody + ]}, + {env, []}, + {modules, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/machinegun/src/mg_configurator.erl b/apps/mg_conf/src/mg_conf.erl similarity index 87% rename from apps/machinegun/src/mg_configurator.erl rename to apps/mg_conf/src/mg_conf.erl index 3eb998fb..17a38cea 100644 --- a/apps/machinegun/src/mg_configurator.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -1,6 +1,6 @@ --module(mg_configurator). +-module(mg_conf). --export([construct_child_specs/1]). +-export([construct_child_specs/2]). -type modernizer() :: #{ current_format_version := mg_core_events:format_version(), @@ -33,26 +33,31 @@ woody_server := mg_woody:woody_server(), event_sink_ns := event_sink_ns(), namespaces := namespaces(), - pulse := pulse(), quotas => [mg_core_quota_worker:options()], + pulse := pulse(), health_check => erl_health:check() }. +-export_type([event_sink_ns/0]). +-export_type([namespaces/0]). +-export_type([config/0]). + -type processor() :: mg_woody_processor:options(). -type pulse() :: mg_core_pulse:handler(). --spec construct_child_specs(config()) -> [supervisor:child_spec()]. +-spec construct_child_specs(config(), [woody_server_thrift_http_handler:route(any())]) -> [supervisor:child_spec()]. 
construct_child_specs( #{ woody_server := WoodyServer, event_sink_ns := EventSinkNS, namespaces := Namespaces, pulse := Pulse - } = Config + } = Config, + AdditionalRoutes ) -> Quotas = maps:get(quotas, Config, []), - HealthChecks = maps:get(health_check, Config, #{}), + ClusterOpts = maps:get(cluster, Config, #{}), QuotasChildSpec = quotas_child_specs(Quotas, quota), @@ -65,11 +70,7 @@ construct_child_specs( automaton => api_automaton_options(Namespaces, EventSinkNS, Pulse), event_sink => api_event_sink_options(Namespaces, EventSinkNS, Pulse), woody_server => WoodyServer, - additional_routes => [ - get_startup_route(), - get_health_route(HealthChecks), - get_prometheus_route() - ] + additional_routes => AdditionalRoutes } ), ClusterSpec = mg_core_union:child_spec(ClusterOpts), @@ -84,27 +85,6 @@ construct_child_specs( %% --spec get_startup_route() -> {iodata(), module(), _Opts :: any()}. -get_startup_route() -> - EvHandler = {erl_health_event_handler, []}, - Check = #{ - startup => #{ - runner => {mg_health_check, startup, []}, - event_handler => EvHandler - } - }, - erl_health_handle:get_startup_route(Check). - --spec get_health_route(erl_health:check()) -> {iodata(), module(), _Opts :: any()}. -get_health_route(Check0) -> - EvHandler = {erl_health_event_handler, []}, - Check = maps:map(fun(_, V = {_, _, _}) -> #{runner => V, event_handler => EvHandler} end, Check0), - erl_health_handle:get_route(Check). - --spec get_prometheus_route() -> {iodata(), module(), _Opts :: any()}. -get_prometheus_route() -> - {"/metrics/[:registry]", prometheus_cowboy2_handler, []}. - -spec quotas_child_specs([mg_core_quota_worker:options()], atom()) -> [supervisor:child_spec()]. quotas_child_specs(Quotas, ChildID) -> [ @@ -118,7 +98,7 @@ events_machines_child_specs(NSs, EventSinkNS, Pulse) -> events_machine_options(NS, NSs, EventSinkNS, Pulse) || NS <- maps:keys(NSs) ], - mg_namespace_sup:child_spec(NsOptions, namespaces_sup). 
+ mg_conf_namespace_sup:child_spec(NsOptions, namespaces_sup). -spec events_machine_options(mg_core:ns(), namespaces(), event_sink_ns(), pulse()) -> mg_core_events_machine:options(). events_machine_options(NS, NSs, EventSinkNS, Pulse) -> diff --git a/apps/machinegun/src/mg_namespace_sup.erl b/apps/mg_conf/src/mg_conf_namespace_sup.erl similarity index 96% rename from apps/machinegun/src/mg_namespace_sup.erl rename to apps/mg_conf/src/mg_conf_namespace_sup.erl index 97faf405..caef316d 100644 --- a/apps/machinegun/src/mg_namespace_sup.erl +++ b/apps/mg_conf/src/mg_conf_namespace_sup.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2022 Valitydev +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_namespace_sup). +-module(mg_conf_namespace_sup). -type namespaces() :: [mg_core_events_machine:options()]. diff --git a/apps/mg_cth/src/mg_cth.app.src b/apps/mg_cth/src/mg_cth.app.src index 9d35b704..0d65a4a2 100644 --- a/apps/mg_cth/src/mg_cth.app.src +++ b/apps/mg_cth/src/mg_cth.app.src @@ -6,6 +6,9 @@ kernel, stdlib, genlib, + mg_core, + mg_es_machine, + mg_es_kafka, mg_woody ]}, {env, []}, diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_configurator.erl index 84b71149..c0957612 100644 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ b/apps/mg_cth/src/mg_cth_configurator.erl @@ -2,240 +2,15 @@ -export([construct_child_specs/1]). --type modernizer() :: #{ - current_format_version := mg_core_events:format_version(), - handler := mg_woody_modernizer:options() -}. 
- --type events_machines() :: #{ - processor := processor(), - modernizer => modernizer(), - % all but `worker_options.worker` option - worker => mg_core_workers_manager:ns_options(), - storage := mg_core_machine:storage_options(), - event_sinks => [mg_core_event_sink:handler()], - retries := mg_core_machine:retry_opt(), - schedulers := mg_core_machine:schedulers_opt(), - default_processing_timeout := timeout(), - suicide_probability => mg_core_machine:suicide_probability(), - event_stash_size := non_neg_integer() -}. - --type event_sink_ns() :: #{ - default_processing_timeout := timeout(), - storage => mg_core_storage:options(), - worker => mg_core_worker:options() -}. - -type config() :: #{ woody_server := mg_woody:woody_server(), - event_sink_ns := event_sink_ns(), - namespaces := #{mg_core:ns() => events_machines()}, + event_sink_ns := mg_conf:event_sink_ns(), + namespaces := mg_conf:namespaces(), quotas => [mg_core_quota_worker:options()] }. --type processor() :: mg_woody_processor:options(). - -spec construct_child_specs(config() | undefined) -> _. construct_child_specs(undefined) -> []; -construct_child_specs( - #{ - woody_server := WoodyServer, - event_sink_ns := EventSinkNS, - namespaces := Namespaces - } = Config -) -> - Quotas = maps:get(quotas, Config, []), - - QuotasChSpec = quotas_child_specs(Quotas, quota), - EventSinkChSpec = event_sink_ns_child_spec(EventSinkNS, event_sink), - EventMachinesChSpec = events_machines_child_specs(Namespaces, EventSinkNS), - WoodyServerChSpec = mg_woody:child_spec( - woody_server, - #{ - woody_server => WoodyServer, - automaton => api_automaton_options(Namespaces, EventSinkNS), - event_sink => api_event_sink_options(Namespaces, EventSinkNS), - pulse => mg_cth_pulse - } - ), - - lists:flatten([ - EventSinkChSpec, - WoodyServerChSpec, - QuotasChSpec, - EventMachinesChSpec - ]). - -%% - --spec quotas_child_specs(_, atom()) -> [supervisor:child_spec()]. 
-quotas_child_specs(Quotas, ChildID) -> - [ - mg_core_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) - || Options <- Quotas - ]. - --spec events_machines_child_specs(_, _) -> [supervisor:child_spec()]. -events_machines_child_specs(NSs, EventSinkNS) -> - [ - mg_core_events_machine:child_spec( - events_machine_options(NS, NSs, EventSinkNS), - binary_to_atom(NS, utf8) - ) - || NS <- maps:keys(NSs) - ]. - --spec events_machine_options(mg_core:ns(), _, event_sink_ns()) -> mg_core_events_machine:options(). -events_machine_options(NS, NSs, EventSinkNS) -> - NSConfigs = maps:get(NS, NSs), - #{processor := ProcessorConfig, storage := Storage} = NSConfigs, - EventSinks = [ - event_sink_options(SinkConfig, EventSinkNS) - || SinkConfig <- maps:get(event_sinks, NSConfigs, []) - ], - EventsStorage = sub_storage_options(<<"events">>, Storage), - #{ - namespace => NS, - processor => processor(ProcessorConfig), - machines => machine_options(NS, NSConfigs), - events_storage => EventsStorage, - event_sinks => EventSinks, - pulse => pulse(), - default_processing_timeout => maps:get(default_processing_timeout, NSConfigs), - event_stash_size => maps:get(event_stash_size, NSConfigs, 0) - }. - --spec machine_options(mg_core:ns(), events_machines()) -> mg_core_machine:options(). -machine_options(NS, Config) -> - #{storage := Storage} = Config, - Options = maps:with( - [ - retries, - timer_processing_timeout - ], - Config - ), - MachinesStorage = sub_storage_options(<<"machines">>, Storage), - NotificationsStorage = sub_storage_options(<<"notifications">>, Storage), - Options#{ - namespace => NS, - storage => MachinesStorage, - worker => worker_manager_options(Config), - schedulers => maps:get(schedulers, Config, #{}), - pulse => pulse(), - notification => #{ - namespace => NS, - pulse => pulse(), - storage => NotificationsStorage - }, - % TODO сделать аналогично в event_sink'е и тэгах - suicide_probability => maps:get(suicide_probability, Config, undefined) - }. 
- --spec api_automaton_options(_, event_sink_ns()) -> mg_woody_automaton:options(). -api_automaton_options(NSs, EventSinkNS) -> - maps:fold( - fun(NS, ConfigNS, Options) -> - Options#{ - NS => maps:merge( - #{ - machine => events_machine_options(NS, NSs, EventSinkNS) - }, - modernizer_options(maps:get(modernizer, ConfigNS, undefined)) - ) - } - end, - #{}, - NSs - ). - --spec event_sink_options(mg_core_event_sink:handler(), _) -> mg_core_event_sink:handler(). -event_sink_options({mg_event_sink_machine, EventSinkConfig}, EvSinks) -> - EventSinkNS = event_sink_namespace_options(EvSinks), - {mg_event_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; -event_sink_options({mg_event_sink_kafka, EventSinkConfig}, _Config) -> - {mg_event_sink_kafka, EventSinkConfig#{ - pulse => pulse(), - encoder => fun mg_woody_event_sink:serialize/3 - }}. - --spec event_sink_ns_child_spec(_, atom()) -> supervisor:child_spec(). -event_sink_ns_child_spec(EventSinkNS, ChildID) -> - mg_event_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS), ChildID). - --spec api_event_sink_options(_, _) -> mg_woody_event_sink:options(). -api_event_sink_options(NSs, EventSinkNS) -> - EventSinkMachines = collect_event_sink_machines(NSs), - {EventSinkMachines, event_sink_namespace_options(EventSinkNS)}. - --spec collect_event_sink_machines(_) -> [mg_core:id()]. -collect_event_sink_machines(NSs) -> - NSConfigs = maps:values(NSs), - EventSinks = ordsets:from_list([ - maps:get(machine_id, SinkConfig) - || NSConfig <- NSConfigs, - {mg_event_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) - ]), - ordsets:to_list(EventSinks). - --spec event_sink_namespace_options(_) -> mg_event_sink_machine:ns_options(). 
-event_sink_namespace_options(#{storage := Storage} = EventSinkNS) -> - NS = <<"_event_sinks">>, - MachinesStorage = sub_storage_options(<<"machines">>, Storage), - EventsStorage = sub_storage_options(<<"events">>, Storage), - EventSinkNS#{ - namespace => NS, - pulse => pulse(), - storage => MachinesStorage, - events_storage => EventsStorage, - worker => worker_manager_options(EventSinkNS) - }. - --spec worker_manager_options(map()) -> mg_core_workers_manager:ns_options(). -worker_manager_options(Config) -> - maps:merge( - #{ - %% Use 'global' process registry - registry => mg_core_procreg_global, - sidecar => mg_cth_worker - }, - maps:get(worker, Config, #{}) - ). - --spec processor(processor()) -> mg_core_utils:mod_opts(). -processor(Processor) -> - {mg_woody_processor, Processor#{event_handler => {mg_woody_event_handler, pulse()}}}. - --spec sub_storage_options(mg_core:ns(), mg_core_machine:storage_options()) -> - mg_core_machine:storage_options(). -sub_storage_options(SubNS, Storage0) -> - Storage1 = mg_core_utils:separate_mod_opts(Storage0, #{}), - Storage2 = add_bucket_postfix(SubNS, Storage1), - Storage2. - --spec add_bucket_postfix(mg_core:ns(), mg_core_storage:options()) -> mg_core_storage:options(). -add_bucket_postfix(_, {mg_core_storage_memory, _} = Storage) -> - Storage; -add_bucket_postfix(SubNS, {mg_core_storage_riak, #{bucket := Bucket} = Options}) -> - {mg_core_storage_riak, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. - --spec pulse() -> mg_core_pulse:handler(). -pulse() -> - mg_cth_pulse. - --spec modernizer_options(modernizer() | undefined) -> - #{modernizer => mg_core_events_modernizer:options()}. 
-modernizer_options(#{current_format_version := CurrentFormatVersion, handler := WoodyClient}) -> - #{ - modernizer => #{ - current_format_version => CurrentFormatVersion, - handler => - {mg_woody_modernizer, WoodyClient#{ - event_handler => {mg_woody_event_handler, pulse()} - }} - } - }; -modernizer_options(undefined) -> - #{}. +construct_child_specs(Config) -> + mg_conf:construct_child_specs(Config#{pulse => mg_cth_pulse}, []). diff --git a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl index 58fee70f..3662a022 100644 --- a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl @@ -176,7 +176,11 @@ mg_woody_config(Name, C) -> timers => #{} }, retries => #{}, - event_stash_size => 0 + event_stash_size => 0, + worker => #{ + registry => mg_core_procreg_global, + sidecar => mg_cth_worker + } }, case Name of legacy_activities -> diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index da3c6f09..7212ca8b 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -125,7 +125,11 @@ mg_woody_config(_C) -> event_sinks => [ {mg_event_sink_machine, #{name => default, machine_id => ?ES_ID}} ], - event_stash_size => 10 + event_stash_size => 10, + worker => #{ + registry => mg_core_procreg_global, + sidecar => mg_cth_worker + } } }, event_sink_ns => #{ diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index 273f815c..3a67094a 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -362,7 +362,11 @@ mg_woody_config(C) -> topic => ?ES_ID, client => mg_cth:config(kafka_client_name) }} - ] + ], + worker => #{ + registry => mg_core_procreg_global, + sidecar => mg_cth_worker + } } }, event_sink_ns => #{ @@ -732,7 +736,11 @@ config_with_multiple_event_sinks(_C) -> retries => #{}, event_sinks => [ 
{mg_event_sink_machine, #{name => default, machine_id => <<"SingleES">>}} - ] + ], + worker => #{ + registry => mg_core_procreg_global, + sidecar => mg_cth_worker + } }, <<"2">> => #{ storage => mg_core_storage_memory, @@ -756,7 +764,11 @@ config_with_multiple_event_sinks(_C) -> topic => <<"mg_core_event_sink">>, client => mg_cth:config(kafka_client_name) }} - ] + ], + worker => #{ + registry => mg_core_procreg_global, + sidecar => mg_cth_worker + } } }, event_sink_ns => #{ From 4c73dc4e17baa00dedc995b1c3a92fae62bb0e33 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Sat, 27 Apr 2024 16:09:59 +0300 Subject: [PATCH 06/31] Extracts 'mg_riak' in umbrella --- README.md | 7 + apps/machinegun/src/machinegun.app.src | 1 + apps/machinegun/src/machinegun.erl | 5 +- apps/machinegun/src/mg_pulse.erl | 3 + apps/machinegun/src/mg_pulse_prometheus.erl | 88 +-- .../test/mg_prometheus_metric_SUITE.erl | 265 +-------- apps/mg_conf/src/mg_conf.app.src | 1 + apps/mg_conf/src/mg_conf.erl | 4 +- apps/mg_core/include/pulse.hrl | 56 -- apps/mg_core/rebar.config | 25 - apps/mg_core/src/mg_core.app.src | 2 - apps/mg_core/src/mg_core_pulse.erl | 15 +- apps/mg_core/test/mg_core_storages_SUITE.erl | 150 +---- apps/mg_riak/include/pulse.hrl | 55 ++ apps/mg_riak/rebar.config | 32 ++ apps/mg_riak/src/mg_riak.app.src | 41 ++ apps/mg_riak/src/mg_riak.erl | 3 + .../src/mg_riak_prometheus.erl | 4 +- .../src/mg_riak_prometheus_collector.erl | 2 +- apps/mg_riak/src/mg_riak_pulse.erl | 29 + apps/mg_riak/src/mg_riak_pulse_prometheus.erl | 167 ++++++ .../src/mg_riak_storage.erl} | 30 +- .../test/mg_riak_prometheus_metric_SUITE.erl | 349 ++++++++++++ apps/mg_riak/test/mg_riak_storage_SUITE.erl | 514 ++++++++++++++++++ rel_scripts/configurator.escript | 2 +- 25 files changed, 1235 insertions(+), 615 deletions(-) create mode 100644 apps/mg_riak/include/pulse.hrl create mode 100644 apps/mg_riak/rebar.config create mode 100644 apps/mg_riak/src/mg_riak.app.src create mode 100644 
apps/mg_riak/src/mg_riak.erl rename apps/{machinegun => mg_riak}/src/mg_riak_prometheus.erl (90%) rename apps/{machinegun => mg_riak}/src/mg_riak_prometheus_collector.erl (98%) create mode 100644 apps/mg_riak/src/mg_riak_pulse.erl create mode 100644 apps/mg_riak/src/mg_riak_pulse_prometheus.erl rename apps/{mg_core/src/mg_core_storage_riak.erl => mg_riak/src/mg_riak_storage.erl} (96%) create mode 100644 apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl create mode 100644 apps/mg_riak/test/mg_riak_storage_SUITE.erl diff --git a/README.md b/README.md index 5a3f7314..4001eea3 100644 --- a/README.md +++ b/README.md @@ -128,6 +128,13 @@ _НС_ — это любое отклонение от основного биз !!! attention "Todo" +## EventSink + +Основная его задача — сохранение сплошного потока эвентов, для возможности синхронизации баз. Эвенты должны быть total ordered, и должна быть цепочка хэшей для контроля целостности. +Находится отдельно от машин, и может быть подписан на произвольные namespace'ы. Тоже является машиной в отдельном нэймспейсе (чтобы это работало нормально нужно сделать [оптимизации протокола](https://github.com/rbkmoney/damsel/pull/38) и возможно отдельный бэкенд для бд). +Через настройки описываются подписки event_sink'ов на namespace'ы (точнее на машины). +У машины появляется промежуточный стейт для слива в синк. + ## OpenTelemetry В МГ добавлена поддержка трассировки сигналов автомата и передача соответствующего контекста в http заголовках при работе woody rpc. 
diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index d9c1adac..6867616b 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -27,6 +27,7 @@ prometheus, prometheus_cowboy, mg_core, + mg_riak, mg_es_kafka, mg_es_machine, mg_woody, diff --git a/apps/machinegun/src/machinegun.erl b/apps/machinegun/src/machinegun.erl index 6b0379bd..74689d5a 100644 --- a/apps/machinegun/src/machinegun.erl +++ b/apps/machinegun/src/machinegun.erl @@ -36,8 +36,9 @@ setup_metrics() -> ok = woody_ranch_prometheus_collector:setup(), ok = woody_hackney_prometheus_collector:setup(), ok = mg_pulse_prometheus:setup(), - ok = mg_event_sink_kafka_prometheus_pulse:setup(), - ok = mg_riak_prometheus:setup(). + ok = mg_riak_pulse_prometheus:setup(), + ok = mg_riak_prometheus:setup(), + ok = mg_event_sink_kafka_prometheus_pulse:setup(). %% TODO Maybe move those to `mg_conf'. diff --git a/apps/machinegun/src/mg_pulse.erl b/apps/machinegun/src/mg_pulse.erl index f3c194e7..9767bf94 100644 --- a/apps/machinegun/src/mg_pulse.erl +++ b/apps/machinegun/src/mg_pulse.erl @@ -27,6 +27,7 @@ %% pulse types -type beat() :: mg_core_pulse:beat() + | mg_riak_pulse:beat() | mg_core_queue_scanner:beat() | #woody_event{} | #woody_request_handle_error{} @@ -48,8 +49,10 @@ handle_beat(Options, Beat) -> ok = mg_woody_pulse_otel:handle_beat(Options, Beat), ok = mg_core_pulse_otel:handle_beat(Options, Beat), + ok = mg_riak_pulse:handle_beat(Options, Beat), ok = mg_pulse_log:handle_beat(maps:get(woody_event_handler_options, Options, #{}), Beat), ok = mg_pulse_prometheus:handle_beat(#{}, Beat), + ok = mg_riak_pulse_prometheus:handle_beat(#{}, Beat), ok = mg_event_sink_kafka_prometheus_pulse:handle_beat(#{}, Beat), ok = maybe_handle_lifecycle_kafka(Options, Beat). 
diff --git a/apps/machinegun/src/mg_pulse_prometheus.erl b/apps/machinegun/src/mg_pulse_prometheus.erl index 71bd1514..4ddcdffe 100644 --- a/apps/machinegun/src/mg_pulse_prometheus.erl +++ b/apps/machinegun/src/mg_pulse_prometheus.erl @@ -17,7 +17,6 @@ -module(mg_pulse_prometheus). -include_lib("mg_core/include/pulse.hrl"). --include_lib("mg_woody/include/pulse.hrl"). -export([setup/0]). -export([handle_beat/2]). @@ -34,7 +33,8 @@ -spec handle_beat(options(), beat()) -> ok. handle_beat(_Options, Beat) -> - ok = dispatch_metrics(Beat). + ok = dispatch_metrics(Beat), + ok. %% %% management API @@ -194,52 +194,6 @@ setup() -> {duration_unit, seconds}, {help, "Machinegun storage operation duration."} ]), - % Riak client operations - true = prometheus_counter:declare([ - {name, mg_riak_client_operation_changes_total}, - {registry, registry()}, - {labels, [namespace, name, operation, change]}, - {help, "Total number of Machinegun riak client operations."} - ]), - true = prometheus_histogram:declare([ - {name, mg_riak_client_operation_duration_seconds}, - {registry, registry()}, - {labels, [namespace, name, operation]}, - {buckets, duration_buckets()}, - {duration_unit, seconds}, - {help, "Machinegun riak client operation duration."} - ]), - %% Riak pool events - true = prometheus_counter:declare([ - {name, mg_riak_pool_no_free_connection_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of no free connection errors in Machinegun riak pool."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_queue_limit_reached_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of queue limit reached errors in Machinegun riak pool."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_connect_timeout_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of connect timeout errors in Machinegun riak pool."} - ]), - true = 
prometheus_counter:declare([ - {name, mg_riak_pool_killed_free_connections_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of killed free Machinegun riak pool connections."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_killed_in_use_connections_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of killed used Machinegun riak pool connections."} - ]), ok. %% Internals @@ -363,44 +317,6 @@ dispatch_metrics(#mg_core_storage_delete_start{name = {NS, _Caller, Type}}) -> dispatch_metrics(#mg_core_storage_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> ok = inc(mg_storage_operation_changes_total, [NS, Type, delete, finish]), ok = observe(mg_storage_operation_duration_seconds, [NS, Type, delete], Duration); -% Riak client operations -dispatch_metrics(#mg_core_riak_client_get_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, start]); -dispatch_metrics(#mg_core_riak_client_get_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, get], Duration); -dispatch_metrics(#mg_core_riak_client_put_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, start]); -dispatch_metrics(#mg_core_riak_client_put_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, put], Duration); -dispatch_metrics(#mg_core_riak_client_search_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, start]); -dispatch_metrics(#mg_core_riak_client_search_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = 
inc(mg_riak_client_operation_changes_total, [NS, Type, search, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, search], Duration); -dispatch_metrics(#mg_core_riak_client_delete_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, start]); -dispatch_metrics(#mg_core_riak_client_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, delete], Duration); -% Riak pool events -dispatch_metrics(#mg_core_riak_connection_pool_state_reached{ - name = {NS, _Caller, Type}, - state = no_free_connections -}) -> - ok = inc(mg_riak_pool_no_free_connection_errors_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_state_reached{ - name = {NS, _Caller, Type}, - state = queue_limit_reached -}) -> - ok = inc(mg_riak_pool_queue_limit_reached_errors_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = free}) -> - ok = inc(mg_riak_pool_killed_free_connections_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = in_use}) -> - ok = inc(mg_riak_pool_killed_in_use_connections_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_error{name = {NS, _Caller, Type}, reason = connect_timeout}) -> - ok = inc(mg_riak_pool_connect_timeout_errors_total, [NS, Type]); % Unknown dispatch_metrics(_Beat) -> ok. 
diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 5ae741fa..6189df66 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -20,7 +20,6 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("mg_core/include/pulse.hrl"). -include_lib("mg_es_kafka/include/pulse.hrl"). --include_lib("prometheus/include/prometheus_model.hrl"). %% tests descriptions -export([all/0]). @@ -63,21 +62,6 @@ -export([storage_search_finish_test/1]). -export([storage_delete_start_test/1]). -export([storage_delete_finish_test/1]). --export([riak_client_get_start_test/1]). --export([riak_client_get_finish_test/1]). --export([riak_client_put_start_test/1]). --export([riak_client_put_finish_test/1]). --export([riak_client_search_start_test/1]). --export([riak_client_search_finish_test/1]). --export([riak_client_delete_start_test/1]). --export([riak_client_delete_finish_test/1]). --export([riak_pool_no_free_connection_errors_test/1]). --export([riak_pool_queue_limit_reached_errors_test/1]). --export([riak_pool_killed_free_connections_test/1]). --export([riak_pool_killed_in_use_connections_test/1]). --export([riak_pool_connect_timeout_errors_test/1]). - --export([riak_pool_collector_test/1]). -define(NS, <<"NS">>). @@ -91,8 +75,7 @@ -spec all() -> [test_name() | {group, group_name()}]. all() -> [ - {group, beats}, - {group, collectors} + {group, beats} ]. -spec groups() -> [{group_name(), list(_), [test_name()]}]. 
@@ -131,23 +114,7 @@ groups() -> storage_search_start_test, storage_search_finish_test, storage_delete_start_test, - storage_delete_finish_test, - riak_client_get_start_test, - riak_client_get_finish_test, - riak_client_put_start_test, - riak_client_put_finish_test, - riak_client_search_start_test, - riak_client_search_finish_test, - riak_client_delete_start_test, - riak_client_delete_finish_test, - riak_pool_no_free_connection_errors_test, - riak_pool_queue_limit_reached_errors_test, - riak_pool_killed_free_connections_test, - riak_pool_killed_in_use_connections_test, - riak_pool_connect_timeout_errors_test - ]}, - {collectors, [], [ - riak_pool_collector_test + storage_delete_finish_test ]} ]. @@ -569,232 +536,6 @@ storage_delete_finish_test(_C) -> Buckets ). --spec riak_client_get_start_test(config()) -> _. -riak_client_get_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_get_start{ - name = {?NS, caller, type} - }). - --spec riak_client_get_finish_test(config()) -> _. -riak_client_get_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_get_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, get]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_put_start_test(config()) -> _. -riak_client_put_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_put_start{ - name = {?NS, caller, type} - }). - --spec riak_client_put_finish_test(config()) -> _. 
-riak_client_put_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_put_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, put]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_search_start_test(config()) -> _. -riak_client_search_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_search_start{ - name = {?NS, caller, type} - }). - --spec riak_client_search_finish_test(config()) -> _. -riak_client_search_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_search_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, search]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_delete_start_test(config()) -> _. -riak_client_delete_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_delete_start{ - name = {?NS, caller, type} - }). - --spec riak_client_delete_finish_test(config()) -> _. 
-riak_client_delete_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_delete_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, delete]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_pool_no_free_connection_errors_test(config()) -> _. -riak_pool_no_free_connection_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_state_reached{ - name = {?NS, caller, type}, - state = no_free_connections - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_no_free_connection_errors_total, [?NS, type]) - ). - --spec riak_pool_queue_limit_reached_errors_test(config()) -> _. -riak_pool_queue_limit_reached_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_state_reached{ - name = {?NS, caller, type}, - state = queue_limit_reached - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_queue_limit_reached_errors_total, [?NS, type]) - ). - --spec riak_pool_killed_free_connections_test(config()) -> _. -riak_pool_killed_free_connections_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_connection_killed{ - name = {?NS, caller, type}, - state = free - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_killed_free_connections_total, [?NS, type]) - ). - --spec riak_pool_killed_in_use_connections_test(config()) -> _. 
-riak_pool_killed_in_use_connections_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_connection_killed{ - name = {?NS, caller, type}, - state = in_use - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_killed_in_use_connections_total, [?NS, type]) - ). - --spec riak_pool_connect_timeout_errors_test(config()) -> _. -riak_pool_connect_timeout_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_error{ - name = {?NS, caller, type}, - reason = connect_timeout - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_connect_timeout_errors_total, [?NS, type]) - ). - -%% - --spec riak_pool_collector_test(config()) -> _. -riak_pool_collector_test(_C) -> - ok = mg_cth:await_ready(fun mg_cth:riak_ready/0), - Storage = - {mg_core_storage_riak, #{ - name => {?NS, caller, type}, - host => "riakdb", - port => 8087, - bucket => ?NS, - pool_options => #{ - init_count => 0, - max_count => 10, - queue_max => 100 - }, - pulse => undefined, - sidecar => {mg_riak_prometheus, #{}} - }}, - - {ok, Pid} = genlib_adhoc_supervisor:start_link( - #{strategy => one_for_all}, - [mg_core_storage:child_spec(Storage, storage)] - ), - - Collectors = prometheus_registry:collectors(default), - ?assert(lists:member(mg_riak_prometheus_collector, Collectors)), - - Self = self(), - ok = prometheus_collector:collect_mf( - default, - mg_riak_prometheus_collector, - fun(MF) -> Self ! 
MF end - ), - MFs = mg_cth:flush(), - MLabels = [ - #'LabelPair'{name = <<"namespace">>, value = <<"NS">>}, - #'LabelPair'{name = <<"name">>, value = <<"type">>} - ], - ?assertMatch( - [ - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_free">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_in_use">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_limit">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 10}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_queued_requests">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_queued_requests_limit">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 100}}] - } - ], - lists:sort(MFs) - ), - - ok = proc_lib:stop(Pid, normal, 5000). - %% Metrics utils -spec test_beat(term()) -> ok. diff --git a/apps/mg_conf/src/mg_conf.app.src b/apps/mg_conf/src/mg_conf.app.src index aa4b5056..ad1826cc 100644 --- a/apps/mg_conf/src/mg_conf.app.src +++ b/apps/mg_conf/src/mg_conf.app.src @@ -7,6 +7,7 @@ stdlib, genlib, mg_core, + mg_riak, mg_es_kafka, mg_es_machine, mg_woody diff --git a/apps/mg_conf/src/mg_conf.erl b/apps/mg_conf/src/mg_conf.erl index 17a38cea..e2c5829e 100644 --- a/apps/mg_conf/src/mg_conf.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -227,8 +227,8 @@ sub_storage_options(SubNS, Storage0) -> -spec add_bucket_postfix(mg_core:ns(), mg_core_storage:options()) -> mg_core_storage:options(). add_bucket_postfix(_, {mg_core_storage_memory, _} = Storage) -> Storage; -add_bucket_postfix(SubNS, {mg_core_storage_riak, #{bucket := Bucket} = Options}) -> - {mg_core_storage_riak, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. 
+add_bucket_postfix(SubNS, {mg_riak_storage, #{bucket := Bucket} = Options}) -> + {mg_riak_storage, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. -spec modernizer_options(modernizer() | undefined, pulse()) -> #{modernizer => mg_core_events_modernizer:options()}. modernizer_options(#{current_format_version := CurrentFormatVersion, handler := WoodyClient}, Pulse) -> diff --git a/apps/mg_core/include/pulse.hrl b/apps/mg_core/include/pulse.hrl index d60be5e9..a5a3b99f 100644 --- a/apps/mg_core/include/pulse.hrl +++ b/apps/mg_core/include/pulse.hrl @@ -269,62 +269,6 @@ duration :: non_neg_integer() }). -%% Riak client operations -%% Duration is in native units - --record(mg_core_riak_client_get_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_get_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_put_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_put_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_search_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_search_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_delete_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_delete_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - -%% Riak connection pool events - --record(mg_core_riak_connection_pool_state_reached, { - name :: mg_core_storage:name(), - state :: no_free_connections | queue_limit_reached -}). - --record(mg_core_riak_connection_pool_connection_killed, { - name :: mg_core_storage:name(), - state :: free | in_use -}). - --record(mg_core_riak_connection_pool_error, { - name :: mg_core_storage:name(), - reason :: connect_timeout -}). 
- %% Workers management -record(mg_core_worker_call_attempt, { diff --git a/apps/mg_core/rebar.config b/apps/mg_core/rebar.config index 01d72ffd..aad165f4 100644 --- a/apps/mg_core/rebar.config +++ b/apps/mg_core/rebar.config @@ -3,32 +3,7 @@ {brod, "3.16.1"}, {snappyer, "1.2.8"}, {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}}, - {riakc, {git, "https://github.com/valitydev/riak-erlang-client", {branch, develop}}}, - {pooler, {git, "https://github.com/seth/pooler", {branch, master}}}, {msgpack, {git, "https://github.com/msgpack/msgpack-erlang", {branch, master}}}, {snowflake, {git, "https://github.com/valitydev/snowflake", {branch, master}}}, {opentelemetry_api, "1.2.1"} ]}. - -{overrides, [ - {override, rebar3_protobuffs_plugin, [ - {deps, [{protobuffs, {git, "https://github.com/basho/erlang_protobuffs", {tag, "0.8.2"}}}]} - ]}, - - {override, protobuffs, [{deps, []}]}, - - {override, riakc, [ - {erl_opts, [ - {d, namespaced_types}, - {d, deprecated_19} - ]} - ]}, - - {override, riak_pb, [ - {plugins, [ - {riak_pb_msgcodegen, {git, "https://github.com/tsloughter/riak_pb_msgcodegen", {branch, "master"}}}, - {rebar3_protobuffs_plugin, {git, "https://github.com/cmkarlsson/rebar3_protobuffs_plugin", {tag, "0.1.1"}}} - ]}, - {provider_hooks, [{pre, [{compile, {protobuffs, compile}}, {compile, riak_pb_msgcodegen}]}]} - ]} -]}. 
diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index e8aa4068..55db7235 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -23,8 +23,6 @@ stdlib, genlib, gproc, - riakc, - pooler, brod, msgpack, snowflake, diff --git a/apps/mg_core/src/mg_core_pulse.erl b/apps/mg_core/src/mg_core_pulse.erl index 3d6e33e0..c77b1acd 100644 --- a/apps/mg_core/src/mg_core_pulse.erl +++ b/apps/mg_core/src/mg_core_pulse.erl @@ -74,20 +74,7 @@ | #mg_core_storage_search_start{} | #mg_core_storage_search_finish{} | #mg_core_storage_delete_start{} - | #mg_core_storage_delete_finish{} - % Riak client call handling - | #mg_core_riak_client_get_start{} - | #mg_core_riak_client_get_finish{} - | #mg_core_riak_client_put_start{} - | #mg_core_riak_client_put_finish{} - | #mg_core_riak_client_search_start{} - | #mg_core_riak_client_search_finish{} - | #mg_core_riak_client_delete_start{} - | #mg_core_riak_client_delete_finish{} - % Riak client call handling - | #mg_core_riak_connection_pool_state_reached{} - | #mg_core_riak_connection_pool_connection_killed{} - | #mg_core_riak_connection_pool_error{}. + | #mg_core_storage_delete_finish{}. -type handler() :: mg_core_utils:mod_opts() | undefined. diff --git a/apps/mg_core/test/mg_core_storages_SUITE.erl b/apps/mg_core/test/mg_core_storages_SUITE.erl index b2599761..d7f71c00 100644 --- a/apps/mg_core/test/mg_core_storages_SUITE.erl +++ b/apps/mg_core/test/mg_core_storages_SUITE.erl @@ -40,10 +40,6 @@ -export([indexes_test_with_limits/1]). -export([stress_test/1]). --export([riak_pool_stable_test/1]). --export([riak_pool_overload_test/1]). --export([riak_pool_misbehaving_connection_test/1]). - -export([handle_beat/2]). %% @@ -56,21 +52,13 @@ -spec all() -> [test_name() | {group, group_name()}]. all() -> [ - {group, memory}, - {group, riak} + {group, memory} ]. -spec groups() -> [{group_name(), list(_), [test_name()]}]. 
groups() -> [ - {memory, [], tests()}, - {riak, [], - tests() ++ - [ - riak_pool_stable_test, - riak_pool_overload_test, - riak_pool_misbehaving_connection_test - ]} + {memory, [], tests()} ]. -spec tests() -> [test_name()]. @@ -91,9 +79,7 @@ tests() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({riakc_pb_socket, 'get_index_eq', '_'}, x), - % dbg:tpl({riakc_pb_socket, 'get_index_range', '_'}, x), - Apps = mg_cth:start_applications([msgpack, gproc, riakc, pooler]), + Apps = mg_cth:start_applications([msgpack, gproc]), [{apps, Apps} | C]. -spec end_per_suite(config()) -> ok. @@ -365,143 +351,13 @@ stop_wait(Pid, Reason, Timeout) -> %% --spec riak_pool_stable_test(_C) -> ok. -riak_pool_stable_test(_C) -> - Namespace = <<"riak_pool_stable_test">>, - InitialCount = 1, - RequestCount = 10, - Options = riak_options(Namespace, #{ - init_count => InitialCount, - max_count => RequestCount div 2, - idle_timeout => 1000, - cull_interval => 1000, - queue_max => RequestCount * 2 - }), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - % Run multiple requests concurrently - _ = genlib_pmap:map( - fun(N) -> - base_test(genlib:to_binary(N), Storage) - end, - lists:seq(1, RequestCount) - ), - - % Give pool 3 seconds to get back to initial state - ok = timer:sleep(3000), - - {ok, Utilization} = mg_core_storage_riak:pool_utilization(Options), - ?assertMatch( - #{ - in_use_count := 0, - free_count := InitialCount - }, - maps:from_list(Utilization) - ), - - ok = stop_storage(Pid). - --spec riak_pool_overload_test(_C) -> ok. 
-riak_pool_overload_test(_C) -> - Namespace = <<"riak_pool_overload_test">>, - RequestCount = 40, - Options = riak_options( - Namespace, - #{ - init_count => 1, - max_count => 4, - queue_max => RequestCount div 4 - } - ), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - ?assertThrow( - {transient, {storage_unavailable, no_pool_members}}, - genlib_pmap:map( - fun(N) -> - base_test(genlib:to_binary(N), Storage) - end, - lists:seq(1, RequestCount) - ) - ), - - ok = stop_storage(Pid). - --spec riak_pool_misbehaving_connection_test(_C) -> ok. -riak_pool_misbehaving_connection_test(_C) -> - Namespace = <<"riak_pool_overload_test">>, - WorkersCount = 4, - RequestCount = 4, - Options = riak_options( - Namespace, - #{ - init_count => 1, - max_count => WorkersCount div 2, - queue_max => WorkersCount * 2 - } - ), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - _ = genlib_pmap:map( - fun(RequestID) -> - Key = genlib:to_binary(RequestID), - case RequestID of - N when (N rem WorkersCount) == (N div WorkersCount) -> - % Ensure that request fails occasionally... - ?assertThrow( - {transient, {storage_unavailable, _}}, - mg_core_storage:put(Storage, Key, <<"NOTACONTEXT">>, <<>>, []) - ); - _ -> - % ...And it will not affect any concurrently running requests. - ?assertEqual( - undefined, - mg_core_storage:get(Storage, Key) - ) - end - end, - lists:seq(1, RequestCount * WorkersCount), - #{proc_limit => WorkersCount} - ), - - ok = stop_storage(Pid). - -%% - -spec storage_options(atom(), binary()) -> mg_core_storage:options(). -storage_options(riak, Namespace) -> - {mg_core_storage_riak, - riak_options( - Namespace, - #{ - init_count => 1, - max_count => 10, - idle_timeout => 1000, - cull_interval => 1000, - auto_grow_threshold => 5, - queue_max => 100 - } - )}; storage_options(memory, _) -> {mg_core_storage_memory, #{ pulse => ?MODULE, name => storage }}. 
--spec riak_options(mg_core:ns(), map()) -> mg_core_storage_riak:options(). -riak_options(Namespace, PoolOptions) -> - #{ - name => storage, - pulse => ?MODULE, - host => "riakdb", - port => 8087, - bucket => Namespace, - pool_options => PoolOptions - }. - -spec start_storage(mg_core_storage:options()) -> pid(). start_storage(Options) -> mg_core_utils:throw_if_error( diff --git a/apps/mg_riak/include/pulse.hrl b/apps/mg_riak/include/pulse.hrl new file mode 100644 index 00000000..a4a3b31b --- /dev/null +++ b/apps/mg_riak/include/pulse.hrl @@ -0,0 +1,55 @@ +%% Riak client operations +%% Duration is in native units + +-record(mg_riak_client_get_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_get_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_put_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_put_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_search_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_search_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_delete_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_delete_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +%% Riak connection pool events + +-record(mg_riak_connection_pool_state_reached, { + name :: mg_core_storage:name(), + state :: no_free_connections | queue_limit_reached +}). + +-record(mg_riak_connection_pool_connection_killed, { + name :: mg_core_storage:name(), + state :: free | in_use +}). + +-record(mg_riak_connection_pool_error, { + name :: mg_core_storage:name(), + reason :: connect_timeout +}). 
diff --git a/apps/mg_riak/rebar.config b/apps/mg_riak/rebar.config new file mode 100644 index 00000000..12b32ab3 --- /dev/null +++ b/apps/mg_riak/rebar.config @@ -0,0 +1,32 @@ +{deps, [ + {gproc, "0.9.0"}, + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}}, + {riakc, {git, "https://github.com/valitydev/riak-erlang-client", {branch, develop}}}, + {pooler, {git, "https://github.com/seth/pooler", {branch, master}}}, + {msgpack, {git, "https://github.com/msgpack/msgpack-erlang", {branch, master}}}, + {prometheus, "4.8.1"}, + {opentelemetry_api, "1.2.1"} +]}. + +{overrides, [ + {override, rebar3_protobuffs_plugin, [ + {deps, [{protobuffs, {git, "https://github.com/basho/erlang_protobuffs", {tag, "0.8.2"}}}]} + ]}, + + {override, protobuffs, [{deps, []}]}, + + {override, riakc, [ + {erl_opts, [ + {d, namespaced_types}, + {d, deprecated_19} + ]} + ]}, + + {override, riak_pb, [ + {plugins, [ + {riak_pb_msgcodegen, {git, "https://github.com/tsloughter/riak_pb_msgcodegen", {branch, "master"}}}, + {rebar3_protobuffs_plugin, {git, "https://github.com/cmkarlsson/rebar3_protobuffs_plugin", {tag, "0.1.1"}}} + ]}, + {provider_hooks, [{pre, [{compile, {protobuffs, compile}}, {compile, riak_pb_msgcodegen}]}]} + ]} +]}. diff --git a/apps/mg_riak/src/mg_riak.app.src b/apps/mg_riak/src/mg_riak.app.src new file mode 100644 index 00000000..9fab2887 --- /dev/null +++ b/apps/mg_riak/src/mg_riak.app.src @@ -0,0 +1,41 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +{application, mg_riak , [ + {description, "Machinegun Riak Storage"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + gproc, + genlib, + prometheus, + mg_core, + riakc, + pooler, + msgpack, + opentelemetry_api + ]}, + {env, []}, + {modules, []}, + {maintainers, [ + "Petr Kozorezov ", + "Andrey Mayorov " + ]}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_riak/src/mg_riak.erl b/apps/mg_riak/src/mg_riak.erl new file mode 100644 index 00000000..e4647aef --- /dev/null +++ b/apps/mg_riak/src/mg_riak.erl @@ -0,0 +1,3 @@ +-module(mg_riak). + +%% diff --git a/apps/machinegun/src/mg_riak_prometheus.erl b/apps/mg_riak/src/mg_riak_prometheus.erl similarity index 90% rename from apps/machinegun/src/mg_riak_prometheus.erl rename to apps/mg_riak/src/mg_riak_prometheus.erl index ac49f384..bea49a0e 100644 --- a/apps/machinegun/src/mg_riak_prometheus.erl +++ b/apps/mg_riak/src/mg_riak_prometheus.erl @@ -10,7 +10,7 @@ -export([init/1]). -type options() :: #{}. --type storage() :: mg_core_storage_riak:options(). +-type storage() :: mg_riak_storage:options(). -export_type([storage/0]). @@ -37,7 +37,7 @@ child_spec(_Options, Storage, ChildID) -> -spec init(mg_core_storage:options()) -> genlib_gen:supervisor_ret(). init(Storage) -> - {mg_core_storage_riak, StorageOptions} = mg_core_utils:separate_mod_opts(Storage), + {mg_riak_storage, StorageOptions} = mg_core_utils:separate_mod_opts(Storage), true = gproc:add_local_property(?PROPNAME, StorageOptions), % NOTE % We only care about keeping gproc property live through this supervisor process. 
diff --git a/apps/machinegun/src/mg_riak_prometheus_collector.erl b/apps/mg_riak/src/mg_riak_prometheus_collector.erl
similarity index 98%
rename from apps/machinegun/src/mg_riak_prometheus_collector.erl
rename to apps/mg_riak/src/mg_riak_prometheus_collector.erl
index 82eabc9a..6cca89ca 100644
--- a/apps/machinegun/src/mg_riak_prometheus_collector.erl
+++ b/apps/mg_riak/src/mg_riak_prometheus_collector.erl
@@ -82,7 +82,7 @@ collect_storage_metrics(#{name := {NS, _Module, Type}} = Storage, Callback) ->
 
 -spec gather_metrics(storage()) -> pooler_metrics().
 gather_metrics(#{name := Name} = Storage) ->
-    case mg_core_storage_riak:pool_utilization(Storage) of
+    case mg_riak_storage:pool_utilization(Storage) of
         {ok, Metrics} ->
             Metrics;
         {error, Reason} ->
diff --git a/apps/mg_riak/src/mg_riak_pulse.erl b/apps/mg_riak/src/mg_riak_pulse.erl
new file mode 100644
index 00000000..a4636dd2
--- /dev/null
+++ b/apps/mg_riak/src/mg_riak_pulse.erl
@@ -0,0 +1,29 @@
+-module(mg_riak_pulse).
+
+-include_lib("mg_riak/include/pulse.hrl").
+
+%% API
+-export_type([beat/0]).
+-export([handle_beat/2]).
+
+%%
+%% API
+%%
+-type beat() ::
+    % Riak client call handling
+    #mg_riak_client_get_start{}
+    | #mg_riak_client_get_finish{}
+    | #mg_riak_client_put_start{}
+    | #mg_riak_client_put_finish{}
+    | #mg_riak_client_search_start{}
+    | #mg_riak_client_search_finish{}
+    | #mg_riak_client_delete_start{}
+    | #mg_riak_client_delete_finish{}
+    % Riak connection pool events
+    | #mg_riak_connection_pool_state_reached{}
+    | #mg_riak_connection_pool_connection_killed{}
+    | #mg_riak_connection_pool_error{}.
+
+-spec handle_beat(any(), beat() | _OtherBeat) -> ok.
+handle_beat(_Options, _Beat) ->
+    ok.
diff --git a/apps/mg_riak/src/mg_riak_pulse_prometheus.erl b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl new file mode 100644 index 00000000..aa9bead3 --- /dev/null +++ b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl @@ -0,0 +1,167 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_riak_pulse_prometheus). + +-include_lib("mg_riak/include/pulse.hrl"). + +-export([setup/0]). +-export([handle_beat/2]). + +%% internal types +-type beat() :: mg_riak_pulse:beat(). +-type options() :: #{}. +-type metric_name() :: prometheus_metric:name(). +-type metric_label_value() :: term(). + +%% +%% mg_pulse handler +%% + +-spec handle_beat(options(), beat() | _OtherBeat) -> ok. +handle_beat(_Options, Beat) -> + ok = dispatch_metrics(Beat). + +%% +%% management API +%% + +%% Sets all metrics up. Call this when the app starts. +-spec setup() -> ok. 
+setup() -> + % Riak client operations + true = prometheus_counter:declare([ + {name, mg_riak_client_operation_changes_total}, + {registry, registry()}, + {labels, [namespace, name, operation, change]}, + {help, "Total number of Machinegun riak client operations."} + ]), + true = prometheus_histogram:declare([ + {name, mg_riak_client_operation_duration_seconds}, + {registry, registry()}, + {labels, [namespace, name, operation]}, + {buckets, duration_buckets()}, + {duration_unit, seconds}, + {help, "Machinegun riak client operation duration."} + ]), + %% Riak pool events + true = prometheus_counter:declare([ + {name, mg_riak_pool_no_free_connection_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of no free connection errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_queue_limit_reached_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of queue limit reached errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_connect_timeout_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of connect timeout errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_killed_free_connections_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of killed free Machinegun riak pool connections."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_killed_in_use_connections_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of killed used Machinegun riak pool connections."} + ]), + ok. + +%% Internals + +-spec dispatch_metrics(beat() | _Other) -> ok. 
+% Riak client operations +dispatch_metrics(#mg_riak_client_get_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, start]); +dispatch_metrics(#mg_riak_client_get_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, get], Duration); +dispatch_metrics(#mg_riak_client_put_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, start]); +dispatch_metrics(#mg_riak_client_put_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, put], Duration); +dispatch_metrics(#mg_riak_client_search_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, start]); +dispatch_metrics(#mg_riak_client_search_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, search], Duration); +dispatch_metrics(#mg_riak_client_delete_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, start]); +dispatch_metrics(#mg_riak_client_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, delete], Duration); +% Riak pool events +dispatch_metrics(#mg_riak_connection_pool_state_reached{ + name = {NS, _Caller, Type}, + state = no_free_connections +}) -> + ok = inc(mg_riak_pool_no_free_connection_errors_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_state_reached{ + name = {NS, 
_Caller, Type}, + state = queue_limit_reached +}) -> + ok = inc(mg_riak_pool_queue_limit_reached_errors_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = free}) -> + ok = inc(mg_riak_pool_killed_free_connections_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = in_use}) -> + ok = inc(mg_riak_pool_killed_in_use_connections_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_error{name = {NS, _Caller, Type}, reason = connect_timeout}) -> + ok = inc(mg_riak_pool_connect_timeout_errors_total, [NS, Type]); +% Unknown +dispatch_metrics(_Beat) -> + ok. + +-spec inc(metric_name(), [metric_label_value()]) -> ok. +inc(Name, Labels) -> + _ = prometheus_counter:inc(registry(), Name, Labels, 1), + ok. + +-spec observe(metric_name(), [metric_label_value()], number()) -> ok. +observe(Name, Labels, Value) -> + _ = prometheus_histogram:observe(registry(), Name, Labels, Value), + ok. + +-spec registry() -> prometheus_registry:registry(). +registry() -> + default. + +-spec duration_buckets() -> [number()]. +duration_buckets() -> + [ + 0.001, + 0.005, + 0.010, + 0.025, + 0.050, + 0.100, + 0.250, + 0.500, + 1, + 2.5, + 5, + 10 + ]. diff --git a/apps/mg_core/src/mg_core_storage_riak.erl b/apps/mg_riak/src/mg_riak_storage.erl similarity index 96% rename from apps/mg_core/src/mg_core_storage_riak.erl rename to apps/mg_riak/src/mg_riak_storage.erl index e82e20ab..9ddfcf3d 100644 --- a/apps/mg_core/src/mg_core_storage_riak.erl +++ b/apps/mg_riak/src/mg_riak_storage.erl @@ -46,9 +46,9 @@ %%% TODO: %%% - классификация и обработка ошибок %%% --module(mg_core_storage_riak). +-module(mg_riak_storage). -include_lib("riakc/include/riakc.hrl"). --include("pulse.hrl"). +-include_lib("mg_riak/include/pulse.hrl"). %% mg_core_storage callbacks -behaviour(mg_core_storage). 
@@ -599,7 +599,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"error_no_members_count">>], _ #{name := Name, pulse := Handler} = pulse_options(PoolNameString), mg_core_pulse:handle_beat( Handler, - #mg_core_riak_connection_pool_state_reached{ + #mg_riak_connection_pool_state_reached{ name = Name, state = no_free_connections } @@ -608,7 +608,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"queue_max_reached">>], _, _Co #{name := Name, pulse := Handler} = pulse_options(PoolNameString), mg_core_pulse:handle_beat( Handler, - #mg_core_riak_connection_pool_state_reached{ + #mg_riak_connection_pool_state_reached{ name = Name, state = queue_limit_reached } @@ -617,7 +617,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"starting_member_timeout">>], #{name := Name, pulse := Handler} = pulse_options(PoolNameString), mg_core_pulse:handle_beat( Handler, - #mg_core_riak_connection_pool_error{ + #mg_riak_connection_pool_error{ name = Name, reason = connect_timeout } @@ -626,7 +626,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"killed_free_count">>], _, _Co #{name := Name, pulse := Handler} = pulse_options(PoolNameString), mg_core_pulse:handle_beat( Handler, - #mg_core_riak_connection_pool_connection_killed{ + #mg_riak_connection_pool_connection_killed{ name = Name, state = free } @@ -635,7 +635,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"killed_in_use_count">>], _, _ #{name := Name, pulse := Handler} = pulse_options(PoolNameString), mg_core_pulse:handle_beat( Handler, - #mg_core_riak_connection_pool_connection_killed{ + #mg_riak_connection_pool_connection_killed{ name = Name, state = in_use } @@ -649,40 +649,40 @@ update_or_create(_MetricKey, _Value, _Type, []) -> -spec emit_beat_start(mg_core_storage:request(), options()) -> ok. 
emit_beat_start({get, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_get_start{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_get_start{ name = Name }); emit_beat_start({put, _, _, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_put_start{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_put_start{ name = Name }); emit_beat_start({search, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_search_start{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_search_start{ name = Name }); emit_beat_start({delete, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_delete_start{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_delete_start{ name = Name }). -spec emit_beat_finish(mg_core_storage:request(), options(), duration()) -> ok. 
emit_beat_finish({get, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_get_finish{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_get_finish{ name = Name, duration = Duration }); emit_beat_finish({put, _, _, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_put_finish{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_put_finish{ name = Name, duration = Duration }); emit_beat_finish({search, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_search_finish{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_search_finish{ name = Name, duration = Duration }); emit_beat_finish({delete, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_delete_finish{ + ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_delete_finish{ name = Name, duration = Duration }). diff --git a/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl b/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl new file mode 100644 index 00000000..eb18f51b --- /dev/null +++ b/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl @@ -0,0 +1,349 @@ +%%% +%%% Copyright 2020 RBKmoney +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_riak_prometheus_metric_SUITE). 
+ +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_riak/include/pulse.hrl"). +-include_lib("prometheus/include/prometheus_model.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). +-export([riak_client_get_start_test/1]). +-export([riak_client_get_finish_test/1]). +-export([riak_client_put_start_test/1]). +-export([riak_client_put_finish_test/1]). +-export([riak_client_search_start_test/1]). +-export([riak_client_search_finish_test/1]). +-export([riak_client_delete_start_test/1]). +-export([riak_client_delete_finish_test/1]). +-export([riak_pool_no_free_connection_errors_test/1]). +-export([riak_pool_queue_limit_reached_errors_test/1]). +-export([riak_pool_killed_free_connections_test/1]). +-export([riak_pool_killed_in_use_connections_test/1]). +-export([riak_pool_connect_timeout_errors_test/1]). + +-export([riak_pool_collector_test/1]). + +-define(NS, <<"NS">>). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, beats}, + {group, collectors} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {beats, [parallel], [ + riak_client_get_start_test, + riak_client_get_finish_test, + riak_client_put_start_test, + riak_client_put_finish_test, + riak_client_search_start_test, + riak_client_search_finish_test, + riak_client_delete_start_test, + riak_client_delete_finish_test, + riak_pool_no_free_connection_errors_test, + riak_pool_queue_limit_reached_errors_test, + riak_pool_killed_free_connections_test, + riak_pool_killed_in_use_connections_test, + riak_pool_connect_timeout_errors_test + ]}, + {collectors, [], [ + riak_pool_collector_test + ]} + ]. 
+ +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + Apps = mg_cth:start_applications([mg_riak]), + ok = mg_riak_pulse_prometheus:setup(), + ok = mg_riak_prometheus:setup(), + [{apps, Apps} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). + +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(_, C) -> + C. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% Tests + +-spec riak_client_get_start_test(config()) -> _. +riak_client_get_start_test(_C) -> + ok = test_beat(#mg_riak_client_get_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_get_finish_test(config()) -> _. +riak_client_get_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_get_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, get]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_put_start_test(config()) -> _. +riak_client_put_start_test(_C) -> + ok = test_beat(#mg_riak_client_put_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_put_finish_test(config()) -> _. 
+riak_client_put_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_put_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, put]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_search_start_test(config()) -> _. +riak_client_search_start_test(_C) -> + ok = test_beat(#mg_riak_client_search_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_search_finish_test(config()) -> _. +riak_client_search_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_search_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, search]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_delete_start_test(config()) -> _. +riak_client_delete_start_test(_C) -> + ok = test_beat(#mg_riak_client_delete_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_delete_finish_test(config()) -> _. 
+riak_client_delete_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_delete_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, delete]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_pool_no_free_connection_errors_test(config()) -> _. +riak_pool_no_free_connection_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_state_reached{ + name = {?NS, caller, type}, + state = no_free_connections + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_no_free_connection_errors_total, [?NS, type]) + ). + +-spec riak_pool_queue_limit_reached_errors_test(config()) -> _. +riak_pool_queue_limit_reached_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_state_reached{ + name = {?NS, caller, type}, + state = queue_limit_reached + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_queue_limit_reached_errors_total, [?NS, type]) + ). + +-spec riak_pool_killed_free_connections_test(config()) -> _. +riak_pool_killed_free_connections_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_connection_killed{ + name = {?NS, caller, type}, + state = free + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_killed_free_connections_total, [?NS, type]) + ). + +-spec riak_pool_killed_in_use_connections_test(config()) -> _. 
+riak_pool_killed_in_use_connections_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_connection_killed{ + name = {?NS, caller, type}, + state = in_use + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_killed_in_use_connections_total, [?NS, type]) + ). + +-spec riak_pool_connect_timeout_errors_test(config()) -> _. +riak_pool_connect_timeout_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_error{ + name = {?NS, caller, type}, + reason = connect_timeout + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_connect_timeout_errors_total, [?NS, type]) + ). + +%% + +-spec riak_pool_collector_test(config()) -> _. +riak_pool_collector_test(_C) -> + ok = mg_cth:await_ready(fun mg_cth:riak_ready/0), + Storage = + {mg_riak_storage, #{ + name => {?NS, caller, type}, + host => "riakdb", + port => 8087, + bucket => ?NS, + pool_options => #{ + init_count => 0, + max_count => 10, + queue_max => 100 + }, + pulse => undefined, + sidecar => {mg_riak_prometheus, #{}} + }}, + + {ok, Pid} = genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + [mg_core_storage:child_spec(Storage, storage)] + ), + + Collectors = prometheus_registry:collectors(default), + ?assert(lists:member(mg_riak_prometheus_collector, Collectors)), + + Self = self(), + ok = prometheus_collector:collect_mf( + default, + mg_riak_prometheus_collector, + fun(MF) -> Self ! 
MF end + ), + MFs = mg_cth:flush(), + MLabels = [ + #'LabelPair'{name = <<"namespace">>, value = <<"NS">>}, + #'LabelPair'{name = <<"name">>, value = <<"type">>} + ], + ?assertMatch( + [ + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_free">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_in_use">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_limit">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 10}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_queued_requests">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_queued_requests_limit">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 100}}] + } + ], + lists:sort(MFs) + ), + + ok = proc_lib:stop(Pid, normal, 5000). + +%% Metrics utils + +-spec test_beat(term()) -> ok. +test_beat(Beat) -> + mg_riak_pulse_prometheus:handle_beat(#{}, Beat). + +-spec test_millisecond_buckets() -> #{non_neg_integer() => pos_integer()}. +test_millisecond_buckets() -> + #{ + 0 => 1, + 1 => 1, + 5 => 2, + 10 => 3 + }. diff --git a/apps/mg_riak/test/mg_riak_storage_SUITE.erl b/apps/mg_riak/test/mg_riak_storage_SUITE.erl new file mode 100644 index 00000000..dc43be82 --- /dev/null +++ b/apps/mg_riak/test/mg_riak_storage_SUITE.erl @@ -0,0 +1,514 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +%%% +%%% Тесты всех возможных бэкендов хранилищ. +%%% +%%% TODO: +%%% - сделать проверку, что неймспейсы не пересекаются +%%% +-module(mg_riak_storage_SUITE). +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). + +%% base group tests +-export([base_test/1]). +-export([batch_test/1]). +-export([indexes_test/1]). +-export([key_length_limit_test/1]). +-export([indexes_test_with_limits/1]). +-export([stress_test/1]). + +-export([riak_pool_stable_test/1]). +-export([riak_pool_overload_test/1]). +-export([riak_pool_misbehaving_connection_test/1]). + +-export([handle_beat/2]). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, riak} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {riak, [], + tests() ++ + [ + riak_pool_stable_test, + riak_pool_overload_test, + riak_pool_misbehaving_connection_test + ]} + ]. + +-spec tests() -> [test_name()]. +tests() -> + [ + base_test, + batch_test, + % incorrect_context_test, + indexes_test, + key_length_limit_test, + indexes_test_with_limits, + stress_test + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + % dbg:tracer(), dbg:p(all, c), + % dbg:tpl({riakc_pb_socket, 'get_index_eq', '_'}, x), + % dbg:tpl({riakc_pb_socket, 'get_index_range', '_'}, x), + Apps = mg_cth:start_applications([msgpack, gproc, riakc, pooler]), + [{apps, Apps} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). 
+ +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(Group, C) -> + [{storage_type, Group} | C]. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% +%% base group tests +%% +-spec base_test(config()) -> _. +base_test(C) -> + Options = storage_options(?config(storage_type, C), <<"base_test">>), + Pid = start_storage(Options), + base_test(<<"1">>, Options), + ok = stop_storage(Pid). + +-spec base_test(mg_core:id(), mg_core_storage:options()) -> _. +base_test(Key, Options) -> + Value1 = #{<<"hello">> => <<"world">>}, + Value2 = [<<"hello">>, 1], + + undefined = mg_core_storage:get(Options, Key), + Ctx1 = mg_core_storage:put(Options, Key, undefined, Value1, []), + {Ctx1, Value1} = mg_core_storage:get(Options, Key), + Ctx2 = mg_core_storage:put(Options, Key, Ctx1, Value2, []), + {Ctx2, Value2} = mg_core_storage:get(Options, Key), + ok = mg_core_storage:delete(Options, Key, Ctx2), + undefined = mg_core_storage:get(Options, Key), + ok. + +-spec batch_test(config()) -> _. 
+batch_test(C) ->
+    {Mod, StorageOpts} = storage_options(?config(storage_type, C), <<"batch_test">>),
+    Options = {Mod, StorageOpts#{batching => #{concurrency_limit => 3}}},
+    Pid = start_storage(Options),
+    Keys = lists:map(fun genlib:to_binary/1, lists:seq(1, 10)),
+    Value = #{<<"hello">> => <<"world">>},
+
+    PutBatch = lists:foldl(
+        fun(Key, Batch) ->
+            mg_core_storage:add_batch_request({put, Key, undefined, Value, []}, Batch)
+        end,
+        mg_core_storage:new_batch(),
+        Keys
+    ),
+    PutResults = mg_core_storage:run_batch(Options, PutBatch),
+    Ctxs = lists:zipwith(
+        fun(Key, Result) ->
+            {{put, Key, undefined, Value, _}, Ctx} = Result,
+            Ctx
+        end,
+        Keys,
+        PutResults
+    ),
+
+    GetBatch = lists:foldl(
+        fun(Key, Batch) ->
+            mg_core_storage:add_batch_request({get, Key}, Batch)
+        end,
+        mg_core_storage:new_batch(),
+        Keys
+    ),
+    GetResults = mg_core_storage:run_batch(Options, GetBatch),
+    _ = lists:zipwith3(
+        fun(Key, Ctx, Result) ->
+            {{get, Key}, {Ctx, Value}} = Result
+        end,
+        Keys,
+        Ctxs,
+        GetResults
+    ),
+
+    ok = stop_storage(Pid).
+
+-spec indexes_test(config()) -> _. 
+indexes_test(C) -> + Options = storage_options(?config(storage_type, C), <<"indexes_test">>), + Pid = start_storage(Options), + + K1 = <<"Key_24">>, + I1 = {integer, <<"index1">>}, + IV1 = 1, + + K2 = <<"Key_42">>, + I2 = {integer, <<"index2">>}, + IV2 = 2, + + Value = #{<<"hello">> => <<"world">>}, + + [] = mg_core_storage:search(Options, {I1, IV1}), + [] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + Ctx1 = mg_core_storage:put(Options, K1, undefined, Value, [{I1, IV1}, {I2, IV2}]), + + [K1] = mg_core_storage:search(Options, {I1, IV1}), + [{IV1, K1}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [K1] = mg_core_storage:search(Options, {I2, IV2}), + [{IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + Ctx2 = mg_core_storage:put(Options, K2, undefined, Value, [{I1, IV2}, {I2, IV1}]), + + [K1] = mg_core_storage:search(Options, {I1, IV1}), + [{IV1, K1}, {IV2, K2}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [K1] = mg_core_storage:search(Options, {I2, IV2}), + [{IV1, K2}, {IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = mg_core_storage:delete(Options, K1, Ctx1), + + [{IV2, K2}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [{IV1, K2}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = mg_core_storage:delete(Options, K2, Ctx2), + + [] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = stop_storage(Pid). + +-spec key_length_limit_test(config()) -> _. 
+key_length_limit_test(C) -> + Options = storage_options(?config(storage_type, C), <<"key_length_limit">>), + Pid = start_storage(Options), + + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:get(Options, <<"">>)), + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:add_batch_request({get, <<"">>}, mg_core_storage:new_batch())), + + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:put(Options, <<"">>, undefined, <<"test">>, [])), + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:add_batch_request( + {put, <<"">>, undefined, <<"test">>, []}, + mg_core_storage:new_batch() + )), + + _ = mg_core_storage:get(Options, binary:copy(<<"K">>, 1024)), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:get(Options, binary:copy(<<"K">>, 1025))), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:add_batch_request( + {get, binary:copy(<<"K">>, 1025)}, + mg_core_storage:new_batch() + )), + + _ = mg_core_storage:put( + Options, + binary:copy(<<"K">>, 1024), + undefined, + <<"test">>, + [] + ), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:put( + Options, + binary:copy(<<"K">>, 1025), + undefined, + <<"test">>, + [] + )), + + ok = stop_storage(Pid). + +-spec indexes_test_with_limits(config()) -> _. 
+indexes_test_with_limits(C) -> + Options = storage_options(?config(storage_type, C), <<"indexes_test_with_limits">>), + Pid = start_storage(Options), + + K1 = <<"Key_24">>, + I1 = {integer, <<"index1">>}, + IV1 = 1, + + K2 = <<"Key_42">>, + I2 = {integer, <<"index2">>}, + IV2 = 2, + + Value = #{<<"hello">> => <<"world">>}, + + Ctx1 = mg_core_storage:put(Options, K1, undefined, Value, [{I1, IV1}, {I2, IV2}]), + Ctx2 = mg_core_storage:put(Options, K2, undefined, Value, [{I1, IV2}, {I2, IV1}]), + + {[{IV1, K1}], Cont1} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, undefined}), + {[{IV2, K2}], Cont2} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, Cont1}), + {[], undefined} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, Cont2}), + + [{IV1, K2}, {IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}, inf, undefined}), + + ok = mg_core_storage:delete(Options, K1, Ctx1), + ok = mg_core_storage:delete(Options, K2, Ctx2), + + ok = stop_storage(Pid). + +-spec stress_test(_C) -> ok. +stress_test(C) -> + Options = storage_options(?config(storage_type, C), <<"stress_test">>), + Pid = start_storage(Options), + ProcessCount = 20, + Processes = [ + stress_test_start_process(ID, ProcessCount, Options) + || ID <- lists:seq(1, ProcessCount) + ], + + timer:sleep(5000), + ok = stop_wait_all(Processes, shutdown, 5000), + ok = stop_storage(Pid). + +-spec stress_test_start_process(integer(), pos_integer(), mg_core_storage:options()) -> pid(). +stress_test_start_process(ID, ProcessCount, Options) -> + erlang:spawn_link(fun() -> stress_test_process(ID, ProcessCount, 0, Options) end). + +-spec stress_test_process(integer(), pos_integer(), integer(), mg_core_storage:options()) -> + no_return(). +stress_test_process(ID, ProcessCount, RunCount, Options) -> + % Добавляем смещение ID, чтобы не было пересечения ID машин + ok = base_test(erlang:integer_to_binary(ID), Options), + + receive + {stop, Reason} -> + ct:print("Process: ~p. 
Number of runs: ~p", [self(), RunCount]), + exit(Reason) + after 0 -> stress_test_process(ID + ProcessCount, ProcessCount, RunCount + 1, Options) + end. + +-spec stop_wait_all([pid()], _Reason, timeout()) -> ok. +stop_wait_all(Pids, Reason, Timeout) -> + OldTrap = process_flag(trap_exit, true), + + lists:foreach( + fun(Pid) -> send_stop(Pid, Reason) end, + Pids + ), + + lists:foreach( + fun(Pid) -> + case stop_wait(Pid, Reason, Timeout) of + ok -> ok; + timeout -> exit(stop_timeout) + end + end, + Pids + ), + + true = process_flag(trap_exit, OldTrap), + ok. + +-spec send_stop(pid(), _Reason) -> ok. +send_stop(Pid, Reason) -> + Pid ! {stop, Reason}, + ok. + +-spec stop_wait(pid(), _Reason, timeout()) -> ok | timeout. +stop_wait(Pid, Reason, Timeout) -> + receive + {'EXIT', Pid, Reason} -> ok + after Timeout -> timeout + end. + +%% + +-spec riak_pool_stable_test(_C) -> ok. +riak_pool_stable_test(_C) -> + Namespace = <<"riak_pool_stable_test">>, + InitialCount = 1, + RequestCount = 10, + Options = riak_options(Namespace, #{ + init_count => InitialCount, + max_count => RequestCount div 2, + idle_timeout => 1000, + cull_interval => 1000, + queue_max => RequestCount * 2 + }), + Storage = {mg_riak_storage, Options}, + Pid = start_storage(Storage), + + % Run multiple requests concurrently + _ = genlib_pmap:map( + fun(N) -> + base_test(genlib:to_binary(N), Storage) + end, + lists:seq(1, RequestCount) + ), + + % Give pool 3 seconds to get back to initial state + ok = timer:sleep(3000), + + {ok, Utilization} = mg_riak_storage:pool_utilization(Options), + ?assertMatch( + #{ + in_use_count := 0, + free_count := InitialCount + }, + maps:from_list(Utilization) + ), + + ok = stop_storage(Pid). + +-spec riak_pool_overload_test(_C) -> ok. 
+riak_pool_overload_test(_C) ->
+    Namespace = <<"riak_pool_overload_test">>,
+    RequestCount = 40,
+    Options = riak_options(
+        Namespace,
+        #{
+            init_count => 1,
+            max_count => 4,
+            queue_max => RequestCount div 4
+        }
+    ),
+    Storage = {mg_riak_storage, Options},
+    Pid = start_storage(Storage),
+
+    ?assertThrow(
+        {transient, {storage_unavailable, no_pool_members}},
+        genlib_pmap:map(
+            fun(N) ->
+                base_test(genlib:to_binary(N), Storage)
+            end,
+            lists:seq(1, RequestCount)
+        )
+    ),
+
+    ok = stop_storage(Pid).
+
+-spec riak_pool_misbehaving_connection_test(_C) -> ok.
+riak_pool_misbehaving_connection_test(_C) ->
+    Namespace = <<"riak_pool_misbehaving_connection_test">>,
+    WorkersCount = 4,
+    RequestCount = 4,
+    Options = riak_options(
+        Namespace,
+        #{
+            init_count => 1,
+            max_count => WorkersCount div 2,
+            queue_max => WorkersCount * 2
+        }
+    ),
+    Storage = {mg_riak_storage, Options},
+    Pid = start_storage(Storage),
+
+    _ = genlib_pmap:map(
+        fun(RequestID) ->
+            Key = genlib:to_binary(RequestID),
+            case RequestID of
+                N when (N rem WorkersCount) == (N div WorkersCount) ->
+                    % Ensure that request fails occasionally...
+                    ?assertThrow(
+                        {transient, {storage_unavailable, _}},
+                        mg_core_storage:put(Storage, Key, <<"NOTACONTEXT">>, <<>>, [])
+                    );
+                _ ->
+                    % ...And it will not affect any concurrently running requests.
+                    ?assertEqual(
+                        undefined,
+                        mg_core_storage:get(Storage, Key)
+                    )
+            end
+        end,
+        lists:seq(1, RequestCount * WorkersCount),
+        #{proc_limit => WorkersCount}
+    ),
+
+    ok = stop_storage(Pid).
+
+%%
+
+-spec storage_options(atom(), binary()) -> mg_core_storage:options().
+storage_options(riak, Namespace) ->
+    {mg_riak_storage,
+        riak_options(
+            Namespace,
+            #{
+                init_count => 1,
+                max_count => 10,
+                idle_timeout => 1000,
+                cull_interval => 1000,
+                auto_grow_threshold => 5,
+                queue_max => 100
+            }
+        )}.
+
+-spec riak_options(mg_core:ns(), map()) -> mg_riak_storage:options(). 
+riak_options(Namespace, PoolOptions) -> + #{ + name => storage, + pulse => ?MODULE, + host => "riakdb", + port => 8087, + bucket => Namespace, + pool_options => PoolOptions + }. + +-spec start_storage(mg_core_storage:options()) -> pid(). +start_storage(Options) -> + mg_core_utils:throw_if_error( + genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + [mg_core_storage:child_spec(Options, storage)] + ) + ). + +-spec stop_storage(pid()) -> ok. +stop_storage(Pid) -> + ok = proc_lib:stop(Pid, normal, 5000), + ok. + +-spec handle_beat(_, mg_core_pulse:beat()) -> ok. +handle_beat(_, Beat) -> + ct:pal("~p", [Beat]). diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index 12a2306c..4bd67f57 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -323,7 +323,7 @@ storage(NS, YamlConfig) -> mg_core_storage_memory; <<"riak">> -> PoolSize = ?C:conf([storage, pool, size], YamlConfig, 100), - {mg_core_storage_riak, #{ + {mg_riak_storage, #{ host => ?C:conf([storage, host], YamlConfig), port => ?C:conf([storage, port], YamlConfig), bucket => NS, From f6e7094fb83f7fc59fadc719b6470799d102a7ea Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Sat, 27 Apr 2024 17:26:35 +0300 Subject: [PATCH 07/31] Extracts gen_squad into umbrella --- apps/gen_squad/include/gen_squad_cth.hrl | 56 +++++++++++++++++++ apps/gen_squad/rebar.config | 0 apps/gen_squad/src/gen_squad.app.src | 15 +++++ .../src/gen_squad.erl} | 29 ++++++---- .../src/gen_squad_heart.erl} | 12 ++-- .../src/gen_squad_pulse.erl} | 31 +++++++--- .../test/gen_squad_SUITE.erl} | 33 ++++++----- apps/machinegun/src/mg_pulse_log.erl | 2 +- apps/mg_core/src/mg_core.app.src | 3 +- apps/mg_core/src/mg_core_queue_scanner.erl | 30 +++++----- apps/mg_core/src/mg_core_scheduler_sup.erl | 2 +- apps/mg_es_kafka/src/mg_es_kafka.app.src | 2 +- apps/mg_es_machine/src/mg_es_machine.app.src | 2 +- apps/mg_riak/src/mg_riak.app.src | 2 +- elvis.config | 4 +- 15 files 
changed, 157 insertions(+), 66 deletions(-) create mode 100644 apps/gen_squad/include/gen_squad_cth.hrl create mode 100644 apps/gen_squad/rebar.config create mode 100644 apps/gen_squad/src/gen_squad.app.src rename apps/{mg_core/src/mg_core_gen_squad.erl => gen_squad/src/gen_squad.erl} (96%) rename apps/{mg_core/src/mg_core_gen_squad_heart.erl => gen_squad/src/gen_squad_heart.erl} (93%) rename apps/{mg_core/src/mg_core_gen_squad_pulse.erl => gen_squad/src/gen_squad_pulse.erl} (63%) rename apps/{mg_core/test/mg_core_gen_squad_SUITE.erl => gen_squad/test/gen_squad_SUITE.erl} (87%) diff --git a/apps/gen_squad/include/gen_squad_cth.hrl b/apps/gen_squad/include/gen_squad_cth.hrl new file mode 100644 index 00000000..1bd9a33f --- /dev/null +++ b/apps/gen_squad/include/gen_squad_cth.hrl @@ -0,0 +1,56 @@ +-ifndef(__gen_squad_cth__). +-define(__gen_squad_cth__, 42). + +-define(flushMailbox(__Acc0), + (fun __Flush(__Acc) -> + receive + __M -> __Flush([__M | __Acc]) + after 0 -> __Acc + end + end)( + __Acc0 + ) +). + +-define(assertReceive(__Expr), + ?assertReceive(__Expr, 1000) +). + +-define(assertReceive(__Expr, __Timeout), + (fun() -> + receive + (__Expr) = __V -> __V + after __Timeout -> + erlang:error( + {assertReceive, [ + {module, ?MODULE}, + {line, ?LINE}, + {expression, (??__Expr)}, + {mailbox, ?flushMailbox([])} + ]} + ) + end + end)() +). + +-define(assertNoReceive(), + ?assertNoReceive(1000) +). + +-define(assertNoReceive(__Timeout), + (fun() -> + receive + __Message -> + erlang:error( + {assertNoReceive, [ + {module, ?MODULE}, + {line, ?LINE}, + {mailbox, ?flushMailbox([__Message])} + ]} + ) + after __Timeout -> ok + end + end)() +). + +-endif. 
diff --git a/apps/gen_squad/rebar.config b/apps/gen_squad/rebar.config new file mode 100644 index 00000000..e69de29b diff --git a/apps/gen_squad/src/gen_squad.app.src b/apps/gen_squad/src/gen_squad.app.src new file mode 100644 index 00000000..18937c0c --- /dev/null +++ b/apps/gen_squad/src/gen_squad.app.src @@ -0,0 +1,15 @@ +{application, gen_squad, [ + {description, "Generic squad behaviour"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_gen_squad.erl b/apps/gen_squad/src/gen_squad.erl similarity index 96% rename from apps/mg_core/src/mg_core_gen_squad.erl rename to apps/gen_squad/src/gen_squad.erl index ece672ac..e1782ed4 100644 --- a/apps/mg_core/src/mg_core_gen_squad.erl +++ b/apps/gen_squad/src/gen_squad.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ %%% - Do we even need monitors? %%% - More tests %%% --module(mg_core_gen_squad). +-module(gen_squad). %% @@ -134,19 +134,24 @@ discovery => discovery_opts(), heartbeat => heartbeat_opts(), promotion => promotion_opts(), - pulse => mg_core_gen_squad_pulse:handler() + pulse => gen_squad_pulse:handler() }. -export_type([opts/0]). -export_type([heartbeat_opts/0]). +-type gen_reg_name() :: + {local, atom()} + | {global, term()} + | {via, module(), term()}. + %% -spec start_link(module(), _Args, opts()) -> {ok, pid()} | ignore | {error, _}. start_link(Module, Args, Opts) -> gen_server:start_link(?MODULE, mk_state(Module, Args, set_defaults(Opts)), []). --spec start_link(mg_core_procreg:reg_name(), module(), _Args, opts()) -> +-spec start_link(gen_reg_name(), module(), _Args, opts()) -> {ok, pid()} | ignore | {error, _}. 
start_link(RegName, Module, Args, Opts) -> gen_server:start_link(RegName, ?MODULE, mk_state(Module, Args, set_defaults(Opts)), []). @@ -236,7 +241,7 @@ init(St0) -> {ok, St = #st{squad = Squad0, opts = Opts}} -> Squad = add_member(self(), Squad0, Opts), HeartOpts = maps:with([heartbeat, pulse], Opts), - {ok, HeartPid} = mg_core_gen_squad_heart:start_link(heartbeat, HeartOpts), + {ok, HeartPid} = gen_squad_heart:start_link(heartbeat, HeartOpts), {ok, defer_discovery(St#st{heart = HeartPid, squad = Squad})}; Ret -> Ret @@ -247,7 +252,7 @@ handle_call(Call, From, St = #st{squad = Squad}) -> invoke_callback(handle_call, [Call, From, get_rank(St), Squad], try_cancel_st_timer(user, St)). -type cast() :: - mg_core_gen_squad_heart:envelope() + gen_squad_heart:envelope() | heartbeat. -spec handle_cast(cast(), st()) -> noreply(st()). @@ -259,7 +264,7 @@ handle_cast(heartbeat, St) -> handle_cast(Cast, St = #st{squad = Squad}) -> invoke_callback(handle_cast, [Cast, get_rank(St), Squad], try_cancel_st_timer(user, St)). --spec handle_broadcast(mg_core_gen_squad_heart:payload(), st()) -> noreply(st()). +-spec handle_broadcast(gen_squad_heart:payload(), st()) -> noreply(st()). handle_broadcast( #{msg := howdy, from := Pid, members := Pids}, St = #st{squad = Squad0, opts = Opts} @@ -360,7 +365,7 @@ try_update_squad(Squad, St0 = #st{heart = HeartPid, opts = Opts}) -> ok = case has_squad_changed(Squad, St0) of {true, Members} -> - mg_core_gen_squad_heart:update_members(Members, HeartPid); + gen_squad_heart:update_members(Members, HeartPid); false -> ok end, @@ -486,13 +491,13 @@ account_heartbeat(Member) -> -type recepient_filter() :: fun((pid()) -> boolean()). --spec broadcast(mg_core_gen_squad_heart:message(), recepient_filter(), squad(), _Ctx, opts()) -> ok. +-spec broadcast(gen_squad_heart:message(), recepient_filter(), squad(), _Ctx, opts()) -> ok. 
broadcast(Message, RecepientFilter, Squad, Ctx, Opts) -> Self = self(), Members = members(maps:remove(Self, Squad)), Recepients = lists:filter(RecepientFilter, Members), Pulse = maps:get(pulse, Opts, undefined), - mg_core_gen_squad_heart:broadcast(Message, Self, Members, Recepients, Ctx, Pulse). + gen_squad_heart:broadcast(Message, Self, Members, Recepients, Ctx, Pulse). -spec newbies(squad()) -> recepient_filter(). newbies(Squad) -> @@ -581,10 +586,10 @@ cancel_monitor(MRef, Opts) -> %% --spec beat(mg_core_gen_squad_pulse:beat(), st() | opts()) -> _. +-spec beat(gen_squad_pulse:beat(), st() | opts()) -> _. beat(Beat, #st{opts = Opts}) -> beat(Beat, Opts); beat(Beat, #{pulse := Handler}) -> - mg_core_gen_squad_pulse:handle_beat(Handler, Beat); + gen_squad_pulse:handle_beat(Handler, Beat); beat(_Beat, _St) -> ok. diff --git a/apps/mg_core/src/mg_core_gen_squad_heart.erl b/apps/gen_squad/src/gen_squad_heart.erl similarity index 93% rename from apps/mg_core/src/mg_core_gen_squad_heart.erl rename to apps/gen_squad/src/gen_squad_heart.erl index bc3f5f85..43005bc2 100644 --- a/apps/mg_core/src/mg_core_gen_squad_heart.erl +++ b/apps/gen_squad/src/gen_squad_heart.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_heart). +-module(gen_squad_heart). -export([start_link/2]). -export([update_members/2]). @@ -43,9 +43,9 @@ -type envelope() :: {'$squad', payload()}. --type pulse() :: mg_core_gen_squad_pulse:handler(). +-type pulse() :: gen_squad_pulse:handler(). -type opts() :: #{ - heartbeat => mg_core_gen_squad:heartbeat_opts(), + heartbeat => gen_squad:heartbeat_opts(), pulse => pulse() }. 
@@ -165,12 +165,12 @@ monitor_self(St = #st{self = Self}) -> %% --spec beat(mg_core_gen_squad_pulse:beat(), st() | opts() | pulse() | undefined) -> _. +-spec beat(gen_squad_pulse:beat(), st() | opts() | pulse() | undefined) -> _. beat(Beat, #st{opts = Opts}) -> beat(Beat, Opts); beat(Beat, Opts = #{}) -> beat(Beat, maps:get(pulse, Opts, undefined)); beat(Beat, Handler) when Handler /= undefined -> - mg_core_gen_squad_pulse:handle_beat(Handler, Beat); + gen_squad_pulse:handle_beat(Handler, Beat); beat(_Beat, undefined) -> ok. diff --git a/apps/mg_core/src/mg_core_gen_squad_pulse.erl b/apps/gen_squad/src/gen_squad_pulse.erl similarity index 63% rename from apps/mg_core/src/mg_core_gen_squad_pulse.erl rename to apps/gen_squad/src/gen_squad_pulse.erl index 00566d87..736fe9d0 100644 --- a/apps/mg_core/src/mg_core_gen_squad_pulse.erl +++ b/apps/gen_squad/src/gen_squad_pulse.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,21 +14,21 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_pulse). +-module(gen_squad_pulse). -callback handle_beat(_Options, beat()) -> _. %% TODO remove weak circular deps -type beat() :: - {rank, {changed, mg_core_gen_squad:rank()}} + {rank, {changed, gen_squad:rank()}} | { {member, pid()}, added - | {refreshed, mg_core_gen_squad:member()} - | {removed, mg_core_gen_squad:member(), _Reason :: lost | {down, _}} + | {refreshed, gen_squad:member()} + | {removed, gen_squad:member(), _Reason :: lost | {down, _}} } | { - {broadcast, mg_core_gen_squad_heart:payload()}, + {broadcast, gen_squad_heart:payload()}, {sent, [pid()], _Ctx} | received } @@ -46,7 +46,10 @@ } | {unexpected, {{call, _From} | cast | info, _Payload}}. --type handler() :: mg_core_utils:mod_opts(). +-type mod_opts() :: mod_opts(term()). 
+-type mod_opts(Options) :: {module(), Options} | module(). + +-type handler() :: mod_opts(). -export_type([beat/0]). -export_type([handler/0]). @@ -57,5 +60,17 @@ -spec handle_beat(handler(), any()) -> _. handle_beat(Handler, Beat) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(Handler), + {Mod, Options} = separate_mod_opts(Handler), Mod:handle_beat(Options, Beat). + +%% + +-spec separate_mod_opts(mod_opts()) -> {module(), _Arg}. +separate_mod_opts(ModOpts) -> + separate_mod_opts(ModOpts, undefined). + +-spec separate_mod_opts(mod_opts(Defaults), Defaults) -> {module(), Defaults}. +separate_mod_opts(ModOpts = {_, _}, _) -> + ModOpts; +separate_mod_opts(Mod, Default) -> + {Mod, Default}. diff --git a/apps/mg_core/test/mg_core_gen_squad_SUITE.erl b/apps/gen_squad/test/gen_squad_SUITE.erl similarity index 87% rename from apps/mg_core/test/mg_core_gen_squad_SUITE.erl rename to apps/gen_squad/test/gen_squad_SUITE.erl index 07e05712..f0b90150 100644 --- a/apps/mg_core/test/mg_core_gen_squad_SUITE.erl +++ b/apps/gen_squad/test/gen_squad_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_SUITE). --include_lib("common_test/include/ct.hrl"). +-module(gen_squad_SUITE). + -include_lib("stdlib/include/assert.hrl"). --include_lib("mg_cth/include/mg_cth.hrl"). +-include("gen_squad_cth.hrl"). %% tests descriptions -export([all/0]). @@ -29,7 +29,7 @@ -export([squad_shrinks_consistently/1]). %% squad behaviour --behaviour(mg_core_gen_squad). +-behaviour(gen_squad). -export([init/1]). -export([discover/1]). -export([handle_rank_change/3]). @@ -37,7 +37,7 @@ -export([handle_cast/4]). -export([handle_info/4]). --behaviour(mg_core_gen_squad_pulse). +-behaviour(gen_squad_pulse). -export([handle_beat/2]). 
%% tests descriptions @@ -56,12 +56,11 @@ all() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> - Apps = mg_cth:start_applications([mg_core]), - [{apps, Apps} | C]. + C. -spec end_per_suite(config()) -> ok. -end_per_suite(C) -> - mg_cth:stop_applications(?config(apps, C)). +end_per_suite(_C) -> + ok. %% @@ -122,9 +121,9 @@ squad_shrinks_consistently(_) -> _ = ?assertEqual([LeaderLast], lists:filter(fun erlang:is_process_alive/1, Members)), ok. --spec start_member(mg_core_gen_squad:opts()) -> pid(). +-spec start_member(gen_squad:opts()) -> pid(). start_member(Opts) -> - {ok, Pid} = mg_core_gen_squad:start_link(?MODULE, #{runner => self(), known => []}, Opts), + {ok, Pid} = gen_squad:start_link(?MODULE, #{runner => self(), known => []}, Opts), Pid. -spec neighbours([T]) -> [{T, T}]. @@ -134,8 +133,8 @@ neighbours([]) -> []. %% --type rank() :: mg_core_gen_squad:rank(). --type squad() :: mg_core_gen_squad:squad(). +-type rank() :: gen_squad:rank(). +-type squad() :: gen_squad:squad(). -type st() :: #{ runner := pid(), @@ -152,7 +151,7 @@ discover(St = #{known := Known}) -> -spec handle_rank_change(rank(), squad(), st()) -> {noreply, st()}. handle_rank_change(Rank, Squad, St = #{runner := Runner}) -> - _ = Runner ! {self(), Rank, mg_core_gen_squad:members(Squad)}, + _ = Runner ! {self(), Rank, gen_squad:members(Squad)}, case Rank of leader -> {noreply, St, 200}; follower -> {noreply, St} @@ -164,7 +163,7 @@ handle_rank_change(_Rank, _Squad, St) -> -spec handle_call(call(), _From, rank(), squad(), st()) -> {noreply, st()} | {reply, _, st()}. handle_call(report, _From, Rank, Squad, St) -> - {reply, {self(), Rank, mg_core_gen_squad:members(Squad)}, St}; + {reply, {self(), Rank, gen_squad:members(Squad)}, St}; handle_call(Call, From, _Rank, _Squad, _St) -> erlang:error({unexpected, {call, Call, From}}). @@ -184,7 +183,7 @@ handle_info(timeout, leader, _Squad, St) -> handle_info(Info, _Rank, _Squad, _St) -> erlang:error({unexpected, {info, Info}}). 
--spec handle_beat(_, mg_core_gen_squad_pulse:beat()) -> _. +-spec handle_beat(_, gen_squad_pulse:beat()) -> _. handle_beat(_Start, {{timer, _}, _}) -> ok; handle_beat(_Start, {{monitor, _}, _}) -> diff --git a/apps/machinegun/src/mg_pulse_log.erl b/apps/machinegun/src/mg_pulse_log.erl index c4118eb6..192684c6 100644 --- a/apps/machinegun/src/mg_pulse_log.erl +++ b/apps/machinegun/src/mg_pulse_log.erl @@ -122,7 +122,7 @@ format_beat(_Beat, _Options) -> undefined. %% squad --spec format_squad_beat(mg_core_gen_squad_pulse:beat()) -> log_msg() | undefined. +-spec format_squad_beat(gen_squad_pulse:beat()) -> log_msg() | undefined. format_squad_beat({rank, {changed, Rank}}) -> {info, {"rank changed to: ~p", [Rank]}, [ {mg_pulse_event_id, squad_rank_changed}, diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index 55db7235..f434c1aa 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -14,7 +14,7 @@ %%% limitations under the License. %%% -{application, mg_core , [ +{application, mg_core, [ {description, "Machinegun FSM processor"}, {vsn, "1"}, {registered, []}, @@ -26,6 +26,7 @@ brod, msgpack, snowflake, + gen_squad, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_core/src/mg_core_queue_scanner.erl b/apps/mg_core/src/mg_core_queue_scanner.erl index 6028d2df..96297f3a 100644 --- a/apps/mg_core/src/mg_core_queue_scanner.erl +++ b/apps/mg_core/src/mg_core_queue_scanner.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ max_scan_limit => scan_limit() | unlimited, scan_ahead => scan_ahead(), retry_scan_delay => scan_delay(), - squad_opts => mg_core_gen_squad:opts(), + squad_opts => gen_squad:opts(), pulse => mg_core_pulse:handler() }. @@ -49,7 +49,7 @@ -export_type([scan_limit/0]). -export_type([scan_ahead/0]). 
--type beat() :: {squad, {atom(), mg_core_gen_squad_pulse:beat(), _ExtraMeta}}.
+-type beat() :: {squad, {atom(), gen_squad_pulse:beat(), _ExtraMeta}}.
 -export_type([beat/0]).
 
 %%
@@ -84,7 +84,7 @@
 -export([start_link/2]).
 -export([where_is/1]).
 
--behaviour(mg_core_gen_squad).
+-behaviour(gen_squad).
 -export([init/1]).
 -export([discover/1]).
 -export([handle_rank_change/3]).
@@ -92,7 +92,7 @@
 -export([handle_cast/4]).
 -export([handle_info/4]).
 
--behaviour(mg_core_gen_squad_pulse).
+-behaviour(gen_squad_pulse).
 -export([handle_beat/2]).
 
 %%
@@ -130,7 +130,7 @@ start_link(SchedulerID, Options) ->
             maps:with([pulse], Options)
         )
     ),
-    mg_core_gen_squad:start_link(
+    gen_squad:start_link(
         self_reg_name(SchedulerID),
         ?MODULE,
         {SchedulerID, Options},
@@ -157,8 +157,8 @@ where_is(SchedulerID) ->
 
 -type st() :: #st{}.
 
--type rank() :: mg_core_gen_squad:rank().
--type squad() :: mg_core_gen_squad:squad().
+-type rank() :: gen_squad:rank().
+-type squad() :: gen_squad:squad().
 
 -spec init({scheduler_id(), options()}) -> {ok, st()}.
 init({SchedulerID, Options}) ->
@@ -191,7 +191,7 @@ handle_rank_change(follower, _Squad, St) ->
 -spec handle_cast(_Cast, rank(), squad(), st()) -> {noreply, st()}.
 handle_cast(Cast, Rank, _Squad, St) ->
     ok = logger:error(
-        "unexpected mg_core_gen_squad cast received: ~p, from ~p, rank ~p, state ~p",
+        "unexpected gen_squad cast received: ~p, rank ~p, state ~p",
         [Cast, Rank, St]
     ),
     {noreply, St}.
@@ -199,7 +199,7 @@ handle_cast(Cast, Rank, _Squad, St) ->
 -spec handle_call(_Call, mg_core_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}.
 handle_call(Call, From, Rank, _Squad, St) ->
     ok = logger:error(
-        "unexpected mg_core_gen_squad call received: ~p, from ~p, rank ~p, state ~p",
+        "unexpected gen_squad call received: ~p, from ~p, rank ~p, state ~p",
         [Call, From, Rank, St]
     ),
     {noreply, St}. 
@@ -213,12 +213,12 @@ handle_info(scan, follower, _Squad, St) -> {noreply, St}; handle_info(Info, Rank, _Squad, St) -> ok = logger:warning( - "unexpected mg_core_gen_squad info received: ~p, rank ~p, state ~p", + "unexpected gen_squad info received: ~p, rank ~p, state ~p", [Info, Rank, St] ), {noreply, St}. --spec handle_scan(mg_core_gen_squad:squad(), st()) -> st(). +-spec handle_scan(gen_squad:squad(), st()) -> st(). handle_scan(Squad, St0 = #st{max_limit = MaxLimit, retry_delay = RetryDelay}) -> StartedAt = erlang:monotonic_time(), %% Try to find out which schedulers are here, getting their statuses @@ -269,10 +269,10 @@ disseminate_tasks(Tasks, Schedulers, Capacities, _St) -> Partitions ). --spec inquire_schedulers(mg_core_gen_squad:squad(), st()) -> [mg_core_scheduler:status()]. +-spec inquire_schedulers(gen_squad:squad(), st()) -> [mg_core_scheduler:status()]. inquire_schedulers(Squad, #st{scheduler_id = SchedulerID}) -> %% Take all known members, there's at least one which is `self()` - Members = mg_core_gen_squad:members(Squad), + Members = gen_squad:members(Squad), Nodes = lists:map(fun erlang:node/1, Members), multicall(Nodes, mg_core_scheduler, inquire, [SchedulerID], ?INQUIRY_TIMEOUT). @@ -371,7 +371,7 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ %% --spec handle_beat({mg_core_pulse:handler(), scheduler_id()}, mg_core_gen_squad_pulse:beat()) -> _. +-spec handle_beat({mg_core_pulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. 
handle_beat({Handler, {Name, NS}}, Beat) -> Producer = queue_scanner, Extra = [{scheduler_type, Name}, {namespace, NS}], diff --git a/apps/mg_core/src/mg_core_scheduler_sup.erl b/apps/mg_core/src/mg_core_scheduler_sup.erl index 11fc809f..d15081ae 100644 --- a/apps/mg_core/src/mg_core_scheduler_sup.erl +++ b/apps/mg_core/src/mg_core_scheduler_sup.erl @@ -29,7 +29,7 @@ max_scan_limit => mg_core_queue_scanner:scan_limit() | unlimited, scan_ahead => mg_core_queue_scanner:scan_ahead(), retry_scan_delay => mg_core_queue_scanner:scan_delay(), - squad_opts => mg_core_gen_squad:opts(), + squad_opts => gen_squad:opts(), % workers task_handler := mg_core_utils:mod_opts(), % common diff --git a/apps/mg_es_kafka/src/mg_es_kafka.app.src b/apps/mg_es_kafka/src/mg_es_kafka.app.src index 9168b3fc..9f2e6f46 100644 --- a/apps/mg_es_kafka/src/mg_es_kafka.app.src +++ b/apps/mg_es_kafka/src/mg_es_kafka.app.src @@ -1,4 +1,4 @@ -{application, mg_es_kafka , [ +{application, mg_es_kafka, [ {description, "Event sink kafka implementation"}, {vsn, "1"}, {registered, []}, diff --git a/apps/mg_es_machine/src/mg_es_machine.app.src b/apps/mg_es_machine/src/mg_es_machine.app.src index bdafc3da..f6d2383d 100644 --- a/apps/mg_es_machine/src/mg_es_machine.app.src +++ b/apps/mg_es_machine/src/mg_es_machine.app.src @@ -1,4 +1,4 @@ -{application, mg_es_machine , [ +{application, mg_es_machine, [ {description, "Event sink machine implementation"}, {vsn, "1"}, {registered, []}, diff --git a/apps/mg_riak/src/mg_riak.app.src b/apps/mg_riak/src/mg_riak.app.src index 9fab2887..da0470ac 100644 --- a/apps/mg_riak/src/mg_riak.app.src +++ b/apps/mg_riak/src/mg_riak.app.src @@ -14,7 +14,7 @@ %%% limitations under the License. 
%%% -{application, mg_riak , [ +{application, mg_riak, [ {description, "Machinegun Riak Storage"}, {vsn, "1"}, {registered, []}, diff --git a/elvis.config b/elvis.config index 91ccff3a..b076488b 100644 --- a/elvis.config +++ b/elvis.config @@ -39,8 +39,8 @@ }}, {elvis_style, state_record_and_type, #{ ignore => [ - mg_core_gen_squad, - mg_core_gen_squad_heart, + gen_squad, + gen_squad_heart, mg_core_storage_memory, mg_core_union, mg_core_worker From c90f79fd87422987d853e74ecc836690419e7f78 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Sat, 27 Apr 2024 18:57:50 +0300 Subject: [PATCH 08/31] (WIP) Extracts scheduler into separate application in umbrella Requires fix for procreg components. --- apps/machinegun/src/mg_pulse.erl | 2 +- apps/machinegun/src/mg_pulse_log.erl | 13 +- apps/machinegun/src/mg_pulse_prometheus.erl | 19 +-- .../test/mg_prometheus_metric_SUITE.erl | 15 ++- apps/mg_conf/src/mg_conf.erl | 6 +- apps/mg_core/include/pulse.hrl | 64 --------- apps/mg_core/src/mg_core.app.src | 2 +- apps/mg_core/src/mg_core_machine.erl | 31 +++-- apps/mg_core/src/mg_core_pulse.erl | 16 +-- apps/mg_core/src/mg_core_pulse_otel.erl | 4 +- .../mg_core/src/mg_core_queue_interrupted.erl | 10 +- .../src/mg_core_queue_notifications.erl | 20 +-- apps/mg_core/src/mg_core_queue_timer.erl | 14 +- apps/mg_core/src/mg_core_storage.erl | 2 +- apps/mg_core/src/mg_core_utils.erl | 71 ++-------- apps/mg_cth/src/mg_cth_configurator.erl | 2 +- apps/mg_scheduler/include/pulse.hrl | 63 +++++++++ apps/mg_scheduler/rebar.config | 4 + apps/mg_scheduler/src/mg_scheduler.app.src | 18 +++ .../src/mg_skd.erl} | 62 ++++----- apps/mg_scheduler/src/mg_skd_pulse.erl | 25 ++++ .../src/mg_skd_quota.erl} | 4 +- .../src/mg_skd_quota_manager.erl} | 12 +- .../src/mg_skd_quota_worker.erl} | 42 +++--- .../src/mg_skd_scanner.erl} | 60 ++++----- .../src/mg_skd_sup.erl} | 32 ++--- .../src/mg_skd_task.erl} | 6 +- apps/mg_scheduler/src/mg_skd_utils.erl | 126 ++++++++++++++++++ .../src/mg_skd_worker.erl} | 40 
+++--- .../test/mg_skd_quota_SUITE.erl} | 50 +++---- apps/mg_woody/src/mg_woody_pulse_otel.erl | 2 +- rel_scripts/configurator.escript | 2 +- 32 files changed, 481 insertions(+), 358 deletions(-) create mode 100644 apps/mg_scheduler/include/pulse.hrl create mode 100644 apps/mg_scheduler/rebar.config create mode 100644 apps/mg_scheduler/src/mg_scheduler.app.src rename apps/{mg_core/src/mg_core_scheduler.erl => mg_scheduler/src/mg_skd.erl} (87%) create mode 100644 apps/mg_scheduler/src/mg_skd_pulse.erl rename apps/{mg_core/src/mg_core_quota.erl => mg_scheduler/src/mg_skd_quota.erl} (99%) rename apps/{mg_core/src/mg_core_quota_manager.erl => mg_scheduler/src/mg_skd_quota_manager.erl} (80%) rename apps/{mg_core/src/mg_core_quota_worker.erl => mg_scheduler/src/mg_skd_quota_worker.erl} (82%) rename apps/{mg_core/src/mg_core_queue_scanner.erl => mg_scheduler/src/mg_skd_scanner.erl} (84%) rename apps/{mg_core/src/mg_core_scheduler_sup.erl => mg_scheduler/src/mg_skd_sup.erl} (64%) rename apps/{mg_core/src/mg_core_queue_task.erl => mg_scheduler/src/mg_skd_task.erl} (92%) create mode 100644 apps/mg_scheduler/src/mg_skd_utils.erl rename apps/{mg_core/src/mg_core_scheduler_worker.erl => mg_scheduler/src/mg_skd_worker.erl} (78%) rename apps/{mg_core/test/mg_core_quota_SUITE.erl => mg_scheduler/test/mg_skd_quota_SUITE.erl} (91%) diff --git a/apps/machinegun/src/mg_pulse.erl b/apps/machinegun/src/mg_pulse.erl index 9767bf94..561e0b5f 100644 --- a/apps/machinegun/src/mg_pulse.erl +++ b/apps/machinegun/src/mg_pulse.erl @@ -28,7 +28,7 @@ -type beat() :: mg_core_pulse:beat() | mg_riak_pulse:beat() - | mg_core_queue_scanner:beat() + | mg_skd_scanner:beat() | #woody_event{} | #woody_request_handle_error{} | #mg_event_sink_kafka_sent{}. diff --git a/apps/machinegun/src/mg_pulse_log.erl b/apps/machinegun/src/mg_pulse_log.erl index 192684c6..b9e1a366 100644 --- a/apps/machinegun/src/mg_pulse_log.erl +++ b/apps/machinegun/src/mg_pulse_log.erl @@ -16,6 +16,7 @@ -module(mg_pulse_log). 
+-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). -include_lib("mg_woody/include/pulse.hrl"). @@ -70,14 +71,14 @@ format_beat(#woody_event{event = Event, rpc_id = RPCID, event_meta = EventMeta}, WoodyMeta = woody_event_handler:format_meta(Event, EventMeta, WoodyMetaFields), Meta = lists:flatten([extract_woody_meta(WoodyMeta), extract_meta(rpc_id, RPCID)]), {Level, Msg, Meta}; -format_beat(#mg_core_scheduler_task_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_task_error, Beat), +format_beat(#mg_skd_task_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_task_error, Beat), {warning, {"scheduler task ~p failed ~p", [Name, Reason]}, Context}; -format_beat(#mg_core_scheduler_task_add_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_task_add_error, Beat), +format_beat(#mg_skd_task_add_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_task_add_error, Beat), {warning, {"scheduler task ~p add failed ~p", [Name, Reason]}, Context}; -format_beat(#mg_core_scheduler_search_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_search_error, Beat), +format_beat(#mg_skd_search_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_search_error, Beat), {warning, {"scheduler search ~p failed ~p", [Name, Reason]}, Context}; format_beat(#mg_core_machine_process_transient_error{exception = {_, Reason, _}} = Beat, _Options) -> Context = ?BEAT_TO_META(mg_core_machine_process_transient_error, Beat), diff --git a/apps/machinegun/src/mg_pulse_prometheus.erl b/apps/machinegun/src/mg_pulse_prometheus.erl index 4ddcdffe..b090a996 100644 --- 
a/apps/machinegun/src/mg_pulse_prometheus.erl +++ b/apps/machinegun/src/mg_pulse_prometheus.erl @@ -16,6 +16,7 @@ -module(mg_pulse_prometheus). +-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). -export([setup/0]). @@ -250,7 +251,7 @@ dispatch_metrics(#mg_core_timer_process_finished{namespace = NS, queue = Queue, ok = inc(mg_timer_processing_changes_total, [NS, Queue, finished]), ok = observe(mg_timer_processing_duration_seconds, [NS, Queue], Duration); % Scheduler -dispatch_metrics(#mg_core_scheduler_search_success{ +dispatch_metrics(#mg_skd_search_success{ scheduler_name = Name, namespace = NS, delay = DelayMS, @@ -259,25 +260,25 @@ dispatch_metrics(#mg_core_scheduler_search_success{ ok = inc(mg_scheduler_scan_changes_total, [NS, Name, success]), ok = observe(mg_scheduler_scan_delay_seconds, [NS, Name], decode_delay(DelayMS)), ok = observe(mg_scheduler_scan_duration_seconds, [NS, Name], Duration); -dispatch_metrics(#mg_core_scheduler_search_error{scheduler_name = Name, namespace = NS}) -> +dispatch_metrics(#mg_skd_search_error{scheduler_name = Name, namespace = NS}) -> ok = inc(mg_scheduler_scan_changes_total, [NS, Name, error]); -dispatch_metrics(#mg_core_scheduler_task_error{scheduler_name = Name, namespace = NS}) -> +dispatch_metrics(#mg_skd_task_error{scheduler_name = Name, namespace = NS}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, error]); -dispatch_metrics(#mg_core_scheduler_new_tasks{scheduler_name = Name, namespace = NS, new_tasks_count = Count}) -> +dispatch_metrics(#mg_skd_new_tasks{scheduler_name = Name, namespace = NS, new_tasks_count = Count}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, created], Count); -dispatch_metrics(#mg_core_scheduler_task_started{scheduler_name = Name, namespace = NS, task_delay = DelayMS}) -> +dispatch_metrics(#mg_skd_task_started{scheduler_name = Name, namespace = NS, task_delay = DelayMS}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, started]), ok 
= observe(mg_scheduler_task_processing_delay_seconds, [NS, Name], decode_delay(DelayMS)); -dispatch_metrics(#mg_core_scheduler_task_finished{} = Beat) -> - #mg_core_scheduler_task_finished{ +dispatch_metrics(#mg_skd_task_finished{} = Beat) -> + #mg_skd_task_finished{ scheduler_name = Name, namespace = NS, process_duration = Duration } = Beat, ok = inc(mg_scheduler_task_changes_total, [NS, Name, finished]), ok = observe(mg_scheduler_task_processing_duration_seconds, [NS, Name], Duration); -dispatch_metrics(#mg_core_scheduler_quota_reserved{} = Beat) -> - #mg_core_scheduler_quota_reserved{ +dispatch_metrics(#mg_skd_quota_reserved{} = Beat) -> + #mg_skd_quota_reserved{ scheduler_name = Name, namespace = NS, active_tasks = Active, diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 6189df66..358bf9cd 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -18,6 +18,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). -include_lib("mg_es_kafka/include/pulse.hrl"). @@ -326,7 +327,7 @@ scheduler_search_success_test(_C) -> Buckets = test_millisecond_buckets(), _ = maps:fold( fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_scheduler_search_success{ + ok = test_beat(#mg_skd_search_success{ namespace = ?NS, scheduler_name = name, delay = 0, @@ -347,7 +348,7 @@ scheduler_search_success_test(_C) -> -spec scheduler_search_error_test(config()) -> _. scheduler_search_error_test(_C) -> - ok = test_beat(#mg_core_scheduler_search_error{ + ok = test_beat(#mg_skd_search_error{ namespace = ?NS, scheduler_name = name, exception = {throw, thrown, []} @@ -355,7 +356,7 @@ scheduler_search_error_test(_C) -> -spec scheduler_task_error_test(config()) -> _. 
scheduler_task_error_test(_C) -> - ok = test_beat(#mg_core_scheduler_task_error{ + ok = test_beat(#mg_skd_task_error{ namespace = ?NS, machine_id = <<"ID">>, scheduler_name = name, @@ -364,7 +365,7 @@ scheduler_task_error_test(_C) -> -spec scheduler_new_tasks_test(config()) -> _. scheduler_new_tasks_test(_C) -> - ok = test_beat(#mg_core_scheduler_new_tasks{ + ok = test_beat(#mg_skd_new_tasks{ namespace = ?NS, scheduler_name = name, new_tasks_count = 0 @@ -372,7 +373,7 @@ scheduler_new_tasks_test(_C) -> -spec scheduler_task_started_test(config()) -> _. scheduler_task_started_test(_C) -> - ok = test_beat(#mg_core_scheduler_task_started{ + ok = test_beat(#mg_skd_task_started{ namespace = ?NS, scheduler_name = name, machine_id = <<"ID">>, @@ -384,7 +385,7 @@ scheduler_task_finished_test(_C) -> Buckets = test_millisecond_buckets(), _ = maps:fold( fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_scheduler_task_finished{ + ok = test_beat(#mg_skd_task_finished{ namespace = ?NS, scheduler_name = name, machine_id = <<"ID">>, @@ -404,7 +405,7 @@ scheduler_task_finished_test(_C) -> -spec scheduler_quota_reserved_test(config()) -> _. scheduler_quota_reserved_test(_C) -> - ok = test_beat(#mg_core_scheduler_quota_reserved{ + ok = test_beat(#mg_skd_quota_reserved{ namespace = ?NS, scheduler_name = name, active_tasks = 0, diff --git a/apps/mg_conf/src/mg_conf.erl b/apps/mg_conf/src/mg_conf.erl index e2c5829e..d64c149a 100644 --- a/apps/mg_conf/src/mg_conf.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -33,7 +33,7 @@ woody_server := mg_woody:woody_server(), event_sink_ns := event_sink_ns(), namespaces := namespaces(), - quotas => [mg_core_quota_worker:options()], + quotas => [mg_skd_quota_worker:options()], pulse := pulse(), health_check => erl_health:check() }. @@ -85,10 +85,10 @@ construct_child_specs( %% --spec quotas_child_specs([mg_core_quota_worker:options()], atom()) -> [supervisor:child_spec()]. 
+-spec quotas_child_specs([mg_skd_quota_worker:options()], atom()) -> [supervisor:child_spec()]. quotas_child_specs(Quotas, ChildID) -> [ - mg_core_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) + mg_skd_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) || Options <- Quotas ]. diff --git a/apps/mg_core/include/pulse.hrl b/apps/mg_core/include/pulse.hrl index a5a3b99f..3bc8dffc 100644 --- a/apps/mg_core/include/pulse.hrl +++ b/apps/mg_core/include/pulse.hrl @@ -52,70 +52,6 @@ duration :: non_neg_integer() }). -%% Scheduler - --record(mg_core_scheduler_search_success, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - delay :: mg_core_queue_scanner:scan_delay(), - tasks :: [mg_core_queue_task:task()], - limit :: mg_core_queue_scanner:scan_limit(), - % in native units - duration :: non_neg_integer() -}). - --record(mg_core_scheduler_search_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception() -}). - --record(mg_core_scheduler_task_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception(), - machine_id :: mg_core:id() | undefined -}). - --record(mg_core_scheduler_task_add_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception(), - machine_id :: mg_core:id(), - request_context :: mg_core:request_context() -}). - --record(mg_core_scheduler_new_tasks, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - new_tasks_count :: non_neg_integer() -}). - --record(mg_core_scheduler_task_started, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - machine_id :: mg_core:id() | undefined, - task_delay :: timeout() -}). 
- --record(mg_core_scheduler_task_finished, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - machine_id :: mg_core:id() | undefined, - task_delay :: timeout(), - % in native units - process_duration :: non_neg_integer() -}). - --record(mg_core_scheduler_quota_reserved, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - active_tasks :: non_neg_integer(), - waiting_tasks :: non_neg_integer(), - quota_name :: mg_core_quota_worker:name(), - quota_reserved :: mg_core_quota:resource() -}). - %% Machine -record(mg_core_machine_process_transient_error, { diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index f434c1aa..610db3dc 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -26,7 +26,7 @@ brod, msgpack, snowflake, - gen_squad, + mg_scheduler, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_core/src/mg_core_machine.erl b/apps/mg_core/src/mg_core_machine.erl index 3d8e2fc0..e50d5529 100644 --- a/apps/mg_core/src/mg_core_machine.erl +++ b/apps/mg_core/src/mg_core_machine.erl @@ -130,19 +130,19 @@ % how much tasks in total scheduler is ready to enqueue for processing capacity => non_neg_integer(), % wait at least this delay before subsequent scanning of persistent store for queued tasks - min_scan_delay => mg_core_queue_scanner:scan_delay(), + min_scan_delay => mg_skd_scanner:scan_delay(), % wait at most this delay before subsequent scanning attempts when queue appears to be empty - rescan_delay => mg_core_queue_scanner:scan_delay(), + rescan_delay => mg_skd_scanner:scan_delay(), % how many tasks to fetch at most - max_scan_limit => mg_core_queue_scanner:scan_limit(), + max_scan_limit => mg_skd_scanner:scan_limit(), % by how much to adjust limit to account for possibly duplicated tasks - scan_ahead => mg_core_queue_scanner:scan_ahead(), + scan_ahead => mg_skd_scanner:scan_ahead(), % how many seconds in future a task can be for it to be sent to 
the local scheduler target_cutoff => seconds(), % name of quota limiting number of active tasks - task_quota => mg_core_quota_worker:name(), + task_quota => mg_skd_quota_worker:name(), % share of quota limit - task_share => mg_core_quota:share(), + task_share => mg_skd_quota:share(), % notifications: upper bound for scan ([_; TSNow - scan_handicap]) scan_handicap => seconds(), % notifications: lower bound for scan ([TSNow - scan_handicap - scan_cutoff; _]) @@ -483,7 +483,7 @@ call_(Options, ID, Call, ReqCtx, Deadline) -> }. -type scheduler_ref() :: - {mg_core_scheduler:id(), _TargetCutoff :: seconds()}. + {mg_skd:id(), _TargetCutoff :: seconds()}. -spec handle_load(mg_core:id(), options(), request_context()) -> {ok, state()}. handle_load(ID, Options, ReqCtx) -> @@ -890,7 +890,7 @@ opaque_to_notification_args([1, Args, RequestContext]) -> TargetTime :: genlib_time:ts(). send_notification_task(Options, NotificationID, Args, MachineID, Context, TargetTime) -> Task = mg_core_queue_notifications:build_task(NotificationID, MachineID, TargetTime, Context, Args), - mg_core_scheduler:send_task(scheduler_id(notification, Options), Task). + mg_skd:send_task(scheduler_id(notification, Options), Task). -spec process_with_retry(Impact, ProcessingCtx, ReqCtx, Deadline, State, Retry) -> State when Impact :: processor_impact(), @@ -1237,16 +1237,16 @@ get_scheduler_ref(SchedulerType, Options) -> undefined end. --spec try_send_timer_task(scheduler_type(), mg_core_queue_task:target_time(), state()) -> ok. +-spec try_send_timer_task(scheduler_type(), mg_skd_task:target_time(), state()) -> ok. try_send_timer_task(SchedulerType, TargetTime, #{id := ID, schedulers := Schedulers}) -> case maps:get(SchedulerType, Schedulers, undefined) of {SchedulerID, Cutoff} when is_integer(Cutoff) -> % Ok let's send if it's not too far in the future. 
- CurrentTime = mg_core_queue_task:current_time(), + CurrentTime = mg_skd_task:current_time(), case TargetTime =< CurrentTime + Cutoff of true -> Task = mg_core_queue_timer:build_task(ID, TargetTime), - mg_core_scheduler:send_task(SchedulerID, Task); + mg_skd:send_task(SchedulerID, Task); false -> ok end; @@ -1432,15 +1432,15 @@ scheduler_child_spec(SchedulerType, Options) -> Config -> SchedulerID = scheduler_id(SchedulerType, Options), SchedulerOptions = scheduler_options(SchedulerType, Options, Config), - mg_core_scheduler_sup:child_spec(SchedulerID, SchedulerOptions, SchedulerType) + mg_skd_sup:child_spec(SchedulerID, SchedulerOptions, SchedulerType) end. --spec scheduler_id(scheduler_type(), options()) -> mg_core_scheduler:id() | undefined. +-spec scheduler_id(scheduler_type(), options()) -> mg_skd:id() | undefined. scheduler_id(SchedulerType, #{namespace := NS}) -> {SchedulerType, NS}. -spec scheduler_options(scheduler_type(), options(), scheduler_opt()) -> - mg_core_scheduler_sup:options(). + mg_skd_sup:options(). scheduler_options(SchedulerType, Options, Config) when SchedulerType == timers; SchedulerType == timers_retries @@ -1476,8 +1476,7 @@ scheduler_options(notification = SchedulerType, Options, Config) -> }, scheduler_options(mg_core_queue_notifications, Options, HandlerOptions, Config). --spec scheduler_options(module(), options(), map(), scheduler_opt()) -> - mg_core_scheduler_sup:options(). +-spec scheduler_options(module(), options(), map(), scheduler_opt()) -> mg_skd_sup:options(). 
scheduler_options(HandlerMod, Options, HandlerOptions, Config) -> #{ pulse := Pulse diff --git a/apps/mg_core/src/mg_core_pulse.erl b/apps/mg_core/src/mg_core_pulse.erl index c77b1acd..d41e8e12 100644 --- a/apps/mg_core/src/mg_core_pulse.erl +++ b/apps/mg_core/src/mg_core_pulse.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2018 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -17,6 +17,8 @@ -include_lib("mg_core/include/pulse.hrl"). +-behaviour(mg_skd_pulse). + %% API -export_type([beat/0]). -export_type([handler/0]). @@ -28,20 +30,12 @@ %% API %% -type beat() :: + mg_skd_pulse:beat() % Timer - #mg_core_timer_lifecycle_created{} + | #mg_core_timer_lifecycle_created{} | #mg_core_timer_lifecycle_rescheduled{} | #mg_core_timer_lifecycle_rescheduling_error{} | #mg_core_timer_lifecycle_removed{} - % Scheduler handling - | #mg_core_scheduler_task_add_error{} - | #mg_core_scheduler_search_success{} - | #mg_core_scheduler_search_error{} - | #mg_core_scheduler_task_error{} - | #mg_core_scheduler_new_tasks{} - | #mg_core_scheduler_task_started{} - | #mg_core_scheduler_task_finished{} - | #mg_core_scheduler_quota_reserved{} % Timer handling | #mg_core_timer_process_started{} | #mg_core_timer_process_finished{} diff --git a/apps/mg_core/src/mg_core_pulse_otel.erl b/apps/mg_core/src/mg_core_pulse_otel.erl index 297b49ba..4d40298d 100644 --- a/apps/mg_core/src/mg_core_pulse_otel.erl +++ b/apps/mg_core/src/mg_core_pulse_otel.erl @@ -13,7 +13,7 @@ -type beat() :: mg_core_pulse:beat() - | mg_core_queue_scanner:beat(). + | mg_skd_scanner:beat(). -export_type([options/0]). 
@@ -47,7 +47,7 @@ handle_beat( handle_beat(_Options, #mg_core_timer_lifecycle_removed{machine_id = ID, namespace = NS}) -> mg_core_otel:add_event(<<"timer removed">>, machine_tags(NS, ID)); %% Scheduler handling -%% TODO Handle and trace events for 'mg_core_scheduler_*' beats +%% TODO Handle and trace events for 'mg_skd_*' beats %% Timer handling %% Wraps `Module:process_machine/7` when processor impact is 'timeout'. handle_beat(_Options, #mg_core_timer_process_started{machine_id = ID, namespace = NS, queue = Queue}) -> diff --git a/apps/mg_core/src/mg_core_queue_interrupted.erl b/apps/mg_core/src/mg_core_queue_interrupted.erl index d02e9e14..6bd32388 100644 --- a/apps/mg_core/src/mg_core_queue_interrupted.erl +++ b/apps/mg_core/src/mg_core_queue_interrupted.erl @@ -16,11 +16,11 @@ -module(mg_core_queue_interrupted). --behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types @@ -45,8 +45,8 @@ -type task_id() :: mg_core:id(). -type task_payload() :: #{}. --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). +-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). % 1 minute -define(DEFAULT_PROCESSING_TIMEOUT, 60000). @@ -62,7 +62,7 @@ init(_Options) -> -spec search_tasks(options(), _Limit :: non_neg_integer(), state()) -> {{scan_delay(), [task()]}, state()}. 
search_tasks(Options, Limit, #state{continuation = Continuation} = State) -> - CurrentTime = mg_core_queue_task:current_time(), + CurrentTime = mg_skd_task:current_time(), MachineOptions = machine_options(Options), Query = processing, {IDs, NewContinuation} = mg_core_machine:search(MachineOptions, Query, Limit, Continuation), diff --git a/apps/mg_core/src/mg_core_queue_notifications.erl b/apps/mg_core/src/mg_core_queue_notifications.erl index 2359e96d..5e55f4d9 100644 --- a/apps/mg_core/src/mg_core_queue_notifications.erl +++ b/apps/mg_core/src/mg_core_queue_notifications.erl @@ -22,11 +22,11 @@ -export([build_task/5]). --behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types @@ -34,7 +34,7 @@ -type seconds() :: non_neg_integer(). -type milliseconds() :: non_neg_integer(). -type options() :: #{ - scheduler_id := mg_core_scheduler:id(), + scheduler_id := mg_skd:id(), pulse := mg_core_pulse:handler(), machine := mg_core_machine:options(), notification := mg_core_notification:options(), @@ -63,10 +63,10 @@ args := mg_core_storage:opaque(), context := mg_core_notification:context() }. --type target_time() :: mg_core_queue_task:target_time(). --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). --type scan_limit() :: mg_core_queue_scanner:scan_limit(). +-type target_time() :: mg_skd_task:target_time(). +-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). +-type scan_limit() :: mg_skd_scanner:scan_limit(). -type fail_action() :: delete | ignore | {reschedule, target_time()}. @@ -107,7 +107,7 @@ init(_Options) -> -spec search_tasks(options(), scan_limit(), state()) -> {{scan_delay(), [task()]}, state()}. 
search_tasks(Options, Limit, State = #state{}) -> - CurrentTs = mg_core_queue_task:current_time(), + CurrentTs = mg_skd_task:current_time(), ScanCutoff = maps:get(scan_cutoff, Options, ?DEFAULT_SCAN_CUTOFF), ScanHandicap = get_handicap_seconds(State), TFrom = CurrentTs - ScanHandicap - ScanCutoff, @@ -149,7 +149,7 @@ execute_task(Options, #{id := NotificationID, machine_id := MachineID, payload : delete -> ok = mg_core_notification:delete(notification_options(Options), NotificationID, Context); {reschedule, NewTargetTime} -> - ok = mg_core_scheduler:send_task(SchedulerID, Task#{target_time => NewTargetTime}); + ok = mg_skd:send_task(SchedulerID, Task#{target_time => NewTargetTime}); ignore -> erlang:raise(throw, Reason, Stacktrace) end @@ -203,7 +203,7 @@ task_fail_action(_Options, _) -> -spec get_reschedule_time(options()) -> target_time(). get_reschedule_time(Options) -> Reschedule = maps:get(reschedule_time, Options, ?DEFAULT_RESCHEDULE_SECONDS), - mg_core_queue_task:current_time() + Reschedule. + mg_skd_task:current_time() + Reschedule. -spec emit_delivery_error_beat( options(), diff --git a/apps/mg_core/src/mg_core_queue_timer.erl b/apps/mg_core/src/mg_core_queue_timer.erl index 666811a5..53a693b4 100644 --- a/apps/mg_core/src/mg_core_queue_timer.erl +++ b/apps/mg_core/src/mg_core_queue_timer.erl @@ -18,11 +18,11 @@ -export([build_task/2]). --behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types @@ -49,10 +49,10 @@ -type task_id() :: mg_core:id(). -type task_payload() :: #{}. --type target_time() :: mg_core_queue_task:target_time(). --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). --type scan_limit() :: mg_core_queue_scanner:scan_limit(). +-type target_time() :: mg_skd_task:target_time(). 
+-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). +-type scan_limit() :: mg_skd_scanner:scan_limit(). % 1 minute -define(DEFAULT_PROCESSING_TIMEOUT, 60000). @@ -75,7 +75,7 @@ build_task(ID, Timestamp) -> -spec search_tasks(options(), scan_limit(), state()) -> {{scan_delay(), [task()]}, state()}. search_tasks(Options = #{timer_queue := TimerQueue}, Limit, State = #state{}) -> - CurrentTs = mg_core_queue_task:current_time(), + CurrentTs = mg_skd_task:current_time(), Lookahead = maps:get(lookahead, Options, 0), Query = {TimerQueue, 1, CurrentTs + Lookahead}, {Timers, Continuation} = mg_core_machine:search(machine_options(Options), Query, Limit), diff --git a/apps/mg_core/src/mg_core_storage.erl b/apps/mg_core/src/mg_core_storage.erl index e84459e4..322326e2 100644 --- a/apps/mg_core/src/mg_core_storage.erl +++ b/apps/mg_core/src/mg_core_storage.erl @@ -77,7 +77,7 @@ %% -type name() :: term(). --type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. +-type opaque() :: mg_skd_utils:opaque(). -type key() :: binary(). -type value() :: opaque(). -type kv() :: {key(), value()}. diff --git a/apps/mg_core/src/mg_core_utils.erl b/apps/mg_core/src/mg_core_utils.erl index 9f377b4c..4fea8c6b 100644 --- a/apps/mg_core/src/mg_core_utils.erl +++ b/apps/mg_core/src/mg_core_utils.erl @@ -79,64 +79,19 @@ %% API %% OTP %% --type reason() :: - normal - | shutdown - | {shutdown, _} - | _. --type gen_timeout() :: - 'hibernate' - | timeout(). - --type gen_start_ret() :: - {ok, pid()} - | ignore - | {error, _}. - --type gen_ref() :: - atom() - | {atom(), node()} - | {global, atom()} - | {via, atom(), term()} - | pid(). --type gen_reg_name() :: - {local, atom()} - | {global, term()} - | {via, module(), term()}. - --type gen_server_from() :: {pid(), _}. - --type gen_server_init_ret(State) :: - ignore - | {ok, State} - | {stop, reason()} - | {ok, State, gen_timeout()}. 
- --type gen_server_handle_call_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {reply, _Reply, State} - | {stop, reason(), State} - | {reply, _Reply, State, gen_timeout()} - | {stop, reason(), _Reply, State}. - --type gen_server_handle_cast_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {stop, reason(), State}. - --type gen_server_handle_info_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {stop, reason(), State}. - --type gen_server_code_change_ret(State) :: - {ok, State} - | {error, _}. - --type supervisor_ret() :: - ignore - | {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}. +%% TODO Refactor or move somewhere else. +-type reason() :: mg_skd_utils:reason(). +-type gen_timeout() :: mg_skd_utils:gen_timeout(). +-type gen_start_ret() :: mg_skd_utils:gen_start_ret(). +-type gen_ref() :: mg_skd_utils:gen_ref(). +-type gen_reg_name() :: mg_skd_utils:gen_reg_name(). +-type gen_server_from() :: mg_skd_utils:gen_server_from(). +-type gen_server_init_ret(State) :: mg_skd_utils:gen_server_init_ret(State). +-type gen_server_handle_call_ret(State) :: mg_skd_utils:gen_server_handle_call_ret(State). +-type gen_server_handle_cast_ret(State) :: mg_skd_utils:gen_server_handle_cast_ret(State). +-type gen_server_handle_info_ret(State) :: mg_skd_utils:gen_server_handle_info_ret(State). +-type gen_server_code_change_ret(State) :: mg_skd_utils:gen_server_code_change_ret(State). +-type supervisor_ret() :: mg_skd_utils:supervisor_ret(). -spec gen_reg_name_to_ref(gen_reg_name()) -> gen_ref(). 
gen_reg_name_to_ref({local, Name}) -> Name; diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_configurator.erl index c0957612..7e9737d9 100644 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ b/apps/mg_cth/src/mg_cth_configurator.erl @@ -6,7 +6,7 @@ woody_server := mg_woody:woody_server(), event_sink_ns := mg_conf:event_sink_ns(), namespaces := mg_conf:namespaces(), - quotas => [mg_core_quota_worker:options()] + quotas => [mg_skd_quota_worker:options()] }. -spec construct_child_specs(config() | undefined) -> _. diff --git a/apps/mg_scheduler/include/pulse.hrl b/apps/mg_scheduler/include/pulse.hrl new file mode 100644 index 00000000..fee46873 --- /dev/null +++ b/apps/mg_scheduler/include/pulse.hrl @@ -0,0 +1,63 @@ +%% Scheduler + +-record(mg_skd_search_success, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + delay :: mg_skd_scanner:scan_delay(), + tasks :: [mg_skd_task:task()], + limit :: mg_skd_scanner:scan_limit(), + % in native units + duration :: non_neg_integer() +}). + +-record(mg_skd_search_error, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_skd_utils:exception() +}). + +-record(mg_skd_task_error, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_skd_utils:exception(), + machine_id :: mg_skd_utils:id() | undefined +}). + +-record(mg_skd_task_add_error, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_skd_utils:exception(), + machine_id :: mg_skd_utils:id(), + request_context :: mg_skd_utils:request_context() +}). + +-record(mg_skd_new_tasks, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + new_tasks_count :: non_neg_integer() +}). + +-record(mg_skd_task_started, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + machine_id :: mg_skd_utils:id() | undefined, + task_delay :: timeout() +}). 
+ +-record(mg_skd_task_finished, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + machine_id :: mg_skd_utils:id() | undefined, + task_delay :: timeout(), + % in native units + process_duration :: non_neg_integer() +}). + +-record(mg_skd_quota_reserved, { + namespace :: mg_skd_utils:ns(), + scheduler_name :: mg_skd:name(), + active_tasks :: non_neg_integer(), + waiting_tasks :: non_neg_integer(), + quota_name :: mg_skd_quota_worker:name(), + quota_reserved :: mg_skd_quota:resource() +}). diff --git a/apps/mg_scheduler/rebar.config b/apps/mg_scheduler/rebar.config new file mode 100644 index 00000000..2118765e --- /dev/null +++ b/apps/mg_scheduler/rebar.config @@ -0,0 +1,4 @@ +{deps, [ + {gproc, "0.9.0"}, + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. diff --git a/apps/mg_scheduler/src/mg_scheduler.app.src b/apps/mg_scheduler/src/mg_scheduler.app.src new file mode 100644 index 00000000..a3daa1b9 --- /dev/null +++ b/apps/mg_scheduler/src/mg_scheduler.app.src @@ -0,0 +1,18 @@ +{application, mg_scheduler, [ + {description, "Machinegun scheduler"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + gproc, + gen_squad, + opentelemetry_api + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_scheduler.erl b/apps/mg_scheduler/src/mg_skd.erl similarity index 87% rename from apps/mg_core/src/mg_core_scheduler.erl rename to apps/mg_scheduler/src/mg_skd.erl index f58494c5..17233ea3 100644 --- a/apps/mg_core/src/mg_core_scheduler.erl +++ b/apps/mg_scheduler/src/mg_skd.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_scheduler). +-module(mg_skd). 
-export([child_spec/3]). -export([start_link/2]). @@ -34,17 +34,17 @@ -type options() :: #{ start_interval => non_neg_integer(), capacity := non_neg_integer(), - quota_name := mg_core_quota_worker:name(), - quota_share => mg_core_quota:share(), - pulse => mg_core_pulse:handler() + quota_name := mg_skd_quota_worker:name(), + quota_share => mg_skd_quota:share(), + pulse => mg_skd_pulse:handler() }. -type name() :: atom(). --type id() :: {name(), mg_core:ns()}. +-type id() :: {name(), mg_skd_utils:ns()}. --type task_id() :: mg_core_queue_task:id(). --type task() :: mg_core_queue_task:task(). --type target_time() :: mg_core_queue_task:target_time(). +-type task_id() :: mg_skd_task:id(). +-type task() :: mg_skd_task:task(). +-type target_time() :: mg_skd_task:target_time(). -export_type([id/0]). -export_type([name/0]). @@ -54,11 +54,11 @@ %% Internal types -record(state, { id :: id(), - pulse :: mg_core_pulse:handler(), + pulse :: mg_skd_pulse:handler(), capacity :: non_neg_integer(), - quota_name :: mg_core_quota_worker:name(), - quota_share :: mg_core_quota:share(), - quota_reserved :: mg_core_quota:resource() | undefined, + quota_name :: mg_skd_quota_worker:name(), + quota_share :: mg_skd_quota:share(), + quota_reserved :: mg_skd_quota:resource() | undefined, timer :: timer:tref(), waiting_tasks :: task_queue(), active_tasks :: #{task_id() => pid()}, @@ -99,7 +99,7 @@ child_spec(ID, Options, ChildID) -> type => worker }. --spec start_link(id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_skd_utils:gen_start_ret(). start_link(ID, Options) -> gen_server:start_link(self_reg_name(ID), ?MODULE, {ID, Options}, []). @@ -117,7 +117,7 @@ distribute_tasks(Pid, Tasks) when is_pid(Pid) -> %% gen_server callbacks --spec init({id(), options()}) -> mg_core_utils:gen_server_init_ret(state()). +-spec init({id(), options()}) -> mg_skd_utils:gen_server_init_ret(state()). 
init({ID, Options}) -> {ok, TimerRef} = timer:send_interval(maps:get(start_interval, Options, 1000), start), {ok, #state{ @@ -133,8 +133,8 @@ init({ID, Options}) -> timer = TimerRef }}. --spec handle_call(Call :: any(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_skd_utils:gen_server_from(), state()) -> + mg_skd_utils:gen_server_handle_call_ret(state()). handle_call(inquire, _From, State) -> Status = #{ pid => self(), @@ -150,7 +150,7 @@ handle_call(Call, From, State) -> -type cast() :: {tasks, [task()]}. --spec handle_cast(cast(), state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(cast(), state()) -> mg_skd_utils:gen_server_handle_cast_ret(state()). handle_cast({tasks, Tasks}, State0) -> State1 = add_tasks(Tasks, State0), State2 = maybe_update_reserved(State1), @@ -164,7 +164,7 @@ handle_cast(Cast, State) -> {'DOWN', monitor(), process, pid(), _Info} | start. --spec handle_info(info(), state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(info(), state()) -> mg_skd_utils:gen_server_handle_info_ret(state()). handle_info({'DOWN', Monitor, process, _Object, _Info}, State0) -> State1 = forget_about_task(Monitor, State0), State2 = start_new_tasks(State1), @@ -179,13 +179,13 @@ handle_info(Info, State) -> % Process registration --spec self_reg_name(id()) -> mg_core_procreg:reg_name(). +-spec self_reg_name(id()) -> mg_skd_procreg:reg_name(). self_reg_name(ID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, {?MODULE, ID}). + mg_skd_procreg:reg_name(mg_skd_procreg_gproc, {?MODULE, ID}). --spec self_ref(id()) -> mg_core_procreg:ref(). +-spec self_ref(id()) -> mg_skd_procreg:ref(). self_ref(ID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, {?MODULE, ID}). + mg_skd_procreg:ref(mg_skd_procreg_gproc, {?MODULE, ID}). 
% Helpers @@ -250,7 +250,7 @@ start_multiple_tasks(N, Iterator, State) when N > 0 -> case dequeue_task(Rank, WaitingTasks) of {Task = #{}, NewWaitingTasks} -> % ...so let's start it. - {ok, Pid, Monitor} = mg_core_scheduler_worker:start_task(ID, Task, SpanCtx), + {ok, Pid, Monitor} = mg_skd_worker:start_task(ID, Task, SpanCtx), NewState = State#state{ waiting_tasks = NewWaitingTasks, active_tasks = ActiveTasks#{TaskID => Pid}, @@ -317,7 +317,7 @@ update_reserved(State = #state{id = ID, quota_name = Quota, quota_share = QuotaS client_id => ID, share => QuotaShare }, - Reserved = mg_core_quota_worker:reserve( + Reserved = mg_skd_quota_worker:reserve( ClientOptions, TotalActiveTasks, TotalKnownTasks, @@ -337,25 +337,25 @@ get_waiting_task_count(#state{waiting_tasks = WaitingTasks}) -> %% logging --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). --spec emit_beat(mg_core_pulse:handler(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(mg_skd_pulse:handler(), mg_skd_pulse:beat()) -> ok. emit_beat(Handler, Beat) -> - ok = mg_core_pulse:handle_beat(Handler, Beat). + ok = mg_skd_pulse:handle_beat(Handler, Beat). -spec emit_new_tasks_beat(non_neg_integer(), state()) -> ok. emit_new_tasks_beat(NewTasksCount, #state{pulse = Pulse, id = {Name, NS}}) -> - emit_beat(Pulse, #mg_core_scheduler_new_tasks{ + emit_beat(Pulse, #mg_skd_new_tasks{ namespace = NS, scheduler_name = Name, new_tasks_count = NewTasksCount }). --spec emit_reserved_beat(non_neg_integer(), non_neg_integer(), mg_core_quota:resource(), state()) -> +-spec emit_reserved_beat(non_neg_integer(), non_neg_integer(), mg_skd_quota:resource(), state()) -> ok. 
emit_reserved_beat(Active, Total, Reserved, State) -> #state{pulse = Pulse, id = {Name, NS}, quota_name = Quota} = State, - emit_beat(Pulse, #mg_core_scheduler_quota_reserved{ + emit_beat(Pulse, #mg_skd_quota_reserved{ namespace = NS, scheduler_name = Name, active_tasks = Active, diff --git a/apps/mg_scheduler/src/mg_skd_pulse.erl b/apps/mg_scheduler/src/mg_skd_pulse.erl new file mode 100644 index 00000000..c932a515 --- /dev/null +++ b/apps/mg_scheduler/src/mg_skd_pulse.erl @@ -0,0 +1,25 @@ +-module(mg_skd_pulse). + +-include_lib("mg_scheduler/include/pulse.hrl"). + +%% API +-export_type([beat/0]). +-export_type([handler/0]). + +-callback handle_beat(handler(), beat() | any()) -> ok. + +%% +%% API +%% +-type beat() :: + % Scheduler handling + #mg_skd_task_add_error{} + | #mg_skd_search_success{} + | #mg_skd_search_error{} + | #mg_skd_task_error{} + | #mg_skd_new_tasks{} + | #mg_skd_task_started{} + | #mg_skd_task_finished{} + | #mg_skd_quota_reserved{}. + +-type handler() :: mg_skd_utils:mod_opts() | undefined. diff --git a/apps/mg_core/src/mg_core_quota.erl b/apps/mg_scheduler/src/mg_skd_quota.erl similarity index 99% rename from apps/mg_core/src/mg_core_quota.erl rename to apps/mg_scheduler/src/mg_skd_quota.erl index bd5f9183..e2217d9a 100644 --- a/apps/mg_core/src/mg_core_quota.erl +++ b/apps/mg_scheduler/src/mg_skd_quota.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota). +-module(mg_skd_quota). %% Менеджер ресурса, который пытается справедливо распределить ограниченный %% запас этого ресурса (размером limit) между множеством потребителей. 
diff --git a/apps/mg_core/src/mg_core_quota_manager.erl b/apps/mg_scheduler/src/mg_skd_quota_manager.erl similarity index 80% rename from apps/mg_core/src/mg_core_quota_manager.erl rename to apps/mg_scheduler/src/mg_skd_quota_manager.erl index 46eb6c7e..f995d9bb 100644 --- a/apps/mg_core/src/mg_core_quota_manager.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_manager.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_manager). +-module(mg_skd_quota_manager). -export([child_spec/2]). -export([start_link/1]). @@ -25,8 +25,8 @@ -export_type([options/0]). %% Internal types --type quota_name() :: mg_core_quota_worker:name(). --type quota_options() :: mg_core_quota_worker:options(). +-type quota_name() :: mg_skd_quota_worker:name(). +-type quota_options() :: mg_skd_quota_worker:options(). %% %% API @@ -41,12 +41,12 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_skd_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_one}, [ - mg_core_quota_worker:child_spec(QuotaOptions, Name) + mg_skd_quota_worker:child_spec(QuotaOptions, Name) || #{name := Name} = QuotaOptions <- maps:values(Options) ] ). 
diff --git a/apps/mg_core/src/mg_core_quota_worker.erl b/apps/mg_scheduler/src/mg_skd_quota_worker.erl similarity index 82% rename from apps/mg_core/src/mg_core_quota_worker.erl rename to apps/mg_scheduler/src/mg_skd_quota_worker.erl index cc9b881a..6cfa0ea8 100644 --- a/apps/mg_core/src/mg_core_quota_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_worker.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_worker). +-module(mg_skd_quota_worker). -behaviour(gen_server). @@ -38,10 +38,10 @@ update_interval => timeout() }. -type name() :: binary() | unlimited. --type share() :: mg_core_quota:share(). --type resource() :: mg_core_quota:resource(). --type client_id() :: mg_core_quota:client_id(). --type limit_options() :: mg_core_quota:limit_options(). +-type share() :: mg_skd_quota:share(). +-type resource() :: mg_skd_quota:resource(). +-type client_id() :: mg_skd_quota:client_id(). +-type limit_options() :: mg_skd_quota:limit_options(). -export_type([name/0]). -export_type([share/0]). @@ -65,9 +65,9 @@ pid :: pid() }). -type state() :: #state{}. --type quota() :: mg_core_quota:state(). +-type quota() :: mg_skd_quota:state(). -type client() :: #client{}. --type client_options() :: mg_core_quota:client_options(). +-type client_options() :: mg_skd_quota:client_options(). -type monitor() :: reference(). -define(DEFAULT_UPDATE_INTERVAL, 5000). @@ -86,7 +86,7 @@ child_spec(Options, ChildID) -> shutdown => 5000 }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_skd_utils:gen_start_ret(). start_link(#{name := Name} = Options) -> gen_server:start_link(self_reg_name(Name), ?MODULE, Options, []). 
@@ -99,24 +99,24 @@ reserve(ClientOptions, Usage, Expectation, Name) -> %% gen_server callbacks --spec init(options()) -> mg_core_utils:gen_server_init_ret(state()). +-spec init(options()) -> mg_skd_utils:gen_server_init_ret(state()). init(Options) -> #{limit := Limit} = Options, Interval = maps:get(update_interval, Options, ?DEFAULT_UPDATE_INTERVAL), {ok, #state{ options = Options, - quota = mg_core_quota:new(#{limit => Limit}), + quota = mg_skd_quota:new(#{limit => Limit}), clients = #{}, client_monitors = #{}, interval = Interval, timer = erlang:send_after(Interval, self(), ?UPDATE_MESSAGE) }}. --spec handle_call(Call :: any(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_skd_utils:gen_server_from(), state()) -> + mg_skd_utils:gen_server_handle_call_ret(state()). handle_call({reserve, ClientOptions, Usage, Expectation}, {Pid, _Tag}, State0) -> State1 = ensure_is_registered(ClientOptions, Pid, State0), - {ok, NewReserved, NewQuota} = mg_core_quota:reserve( + {ok, NewReserved, NewQuota} = mg_skd_quota:reserve( ClientOptions, Usage, Expectation, @@ -127,14 +127,14 @@ handle_call(Call, From, State) -> ok = logger:error("unexpected gen_server call received: ~p from ~p", [Call, From]), {noreply, State}. --spec handle_cast(Cast :: any(), state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(Cast :: any(), state()) -> mg_skd_utils:gen_server_handle_cast_ret(state()). handle_cast(Cast, State) -> ok = logger:error("unexpected gen_server cast received: ~p", [Cast]), {noreply, State}. --spec handle_info(Info :: any(), state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(Info :: any(), state()) -> mg_skd_utils:gen_server_handle_info_ret(state()). 
handle_info(?UPDATE_MESSAGE, State) -> - {ok, NewQuota} = mg_core_quota:recalculate_targets(State#state.quota), + {ok, NewQuota} = mg_skd_quota:recalculate_targets(State#state.quota), {noreply, restart_timer(?UPDATE_MESSAGE, State#state{quota = NewQuota})}; handle_info({'DOWN', Monitor, process, _Object, _Info}, State) -> {noreply, forget_about_client(Monitor, State)}; @@ -143,7 +143,7 @@ handle_info(Info, State) -> {noreply, State}. -spec code_change(OldVsn :: any(), state(), Extra :: any()) -> - mg_core_utils:gen_server_code_change_ret(state()). + mg_skd_utils:gen_server_code_change_ret(state()). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -179,7 +179,7 @@ forget_about_client(Monitor, State) -> State#state{ clients = maps:remove(ClientID, AllClients), client_monitors = maps:remove(Monitor, Monitors), - quota = mg_core_quota:remove_client(ClientID, Quota) + quota = mg_skd_quota:remove_client(ClientID, Quota) }; error -> State @@ -187,11 +187,11 @@ forget_about_client(Monitor, State) -> % Worker registration --spec self_ref(name()) -> mg_core_utils:gen_ref(). +-spec self_ref(name()) -> mg_skd_utils:gen_ref(). self_ref(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. --spec self_reg_name(name()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(name()) -> mg_skd_utils:gen_reg_name(). self_reg_name(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. diff --git a/apps/mg_core/src/mg_core_queue_scanner.erl b/apps/mg_scheduler/src/mg_skd_scanner.erl similarity index 84% rename from apps/mg_core/src/mg_core_queue_scanner.erl rename to apps/mg_scheduler/src/mg_skd_scanner.erl index 96297f3a..4a06eac0 100644 --- a/apps/mg_core/src/mg_core_queue_scanner.erl +++ b/apps/mg_scheduler/src/mg_skd_scanner.erl @@ -24,9 +24,9 @@ %%% Distribution process DOES NOT take into account processing locality (_allocate tasks near %%% idling machines_), it just splits tasks uniformly among a set of known schedulers. --module(mg_core_queue_scanner). +-module(mg_skd_scanner). 
--type scheduler_id() :: mg_core_scheduler:id(). +-type scheduler_id() :: mg_skd:id(). -type scan_delay() :: milliseconds(). -type scan_limit() :: non_neg_integer(). % as in A×X + B @@ -40,7 +40,7 @@ scan_ahead => scan_ahead(), retry_scan_delay => scan_delay(), squad_opts => gen_squad:opts(), - pulse => mg_core_pulse:handler() + pulse => mg_skd_pulse:handler() }. -export_type([options/0]). @@ -54,11 +54,11 @@ %% --type task() :: mg_core_queue_task:task(). +-type task() :: mg_skd_task:task(). -type queue_state() :: any(). -type queue_options() :: any(). --type queue_handler() :: mg_core_utils:mod_opts(queue_options()). +-type queue_handler() :: mg_skd_utils:mod_opts(queue_options()). -callback child_spec(queue_options(), atom()) -> supervisor:child_spec() | undefined. -callback init(queue_options()) -> {ok, queue_state()}. @@ -100,7 +100,7 @@ -spec child_spec(scheduler_id(), options(), _ChildID) -> supervisor:child_spec(). child_spec(SchedulerID, Options, ChildID) -> Flags = #{strategy => rest_for_one}, - ChildSpecs = mg_core_utils:lists_compact([ + ChildSpecs = mg_skd_utils:lists_compact([ handler_child_spec(Options, {ChildID, handler}), #{ id => {ChildID, scanner}, @@ -117,11 +117,11 @@ child_spec(SchedulerID, Options, ChildID) -> -spec handler_child_spec(options(), _ChildID) -> supervisor:child_spec() | undefined. handler_child_spec(#{queue_handler := Handler}, ChildID) -> - mg_core_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). + mg_skd_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). %% --spec start_link(scheduler_id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_skd_utils:gen_start_ret(). start_link(SchedulerID, Options) -> SquadOpts = maps:merge( maps:get(squad_opts, Options, #{}), @@ -139,7 +139,7 @@ start_link(SchedulerID, Options) -> -spec where_is(scheduler_id()) -> pid() | undefined. 
where_is(SchedulerID) -> - mg_core_utils:gen_ref_to_pid(self_ref(SchedulerID)). + mg_skd_utils:gen_ref_to_pid(self_ref(SchedulerID)). %% @@ -152,7 +152,7 @@ where_is(SchedulerID) -> scan_ahead :: scan_ahead(), retry_delay :: scan_delay(), timer :: reference() | undefined, - pulse :: mg_core_pulse:handler() | undefined + pulse :: mg_skd_pulse:handler() | undefined }). -type st() :: #st{}. @@ -196,7 +196,7 @@ handle_cast(Cast, Rank, _Squad, St) -> ), {noreply, St}. --spec handle_call(_Call, mg_core_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. +-spec handle_call(_Call, mg_skd_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. handle_call(Call, From, Rank, _Squad, St) -> ok = logger:error( "unexpected gen_squad call received: ~p, from ~p, rank ~p, state ~p", @@ -253,30 +253,30 @@ scan_queue(Limit, St = #st{queue_handler = HandlerState, retry_delay = RetryDela ok = emit_scan_success_beat(Result, Limit, StartedAt, St), {Result, St#st{queue_handler = HandlerStateNext}}. --spec disseminate_tasks([task()], [mg_core_scheduler:status()], [scan_limit()], st()) -> ok. +-spec disseminate_tasks([task()], [mg_skd:status()], [scan_limit()], st()) -> ok. disseminate_tasks(Tasks, [_Scheduler = #{pid := Pid}], _Capacities, _St) -> %% A single scheduler, just send him all tasks optimizing away meaningless partitioning - mg_core_scheduler:distribute_tasks(Pid, Tasks); + mg_skd:distribute_tasks(Pid, Tasks); disseminate_tasks(Tasks, Schedulers, Capacities, _St) -> %% Partition tasks among known schedulers proportionally to their capacities - Partitions = mg_core_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), + Partitions = mg_skd_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), %% Distribute shares of tasks among schedulers, sending directly to pids maps:fold( fun(_Scheduler = #{pid := Pid}, TasksShare, _) -> - mg_core_scheduler:distribute_tasks(Pid, TasksShare) + mg_skd:distribute_tasks(Pid, TasksShare) end, ok, Partitions ). 
--spec inquire_schedulers(gen_squad:squad(), st()) -> [mg_core_scheduler:status()]. +-spec inquire_schedulers(gen_squad:squad(), st()) -> [mg_skd:status()]. inquire_schedulers(Squad, #st{scheduler_id = SchedulerID}) -> %% Take all known members, there's at least one which is `self()` Members = gen_squad:members(Squad), Nodes = lists:map(fun erlang:node/1, Members), - multicall(Nodes, mg_core_scheduler, inquire, [SchedulerID], ?INQUIRY_TIMEOUT). + multicall(Nodes, mg_skd, inquire, [SchedulerID], ?INQUIRY_TIMEOUT). --spec compute_adjusted_capacity(mg_core_scheduler:status(), st()) -> scan_limit(). +-spec compute_adjusted_capacity(mg_skd:status(), st()) -> scan_limit(). compute_adjusted_capacity(#{waiting_tasks := W, capacity := C}, #st{scan_ahead = {A, B}}) -> erlang:max(erlang:round(A * erlang:max(C - W, 0)) + B, 0). @@ -324,32 +324,32 @@ cancel_timer(St) -> -spec init_handler(queue_handler()) -> queue_handler_state(). init_handler(Handler) -> - {ok, InitialState} = mg_core_utils:apply_mod_opts(Handler, init), + {ok, InitialState} = mg_skd_utils:apply_mod_opts(Handler, init), {Handler, InitialState}. -spec run_handler(queue_handler_state(), _Function :: atom(), _Args :: list()) -> {_Result, queue_handler_state()}. run_handler({Handler, State}, Function, Args) -> - {Result, NextState} = mg_core_utils:apply_mod_opts(Handler, Function, Args ++ [State]), + {Result, NextState} = mg_skd_utils:apply_mod_opts(Handler, Function, Args ++ [State]), {Result, {Handler, NextState}}. %% --spec self_reg_name(scheduler_id()) -> mg_core_procreg:reg_name(). +-spec self_reg_name(scheduler_id()) -> mg_skd_procreg:reg_name(). self_reg_name(SchedulerID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, {?MODULE, SchedulerID}). + mg_skd_procreg:reg_name(mg_skd_procreg_gproc, {?MODULE, SchedulerID}). --spec self_ref(scheduler_id()) -> mg_core_procreg:ref(). +-spec self_ref(scheduler_id()) -> mg_skd_procreg:ref(). 
self_ref(SchedulerID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, {?MODULE, SchedulerID}). + mg_skd_procreg:ref(mg_skd_procreg_gproc, {?MODULE, SchedulerID}). %% --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). --spec emit_scan_error_beat(mg_core_utils:exception(), st()) -> ok. +-spec emit_scan_error_beat(mg_skd_utils:exception(), st()) -> ok. emit_scan_error_beat(Exception, #st{pulse = Pulse, scheduler_id = {Name, NS}}) -> - mg_core_pulse:handle_beat(Pulse, #mg_core_scheduler_search_error{ + mg_skd_pulse:handle_beat(Pulse, #mg_skd_search_error{ namespace = NS, scheduler_name = Name, exception = Exception @@ -360,7 +360,7 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ pulse = Pulse, scheduler_id = {Name, NS} }) -> - mg_core_pulse:handle_beat(Pulse, #mg_core_scheduler_search_success{ + mg_skd_pulse:handle_beat(Pulse, #mg_skd_search_success{ namespace = NS, scheduler_name = Name, delay = Delay, @@ -371,8 +371,8 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ %% --spec handle_beat({mg_core_pulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. +-spec handle_beat({mg_skd_pulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. handle_beat({Handler, {Name, NS}}, Beat) -> Producer = queue_scanner, Extra = [{scheduler_type, Name}, {namespace, NS}], - mg_core_pulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). + mg_skd_pulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). 
diff --git a/apps/mg_core/src/mg_core_scheduler_sup.erl b/apps/mg_scheduler/src/mg_skd_sup.erl similarity index 64% rename from apps/mg_core/src/mg_core_scheduler_sup.erl rename to apps/mg_scheduler/src/mg_skd_sup.erl index d15081ae..514e34e7 100644 --- a/apps/mg_core/src/mg_core_scheduler_sup.erl +++ b/apps/mg_scheduler/src/mg_skd_sup.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,26 +14,26 @@ %%% limitations under the License. %%% --module(mg_core_scheduler_sup). +-module(mg_skd_sup). --type id() :: mg_core_scheduler:id(). +-type id() :: mg_skd:id(). -type options() :: #{ % manager start_interval => non_neg_integer(), capacity := non_neg_integer(), - quota_name := mg_core_quota_worker:name(), - quota_share => mg_core_quota:share(), + quota_name := mg_skd_quota_worker:name(), + quota_share => mg_skd_quota:share(), % scanner - queue_handler := mg_core_queue_scanner:queue_handler(), - max_scan_limit => mg_core_queue_scanner:scan_limit() | unlimited, - scan_ahead => mg_core_queue_scanner:scan_ahead(), - retry_scan_delay => mg_core_queue_scanner:scan_delay(), + queue_handler := mg_skd_scanner:queue_handler(), + max_scan_limit => mg_skd_scanner:scan_limit() | unlimited, + scan_ahead => mg_skd_scanner:scan_ahead(), + retry_scan_delay => mg_skd_scanner:scan_delay(), squad_opts => gen_squad:opts(), % workers - task_handler := mg_core_utils:mod_opts(), + task_handler := mg_skd_utils:mod_opts(), % common - pulse => mg_core_pulse:handler() + pulse => mg_skd_pulse:handler() }. -export_type([options/0]). @@ -52,7 +52,7 @@ child_spec(ID, Options, ChildID) -> type => supervisor }. --spec start_link(id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_skd_utils:gen_start_ret(). 
start_link(SchedulerID, Options) -> ManagerOptions = maps:with( [start_interval, capacity, quota_name, quota_share, pulse], @@ -68,9 +68,9 @@ start_link(SchedulerID, Options) -> ), genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_core_utils:lists_compact([ - mg_core_queue_scanner:child_spec(SchedulerID, ScannerOptions, queue), - mg_core_scheduler_worker:child_spec(SchedulerID, WorkerOptions, tasks), - mg_core_scheduler:child_spec(SchedulerID, ManagerOptions, manager) + mg_skd_utils:lists_compact([ + mg_skd_scanner:child_spec(SchedulerID, ScannerOptions, queue), + mg_skd_worker:child_spec(SchedulerID, WorkerOptions, tasks), + mg_skd:child_spec(SchedulerID, ManagerOptions, manager) ]) ). diff --git a/apps/mg_core/src/mg_core_queue_task.erl b/apps/mg_scheduler/src/mg_skd_task.erl similarity index 92% rename from apps/mg_core/src/mg_core_queue_task.erl rename to apps/mg_scheduler/src/mg_skd_task.erl index 796ff310..6d98dc59 100644 --- a/apps/mg_core/src/mg_core_queue_task.erl +++ b/apps/mg_scheduler/src/mg_skd_task.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_queue_task). +-module(mg_skd_task). -type id() :: any(). -type payload() :: any(). @@ -24,7 +24,7 @@ -type task(TaskID, TaskPayload) :: #{ id := TaskID, target_time := target_time(), - machine_id := mg_core:id(), + machine_id := mg_skd_utils:id(), payload => TaskPayload }. diff --git a/apps/mg_scheduler/src/mg_skd_utils.erl b/apps/mg_scheduler/src/mg_skd_utils.erl new file mode 100644 index 00000000..035f8733 --- /dev/null +++ b/apps/mg_scheduler/src/mg_skd_utils.erl @@ -0,0 +1,126 @@ +-module(mg_skd_utils). + +%% OTP + +-export_type([reason/0]). +-export_type([gen_timeout/0]). +-export_type([gen_start_ret/0]). +-export_type([gen_ref/0]). 
+-export_type([gen_reg_name/0]). +-export_type([gen_server_from/0]). +-export_type([gen_server_init_ret/1]). +-export_type([gen_server_handle_call_ret/1]). +-export_type([gen_server_handle_cast_ret/1]). +-export_type([gen_server_handle_info_ret/1]). +-export_type([gen_server_code_change_ret/1]). +-export_type([supervisor_ret/0]). + +%% + +-export([separate_mod_opts/1]). +-export([lists_compact/1]). + +%% + +-type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. +-type ns() :: binary(). +-type id() :: binary(). +-type request_context() :: opaque(). + +-export_type([opaque/0]). +-export_type([ns/0]). +-export_type([id/0]). +-export_type([request_context/0]). + +-type exception() :: {exit | error | throw, term(), list()}. + +-export_type([exception/0]). + +-type mod_opts() :: mod_opts(term()). +-type mod_opts(Options) :: {module(), Options} | module(). + +-export_type([mod_opts/0]). +-export_type([mod_opts/1]). + +%% OTP + +-type reason() :: + normal + | shutdown + | {shutdown, _} + | _. +-type gen_timeout() :: + 'hibernate' + | timeout(). + +-type gen_start_ret() :: + {ok, pid()} + | ignore + | {error, _}. + +-type gen_ref() :: + atom() + | {atom(), node()} + | {global, atom()} + | {via, atom(), term()} + | pid(). +-type gen_reg_name() :: + {local, atom()} + | {global, term()} + | {via, module(), term()}. + +-type gen_server_from() :: {pid(), _}. + +-type gen_server_init_ret(State) :: + ignore + | {ok, State} + | {stop, reason()} + | {ok, State, gen_timeout()}. + +-type gen_server_handle_call_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {reply, _Reply, State} + | {stop, reason(), State} + | {reply, _Reply, State, gen_timeout()} + | {stop, reason(), _Reply, State}. + +-type gen_server_handle_cast_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {stop, reason(), State}. 
+ +-type gen_server_handle_info_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {stop, reason(), State}. + +-type gen_server_code_change_ret(State) :: + {ok, State} + | {error, _}. + +-type supervisor_ret() :: + ignore + | {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}. + +%% + +-spec separate_mod_opts(mod_opts()) -> {module(), _Arg}. +separate_mod_opts(ModOpts) -> + separate_mod_opts(ModOpts, undefined). + +-spec separate_mod_opts(mod_opts(Defaults), Defaults) -> {module(), Defaults}. +separate_mod_opts(ModOpts = {_, _}, _) -> + ModOpts; +separate_mod_opts(Mod, Default) -> + {Mod, Default}. + +-spec lists_compact(list(T)) -> list(T). +lists_compact(List) -> + lists:filter( + fun + (undefined) -> false; + (_) -> true + end, + List + ). diff --git a/apps/mg_core/src/mg_core_scheduler_worker.erl b/apps/mg_scheduler/src/mg_skd_worker.erl similarity index 78% rename from apps/mg_core/src/mg_core_scheduler_worker.erl rename to apps/mg_scheduler/src/mg_skd_worker.erl index 8dcda772..06a5c402 100644 --- a/apps/mg_core/src/mg_core_scheduler_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_worker.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,9 +14,9 @@ %%% limitations under the License. %%% --module(mg_core_scheduler_worker). +-module(mg_skd_worker). --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). -export([child_spec/3]). -export([start_link/2]). @@ -30,13 +30,13 @@ -callback execute_task(Options :: any(), task()) -> ok. %% Internal types --type scheduler_id() :: mg_core_scheduler:id(). --type task() :: mg_core_queue_task:task(). +-type scheduler_id() :: mg_skd:id(). +-type task() :: mg_skd_task:task(). -type maybe_span() :: opentelemetry:span_ctx() | undefined. 
-type options() :: #{ - task_handler := mg_core_utils:mod_opts(), - pulse => mg_core_pulse:handler() + task_handler := mg_skd_utils:mod_opts(), + pulse => mg_skd_pulse:handler() }. -type monitor() :: reference(). @@ -54,7 +54,7 @@ child_spec(SchedulerID, Options, ChildID) -> type => supervisor }. --spec start_link(scheduler_id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_skd_utils:gen_start_ret(). start_link(SchedulerID, Options) -> genlib_adhoc_supervisor:start_link( self_reg_name(SchedulerID), @@ -78,7 +78,7 @@ start_task(SchedulerID, Task, SpanCtx) -> Error end. --spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_core_utils:gen_start_ret(). +-spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_skd_utils:gen_start_ret(). do_start_task(SchedulerID, Options, Task, SpanCtx) -> proc_lib:start_link(?MODULE, execute, [SchedulerID, Options, Task, SpanCtx]). @@ -91,7 +91,7 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> ok = emit_start_beat(Task, SchedulerID, Options), ok = try - ok = mg_core_utils:apply_mod_opts(Handler, execute_task, [Task]), + ok = mg_skd_utils:apply_mod_opts(Handler, execute_task, [Task]), End = erlang:monotonic_time(), ok = emit_finish_beat(Task, Start, End, SchedulerID, Options) catch @@ -104,13 +104,13 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> % Process registration --spec self_ref(scheduler_id()) -> mg_core_utils:gen_ref(). +-spec self_ref(scheduler_id()) -> mg_skd_utils:gen_ref(). self_ref(ID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, wrap_id(ID)). + mg_skd_procreg:ref(mg_skd_procreg_gproc, wrap_id(ID)). --spec self_reg_name(scheduler_id()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(scheduler_id()) -> mg_skd_utils:gen_reg_name(). self_reg_name(ID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, wrap_id(ID)). 
+ mg_skd_procreg:reg_name(mg_skd_procreg_gproc, wrap_id(ID)). -spec wrap_id(scheduler_id()) -> term(). wrap_id(ID) -> @@ -118,9 +118,9 @@ wrap_id(ID) -> %% logging --spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mg_skd_pulse:beat()) -> ok. emit_beat(Options, Beat) -> - ok = mg_core_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). + ok = mg_skd_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). -spec get_delay(task()) -> timeout(). get_delay(#{target_time := Target}) -> @@ -129,7 +129,7 @@ get_delay(#{target_time := Target}) -> -spec emit_start_beat(task(), scheduler_id(), options()) -> ok. emit_start_beat(Task, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_started{ + emit_beat(Options, #mg_skd_task_started{ namespace = NS, scheduler_name = Name, task_delay = get_delay(Task), @@ -138,7 +138,7 @@ emit_start_beat(Task, {Name, NS}, Options) -> -spec emit_finish_beat(task(), integer(), integer(), scheduler_id(), options()) -> ok. emit_finish_beat(Task, StartedAt, FinishedAt, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_finished{ + emit_beat(Options, #mg_skd_task_finished{ namespace = NS, scheduler_name = Name, task_delay = get_delay(Task), @@ -147,9 +147,9 @@ emit_finish_beat(Task, StartedAt, FinishedAt, {Name, NS}, Options) -> process_duration = FinishedAt - StartedAt }). --spec emit_error_beat(task(), mg_core_utils:exception(), scheduler_id(), options()) -> ok. +-spec emit_error_beat(task(), mg_skd_utils:exception(), scheduler_id(), options()) -> ok. 
emit_error_beat(Task, Exception, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_error{ + emit_beat(Options, #mg_skd_task_error{ namespace = NS, scheduler_name = Name, exception = Exception, diff --git a/apps/mg_core/test/mg_core_quota_SUITE.erl b/apps/mg_scheduler/test/mg_skd_quota_SUITE.erl similarity index 91% rename from apps/mg_core/test/mg_core_quota_SUITE.erl rename to apps/mg_scheduler/test/mg_skd_quota_SUITE.erl index 87196b02..1ddff790 100644 --- a/apps/mg_core/test/mg_core_quota_SUITE.erl +++ b/apps/mg_scheduler/test/mg_skd_quota_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2018 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_SUITE). +-module(mg_skd_quota_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("stdlib/include/assert.hrl"). @@ -52,14 +52,14 @@ -type group_name() :: atom(). -record(client, { - options :: mg_core_quota:client_options(), + options :: mg_skd_quota:client_options(), usage = 0 :: resource(), expectation = 0 :: resource(), reserved = 0 :: resource() }). -type client() :: #client{}. --type quota() :: mg_core_quota:state(). --type resource() :: mg_core_quota:resource(). +-type quota() :: mg_skd_quota:state(). +-type resource() :: mg_skd_quota:resource(). %% %% tests descriptions @@ -124,7 +124,7 @@ end_per_test(_Name, _C) -> -spec no_over_allocation_test(config()) -> any(). no_over_allocation_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -135,7 +135,7 @@ no_over_allocation_test(_C) -> -spec fair_sharing_without_usage_test(config()) -> any(). 
fair_sharing_without_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -144,7 +144,7 @@ fair_sharing_without_usage_test(_C) -> {Clients2, Q1} = reserve(Clients1, Q0), ok = validate_quota_contract(Clients2, Limit), % Don't use reserved resurces and recalculate targets - {ok, Q2} = mg_core_quota:recalculate_targets(Q1), + {ok, Q2} = mg_skd_quota:recalculate_targets(Q1), {Clients3, _Q3} = reserve(Clients2, Q2), ok = validate_quota_contract(Clients3, Limit), Expected = repeat(10, 10), @@ -153,7 +153,7 @@ fair_sharing_without_usage_test(_C) -> -spec sharing_respects_usage_test(config()) -> any(). sharing_respects_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -167,7 +167,7 @@ sharing_respects_usage_test(_C) -> {Clients4, Q2} = reserve(Clients3, Q1), ok = validate_quota_contract(Clients4, Limit), % Recalculate targets - {ok, Q3} = mg_core_quota:recalculate_targets(Q2), + {ok, Q3} = mg_skd_quota:recalculate_targets(Q2), {Clients5, _Q4} = reserve(Clients4, Q3), ok = validate_quota_contract(Clients5, Limit), Expected = repeat(10, 5) ++ repeat(0, 5), @@ -176,7 +176,7 @@ sharing_respects_usage_test(_C) -> -spec fair_sharing_with_full_usage_test(config()) -> any(). 
fair_sharing_with_full_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -189,7 +189,7 @@ fair_sharing_with_full_usage_test(_C) -> {Clients4, Q2} = reserve(Clients3, Q1), ok = validate_quota_contract(Clients4, Limit), % Recalculate targets - {ok, Q3} = mg_core_quota:recalculate_targets(Q2), + {ok, Q3} = mg_skd_quota:recalculate_targets(Q2), {Clients5, Q4} = reserve(Clients4, Q3), ok = validate_quota_contract(Clients5, Limit), ?assertEqual([50, 0], get_reserve(Clients5)), @@ -202,7 +202,7 @@ fair_sharing_with_full_usage_test(_C) -> -spec fair_share_with_large_limit(config()) -> any(). fair_share_with_large_limit(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -213,7 +213,7 @@ fair_share_with_large_limit(_C) -> -spec unwanted_resources_redistribution_test(config()) -> any(). unwanted_resources_redistribution_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -228,7 +228,7 @@ unwanted_resources_redistribution_test(_C) -> -spec guaranteed_resources_redistribution_test(config()) -> any(). guaranteed_resources_redistribution_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -247,7 +247,7 @@ guaranteed_resources_redistribution_test(_C) -> -spec large_amount_of_clients_not_freeze_test(config()) -> any(). large_amount_of_clients_not_freeze_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10000), @@ -258,7 +258,7 @@ large_amount_of_clients_not_freeze_test(_C) -> -spec large_amount_of_clients_with_zero_share_not_freeze_test(config()) -> any(). 
large_amount_of_clients_with_zero_share_not_freeze_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10000, repeat(0, 10000)), @@ -269,7 +269,7 @@ large_amount_of_clients_with_zero_share_not_freeze_test(_C) -> -spec sharing_respects_shares(config()) -> any(). sharing_respects_shares(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [1, 2]), @@ -280,7 +280,7 @@ sharing_respects_shares(_C) -> -spec sharing_respects_zero_shares(config()) -> any(). sharing_respects_zero_shares(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [0, 2]), @@ -291,7 +291,7 @@ sharing_respects_zero_shares(_C) -> -spec share_can_be_changed(config()) -> any(). share_can_be_changed(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [1, 1]), @@ -313,7 +313,7 @@ repeat(Element, Count) -> create_clients(Number) -> create_clients(Number, repeat(1, Number)). --spec create_clients(non_neg_integer(), [mg_core_quota:share()]) -> [client()]. +-spec create_clients(non_neg_integer(), [mg_skd_quota:share()]) -> [client()]. create_clients(Number, Shares) -> [ #client{ @@ -333,7 +333,7 @@ reserve(Clients, Quota) -> -spec do_reserve(client(), Acc) -> Acc when Acc :: {[client()], quota()}. do_reserve(Client, {Acc, Quota}) -> #client{options = Options, usage = Usage, expectation = Exp} = Client, - {ok, Reserved, NewQuota} = mg_core_quota:reserve(Options, Usage, Exp, Quota), + {ok, Reserved, NewQuota} = mg_skd_quota:reserve(Options, Usage, Exp, Quota), {[Client#client{reserved = Reserved} | Acc], NewQuota}. -spec loop([client()], quota()) -> {[client()], quota()}. 
@@ -345,7 +345,7 @@ loop(Clients, Quota, 0) -> {Clients, Quota}; loop(Clients0, Quota0, N) when N > 0 -> {Clients1, Quota1} = reserve(Clients0, Quota0), - {ok, Quota2} = mg_core_quota:recalculate_targets(Quota1), + {ok, Quota2} = mg_skd_quota:recalculate_targets(Quota1), loop(Clients1, Quota2, N - 1). -spec get_reserve([client()]) -> [resource()]. @@ -360,14 +360,14 @@ get_expectation(Clients) -> set_expectation(Clients, Expecations) -> [C#client{expectation = E} || {C, E} <- lists:zip(Clients, Expecations)]. --spec set_share([client()], [mg_core_quota:share()]) -> [client()]. +-spec set_share([client()], [mg_skd_quota:share()]) -> [client()]. set_share(Clients, Shares) -> [ C#client{options = O#{share => S}} || {#client{options = O} = C, S} <- lists:zip(Clients, Shares) ]. --spec validate_quota_contract([client()], Limit :: mg_core_quota:resource()) -> ok. +-spec validate_quota_contract([client()], Limit :: mg_skd_quota:resource()) -> ok. validate_quota_contract(Clients, Limit) -> true = lists:sum(get_reserve(Clients)) =< Limit, TotalUsage = [C#client.usage || C <- Clients], diff --git a/apps/mg_woody/src/mg_woody_pulse_otel.erl b/apps/mg_woody/src/mg_woody_pulse_otel.erl index f9132740..7edc1aac 100644 --- a/apps/mg_woody/src/mg_woody_pulse_otel.erl +++ b/apps/mg_woody/src/mg_woody_pulse_otel.erl @@ -14,7 +14,7 @@ #woody_event{} | #woody_request_handle_error{} | mg_core_pulse:beat() - | mg_core_queue_scanner:beat(). + | mg_skd_scanner:beat(). -export_type([options/0]). diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index 4bd67f57..d6903875 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -505,7 +505,7 @@ modernizer(Name, ModernizerYamlConfig) -> } }. --spec scheduler(mg_core_quota:share(), ?C:yaml_config()) -> mg_core_machine:scheduler_opt(). +-spec scheduler(mg_skd_quota:share(), ?C:yaml_config()) -> mg_core_machine:scheduler_opt(). 
scheduler(Share, Config) -> #{ max_scan_limit => ?C:conf([scan_limit], Config, 5000), From 9386c79a49590e0d54b19e84023a989cafa60bbf Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 2 May 2024 11:56:22 +0300 Subject: [PATCH 09/31] (WIP) Moves utils and pulse into separate lib directory --- apps/machinegun/src/machinegun.app.src | 1 + .../machinegun/src/mg_configuration_utils.erl | 2 +- apps/machinegun/src/mg_pulse.erl | 4 +- .../src/mg_pulse_lifecycle_kafka.erl | 4 +- apps/machinegun/src/mg_pulse_log.erl | 4 +- apps/mg_conf/src/mg_conf.app.src | 1 + apps/mg_conf/src/mg_conf.erl | 8 +- apps/mg_conf/src/mg_conf_namespace_sup.erl | 4 +- apps/mg_core/include/pulse.hrl | 12 +- apps/mg_core/src/mg_core.app.src | 1 + apps/mg_core/src/mg_core.erl | 45 ++++++- apps/mg_core/src/mg_core_event_sink.erl | 4 +- apps/mg_core/src/mg_core_events_machine.erl | 26 ++-- .../mg_core/src/mg_core_events_modernizer.erl | 4 +- apps/mg_core/src/mg_core_events_storage.erl | 2 +- apps/mg_core/src/mg_core_machine.erl | 32 ++--- apps/mg_core/src/mg_core_notification.erl | 8 +- apps/mg_core/src/mg_core_otel.erl | 2 +- apps/mg_core/src/mg_core_procreg.erl | 16 +-- apps/mg_core/src/mg_core_pulse.erl | 87 ------------ apps/mg_core/src/mg_core_pulse_otel.erl | 5 +- .../mg_core/src/mg_core_queue_interrupted.erl | 2 +- .../src/mg_core_queue_notifications.erl | 8 +- apps/mg_core/src/mg_core_queue_timer.erl | 2 +- apps/mg_core/src/mg_core_storage.erl | 40 +++--- apps/mg_core/src/mg_core_storage_memory.erl | 16 +-- apps/mg_core/src/mg_core_worker.erl | 30 ++--- apps/mg_core/src/mg_core_workers_manager.erl | 20 +-- .../test/mg_core_continuation_retry_SUITE.erl | 4 +- .../test/mg_core_events_machine_SUITE.erl | 4 +- .../test/mg_core_events_modernizer_SUITE.erl | 4 +- .../test/mg_core_events_stash_SUITE.erl | 4 +- .../test/mg_core_instant_timer_task_SUITE.erl | 4 +- .../mg_core_internal_events_logging_SUITE.erl | 4 +- .../test/mg_core_interrupted_SUITE.erl | 4 +- 
apps/mg_core/test/mg_core_machine_SUITE.erl | 4 +- .../test/mg_core_machine_full_test_SUITE.erl | 4 +- .../mg_core_machine_notification_SUITE.erl | 4 +- .../test/mg_core_notification_SUITE.erl | 2 +- apps/mg_core/test/mg_core_storages_SUITE.erl | 4 +- .../test/mg_core_timer_retry_SUITE.erl | 4 +- apps/mg_core/test/mg_core_workers_SUITE.erl | 2 +- apps/mg_cth/src/mg_cth.app.src | 1 + apps/mg_cth/src/mg_cth.erl | 2 +- apps/mg_cth/src/mg_cth_pulse.erl | 4 +- apps/mg_es_kafka/src/mg_event_sink_kafka.erl | 4 +- .../mg_event_sink_kafka_prometheus_pulse.erl | 6 +- .../test/mg_event_sink_kafka_SUITE.erl | 2 +- .../test/mg_event_sink_kafka_errors_SUITE.erl | 2 +- .../src/mg_event_sink_machine.erl | 14 +- .../test/mg_event_sink_machine_SUITE.erl | 4 +- apps/mg_riak/src/mg_riak.app.src | 1 + apps/mg_riak/src/mg_riak_prometheus.erl | 2 +- apps/mg_riak/src/mg_riak_pulse.erl | 4 +- apps/mg_riak/src/mg_riak_pulse_prometheus.erl | 4 +- apps/mg_riak/src/mg_riak_storage.erl | 36 ++--- apps/mg_riak/test/mg_riak_storage_SUITE.erl | 4 +- apps/mg_scheduler/include/pulse.hrl | 32 ++--- apps/mg_scheduler/src/mg_scheduler.app.src | 1 + apps/mg_scheduler/src/mg_skd.erl | 41 ++++-- apps/mg_scheduler/src/mg_skd_pulse.erl | 25 ---- .../mg_scheduler/src/mg_skd_quota_manager.erl | 2 +- apps/mg_scheduler/src/mg_skd_quota_worker.erl | 18 +-- apps/mg_scheduler/src/mg_skd_scanner.erl | 32 ++--- apps/mg_scheduler/src/mg_skd_sup.erl | 8 +- apps/mg_scheduler/src/mg_skd_task.erl | 2 +- apps/mg_scheduler/src/mg_skd_utils.erl | 126 ------------------ apps/mg_scheduler/src/mg_skd_worker.erl | 20 +-- apps/mg_utils/src/mg_utils.app.src | 15 +++ .../src/mg_utils.erl} | 90 ++++++++++--- apps/mg_utils/src/mpulse.erl | 44 ++++++ apps/mg_woody/include/pulse.hrl | 2 +- apps/mg_woody/src/mg_woody.app.src | 1 + apps/mg_woody/src/mg_woody_automaton.erl | 4 +- apps/mg_woody/src/mg_woody_event_handler.erl | 4 +- apps/mg_woody/src/mg_woody_event_sink.erl | 2 +- apps/mg_woody/src/mg_woody_life_sink.erl | 4 +- 
apps/mg_woody/src/mg_woody_packer.erl | 2 +- apps/mg_woody/src/mg_woody_pulse_otel.erl | 5 +- apps/mg_woody/src/mg_woody_utils.erl | 4 +- elvis.config | 2 +- 81 files changed, 484 insertions(+), 534 deletions(-) delete mode 100644 apps/mg_core/src/mg_core_pulse.erl delete mode 100644 apps/mg_scheduler/src/mg_skd_pulse.erl delete mode 100644 apps/mg_scheduler/src/mg_skd_utils.erl create mode 100644 apps/mg_utils/src/mg_utils.app.src rename apps/{mg_core/src/mg_core_utils.erl => mg_utils/src/mg_utils.erl} (83%) create mode 100644 apps/mg_utils/src/mpulse.erl diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index 6867616b..03beff5c 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -26,6 +26,7 @@ erl_health, prometheus, prometheus_cowboy, + mg_utils, mg_core, mg_riak, mg_es_kafka, diff --git a/apps/machinegun/src/mg_configuration_utils.erl b/apps/machinegun/src/mg_configuration_utils.erl index 91bc4889..b6d8323b 100644 --- a/apps/machinegun/src/mg_configuration_utils.erl +++ b/apps/machinegun/src/mg_configuration_utils.erl @@ -327,7 +327,7 @@ proplist(Config) -> -spec ip(yaml_string()) -> inet:ip_address(). ip(Host) -> - mg_core_utils:throw_if_error(inet:parse_address(string(Host))). + mg_utils:throw_if_error(inet:parse_address(string(Host))). -spec atom(yaml_string()) -> atom(). atom(AtomStr) -> diff --git a/apps/machinegun/src/mg_pulse.erl b/apps/machinegun/src/mg_pulse.erl index 561e0b5f..1120b506 100644 --- a/apps/machinegun/src/mg_pulse.erl +++ b/apps/machinegun/src/mg_pulse.erl @@ -20,13 +20,13 @@ -include_lib("mg_es_kafka/include/pulse.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). 
%% pulse types -type beat() :: - mg_core_pulse:beat() + mpulse:beat() | mg_riak_pulse:beat() | mg_skd_scanner:beat() | #woody_event{} diff --git a/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl b/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl index a35f5812..c0be99db 100644 --- a/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl +++ b/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl @@ -18,8 +18,8 @@ -include_lib("mg_core/include/pulse.hrl"). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -type options() :: #{ topic := brod:topic(), diff --git a/apps/machinegun/src/mg_pulse_log.erl b/apps/machinegun/src/mg_pulse_log.erl index b9e1a366..d32a7508 100644 --- a/apps/machinegun/src/mg_pulse_log.erl +++ b/apps/machinegun/src/mg_pulse_log.erl @@ -20,8 +20,8 @@ -include_lib("mg_core/include/pulse.hrl"). -include_lib("mg_woody/include/pulse.hrl"). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -export([handle_beat/2]). diff --git a/apps/mg_conf/src/mg_conf.app.src b/apps/mg_conf/src/mg_conf.app.src index ad1826cc..b82b0e55 100644 --- a/apps/mg_conf/src/mg_conf.app.src +++ b/apps/mg_conf/src/mg_conf.app.src @@ -6,6 +6,7 @@ kernel, stdlib, genlib, + mg_utils, mg_core, mg_riak, mg_es_kafka, diff --git a/apps/mg_conf/src/mg_conf.erl b/apps/mg_conf/src/mg_conf.erl index d64c149a..8af853da 100644 --- a/apps/mg_conf/src/mg_conf.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -44,7 +44,7 @@ -type processor() :: mg_woody_processor:options(). --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). -spec construct_child_specs(config(), [woody_server_thrift_http_handler:route(any())]) -> [supervisor:child_spec()]. construct_child_specs( @@ -214,13 +214,13 @@ worker_manager_options(Config) -> maps:get(worker, Config, #{}) ). --spec processor(processor(), pulse()) -> mg_core_utils:mod_opts(). +-spec processor(processor(), pulse()) -> mg_utils:mod_opts(). 
processor(Processor, Pulse) -> {mg_woody_processor, Processor#{event_handler => {mg_woody_event_handler, Pulse}}}. -spec sub_storage_options(mg_core:ns(), mg_core_machine:storage_options()) -> mg_core_machine:storage_options(). sub_storage_options(SubNS, Storage0) -> - Storage1 = mg_core_utils:separate_mod_opts(Storage0, #{}), + Storage1 = mg_utils:separate_mod_opts(Storage0, #{}), Storage2 = add_bucket_postfix(SubNS, Storage1), Storage2. @@ -228,7 +228,7 @@ sub_storage_options(SubNS, Storage0) -> add_bucket_postfix(_, {mg_core_storage_memory, _} = Storage) -> Storage; add_bucket_postfix(SubNS, {mg_riak_storage, #{bucket := Bucket} = Options}) -> - {mg_riak_storage, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. + {mg_riak_storage, Options#{bucket := mg_utils:concatenate_namespaces(Bucket, SubNS)}}. -spec modernizer_options(modernizer() | undefined, pulse()) -> #{modernizer => mg_core_events_modernizer:options()}. modernizer_options(#{current_format_version := CurrentFormatVersion, handler := WoodyClient}, Pulse) -> diff --git a/apps/mg_conf/src/mg_conf_namespace_sup.erl b/apps/mg_conf/src/mg_conf_namespace_sup.erl index caef316d..b76246cc 100644 --- a/apps/mg_conf/src/mg_conf_namespace_sup.erl +++ b/apps/mg_conf/src/mg_conf_namespace_sup.erl @@ -34,7 +34,7 @@ child_spec(Namespaces, ChildID) -> type => supervisor }. --spec start_link(namespaces(), _ChildID) -> mg_core_utils:gen_start_ret(). +-spec start_link(namespaces(), _ChildID) -> mg_utils:gen_start_ret(). start_link(Namespaces, ChildID) -> {ok, SupPid} = genlib_adhoc_supervisor:start_link( #{strategy => simple_one_for_one}, @@ -49,7 +49,7 @@ start_link(Namespaces, ChildID) -> ), start_namespace_children(SupPid, Namespaces). --spec start_namespace_children(pid(), namespaces()) -> mg_core_utils:gen_start_ret(). +-spec start_namespace_children(pid(), namespaces()) -> mg_utils:gen_start_ret(). 
start_namespace_children(SupPid, []) -> {ok, SupPid}; start_namespace_children(SupPid, [Namespace | Rest]) -> diff --git a/apps/mg_core/include/pulse.hrl b/apps/mg_core/include/pulse.hrl index 3bc8dffc..50fd14b0 100644 --- a/apps/mg_core/include/pulse.hrl +++ b/apps/mg_core/include/pulse.hrl @@ -27,7 +27,7 @@ machine_id :: mg_core:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). %% Timer processing @@ -57,7 +57,7 @@ -record(mg_core_machine_process_transient_error, { namespace :: mg_core:ns(), machine_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), request_context :: mg_core:request_context() }). @@ -116,7 +116,7 @@ machine_id :: mg_core:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(mg_core_machine_lifecycle_repaired, { @@ -130,14 +130,14 @@ namespace :: mg_core:ns(), machine_id :: mg_core:id(), request_context :: mg_core:request_context(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(mg_core_machine_lifecycle_transient_error, { context :: atom(), namespace :: mg_core:ns(), machine_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), request_context :: mg_core:request_context(), retry_strategy :: genlib_retry:strategy(), retry_action :: {wait, timeout(), genlib_retry:strategy()} | finish @@ -162,7 +162,7 @@ namespace :: mg_core:ns(), machine_id :: mg_core:id(), notification_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), action :: delete | {reschedule, genlib_time:ts()} | ignore }). 
diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index 610db3dc..21d5be2a 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -27,6 +27,7 @@ msgpack, snowflake, mg_scheduler, + mg_utils, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_core/src/mg_core.erl b/apps/mg_core/src/mg_core.erl index 39de01c3..8332c2e7 100644 --- a/apps/mg_core/src/mg_core.erl +++ b/apps/mg_core/src/mg_core.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2017 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ %%% -module(mg_core). +-include_lib("mg_core/include/pulse.hrl"). + %% API -export_type([ns/0]). -export_type([id/0]). @@ -23,3 +25,44 @@ -type ns() :: binary(). -type id() :: binary(). -type request_context() :: mg_core_storage:opaque(). + +-export_type([beat/0]). +-type beat() :: + % Timer + #mg_core_timer_lifecycle_created{} + | #mg_core_timer_lifecycle_rescheduled{} + | #mg_core_timer_lifecycle_rescheduling_error{} + | #mg_core_timer_lifecycle_removed{} + % Timer handling + | #mg_core_timer_process_started{} + | #mg_core_timer_process_finished{} + % Machine process state + | #mg_core_machine_lifecycle_created{} + | #mg_core_machine_lifecycle_removed{} + | #mg_core_machine_lifecycle_loaded{} + | #mg_core_machine_lifecycle_unloaded{} + | #mg_core_machine_lifecycle_committed_suicide{} + | #mg_core_machine_lifecycle_failed{} + | #mg_core_machine_lifecycle_repaired{} + | #mg_core_machine_lifecycle_loading_error{} + | #mg_core_machine_lifecycle_transient_error{} + % Machine call handling + | #mg_core_machine_process_started{} + | #mg_core_machine_process_finished{} + | #mg_core_machine_process_transient_error{} + % Machine notification + | #mg_core_machine_notification_created{} + | #mg_core_machine_notification_delivered{} + | #mg_core_machine_notification_delivery_error{} + % 
Machine worker handling + | #mg_core_worker_call_attempt{} + | #mg_core_worker_start_attempt{} + % Storage calls + | #mg_core_storage_get_start{} + | #mg_core_storage_get_finish{} + | #mg_core_storage_put_start{} + | #mg_core_storage_put_finish{} + | #mg_core_storage_search_start{} + | #mg_core_storage_search_finish{} + | #mg_core_storage_delete_start{} + | #mg_core_storage_delete_finish{}. diff --git a/apps/mg_core/src/mg_core_event_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl index 5ebdd29a..18a80d05 100644 --- a/apps/mg_core/src/mg_core_event_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -29,7 +29,7 @@ %% Types --type handler(Options) :: mg_core_utils:mod_opts(Options). +-type handler(Options) :: mg_utils:mod_opts(Options). -type handler() :: handler(handler_options()). -export_type([handler/1]). @@ -48,4 +48,4 @@ add_events(_Handler, _NS, _ID, [], _ReqCtx, _Deadline) -> ok; add_events(Handler, NS, ID, Events, ReqCtx, Deadline) -> - ok = mg_core_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]). + ok = mg_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]). diff --git a/apps/mg_core/src/mg_core_events_machine.erl b/apps/mg_core/src/mg_core_events_machine.erl index 0742a1ea..b2fc8785 100644 --- a/apps/mg_core/src/mg_core_events_machine.erl +++ b/apps/mg_core/src/mg_core_events_machine.erl @@ -119,16 +119,16 @@ -type options() :: #{ namespace => mg_core:ns(), events_storage => storage_options(), - processor => mg_core_utils:mod_opts(), + processor => mg_utils:mod_opts(), machines => mg_core_machine:options(), retries => #{_Subject => genlib_retry:policy()}, - pulse => mg_core_pulse:handler(), + pulse => mpulse:handler(), event_sinks => [mg_core_event_sink:handler()], default_processing_timeout => timeout(), event_stash_size => non_neg_integer() }. % like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). 
+-type storage_options() :: mg_utils:mod_opts(map()). -spec child_spec(options(), atom()) -> supervisor:child_spec(). child_spec(Options, ChildID) -> @@ -139,11 +139,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_events_storage:child_spec(Options), mg_core_machine:child_spec(machine_options(Options), automaton) ]) @@ -207,7 +207,7 @@ call(Options, ID, Args, HRange, ReqCtx, Deadline) -> get_machine(Options, ID, HRange) -> #{state := State, status := Status} = mg_core_machine:get(machine_options(Options), ID), EffectiveState = maybe_apply_delayed_actions(opaque_to_state(State)), - _ = mg_core_utils:throw_if_undefined(EffectiveState, {logic, machine_not_found}), + _ = mg_utils:throw_if_undefined(EffectiveState, {logic, machine_not_found}), machine(Options, ID, EffectiveState, Status, HRange). -spec remove(options(), id(), request_context(), deadline()) -> ok. @@ -244,7 +244,7 @@ notify(Options, MachineID, Args, HRange, ReqCtx) -> -spec processor_child_spec(options()) -> supervisor:child_spec() | undefined. processor_child_spec(Options) -> - mg_core_utils:apply_mod_opts_if_defined( + mg_utils:apply_mod_opts_if_defined( processor_options(Options), processor_child_spec, undefined @@ -465,14 +465,14 @@ emit_action_beats(Options, ID, ReqCtx, ComplexAction) -> -spec emit_timer_action_beats(options(), mg_core:id(), request_context(), complex_action()) -> ok. 
emit_timer_action_beats(Options, ID, ReqCtx, #{timer := unset_timer}) -> #{namespace := NS, pulse := Pulse} = Options, - mg_core_pulse:handle_beat(Pulse, #mg_core_timer_lifecycle_removed{ + mpulse:handle_beat(Pulse, #mg_core_timer_lifecycle_removed{ namespace = NS, machine_id = ID, request_context = ReqCtx }); emit_timer_action_beats(Options, ID, ReqCtx, #{timer := {set_timer, Timer, _, _}}) -> #{namespace := NS, pulse := Pulse} = Options, - mg_core_pulse:handle_beat(Pulse, #mg_core_timer_lifecycle_created{ + mpulse:handle_beat(Pulse, #mg_core_timer_lifecycle_created{ namespace = NS, machine_id = ID, request_context = ReqCtx, @@ -487,7 +487,7 @@ emit_timer_action_beats(_Options, _ID, _ReqCtx, #{}) -> {ok, state()}. process_signal(Options = #{processor := Processor}, ReqCtx, Deadline, Signal, Machine, State) -> SignalArgs = [ReqCtx, Deadline, {Signal, Machine}], - {StateChange, ComplexAction} = mg_core_utils:apply_mod_opts( + {StateChange, ComplexAction} = mg_utils:apply_mod_opts( Processor, process_signal, SignalArgs @@ -508,7 +508,7 @@ process_signal(Options = #{processor := Processor}, ReqCtx, Deadline, Signal, Ma {_Resp, state()}. process_call(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machine, State) -> CallArgs = [ReqCtx, Deadline, {Args, Machine}], - {Resp, StateChange, ComplexAction} = mg_core_utils:apply_mod_opts( + {Resp, StateChange, ComplexAction} = mg_utils:apply_mod_opts( Processor, process_call, CallArgs @@ -529,7 +529,7 @@ process_call(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machin {ok, {_Resp, state()}} | {error, repair_error()}. 
process_repair(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machine, State) -> RepairArgs = [ReqCtx, Deadline, {Args, Machine}], - case mg_core_utils:apply_mod_opts(Processor, process_repair, RepairArgs) of + case mg_utils:apply_mod_opts(Processor, process_repair, RepairArgs) of {ok, {Resp, StateChange, ComplexAction}} -> #{id := ID} = Machine, NewState = handle_processing_result( @@ -628,7 +628,7 @@ timer_to_timestamp({deadline, Deadline}) -> %% --spec processor_options(options()) -> mg_core_utils:mod_opts(). +-spec processor_options(options()) -> mg_utils:mod_opts(). processor_options(Options) -> maps:get(processor, Options). diff --git a/apps/mg_core/src/mg_core_events_modernizer.erl b/apps/mg_core/src/mg_core_events_modernizer.erl index e86aa226..81e66281 100644 --- a/apps/mg_core/src/mg_core_events_modernizer.erl +++ b/apps/mg_core/src/mg_core_events_modernizer.erl @@ -26,7 +26,7 @@ -type options() :: #{ current_format_version := mg_core_events:format_version(), - handler := mg_core_utils:mod_opts(handler_opts()) + handler := mg_utils:mod_opts(handler_opts()) }. % handler specific @@ -123,4 +123,4 @@ event_to_machine_event(NS, ID, Event) -> -spec call_handler(options(), request_context(), machine_event()) -> modernized_event_body(). call_handler(#{handler := Handler}, ReqCtx, MachineEvent) -> % TODO обработка ошибок? - mg_core_utils:apply_mod_opts(Handler, modernize_event, [ReqCtx, MachineEvent]). + mg_utils:apply_mod_opts(Handler, modernize_event, [ReqCtx, MachineEvent]). diff --git a/apps/mg_core/src/mg_core_events_storage.erl b/apps/mg_core/src/mg_core_events_storage.erl index 74fb2114..da17f6bf 100644 --- a/apps/mg_core/src/mg_core_events_storage.erl +++ b/apps/mg_core/src/mg_core_events_storage.erl @@ -71,7 +71,7 @@ get_events(Options, ID, Range) -> -spec events_storage_options(mg_core_events_machine:options()) -> mg_core_storage:options(). 
events_storage_options(#{namespace := NS, events_storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?STORAGE_NS, events}, pulse => Handler}}. -spec events_to_kvs(mg_core:id(), [mg_core_events:event()]) -> [mg_core_storage:kv()]. diff --git a/apps/mg_core/src/mg_core_machine.erl b/apps/mg_core/src/mg_core_machine.erl index e50d5529..95b66a99 100644 --- a/apps/mg_core/src/mg_core_machine.erl +++ b/apps/mg_core/src/mg_core_machine.erl @@ -163,10 +163,10 @@ %% fixed for namespace and pulse, worker -type options() :: #{ namespace := mg_core:ns(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), storage => storage_options(), notification => mg_core_notification:options(), - processor => mg_core_utils:mod_opts(), + processor => mg_utils:mod_opts(), worker := mg_core_workers_manager:ns_options(), retries => retry_opt(), schedulers => schedulers_opt(), @@ -176,7 +176,7 @@ }. % like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -type thrown_error() :: {logic, logic_error()} | {transient, transient_error()} | {timeout, _Reason}. @@ -266,11 +266,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options = #{namespace := NS}) -> start_link(Options, {?MODULE, NS}). --spec start_link(options(), _ChildID) -> mg_core_utils:gen_start_ret(). +-spec start_link(options(), _ChildID) -> mg_utils:gen_start_ret(). 
start_link(Options, ChildID) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_one}, @@ -287,7 +287,7 @@ machine_sup_child_spec(Options, ChildID) -> start => {genlib_adhoc_supervisor, start_link, [ #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_storage:child_spec(storage_options(Options), storage), notification_child_spec(Options), processor_child_spec(Options), @@ -309,7 +309,7 @@ scheduler_sup_child_spec(Options, ChildID) -> intensity => 10, period => 30 }, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ scheduler_child_spec(timers, Options), scheduler_child_spec(timers_retries, Options), scheduler_child_spec(overseer, Options), @@ -380,14 +380,14 @@ resume_interrupted(Options, ID, Deadline) -> fail(Options, ID, ReqCtx, Deadline) -> fail(Options, ID, {error, explicit_fail, []}, ReqCtx, Deadline). --spec fail(options(), mg_core:id(), mg_core_utils:exception(), request_context(), deadline()) -> ok. +-spec fail(options(), mg_core:id(), mg_utils:exception(), request_context(), deadline()) -> ok. fail(Options, ID, Exception, ReqCtx, Deadline) -> call_(Options, ID, {fail, Exception}, ReqCtx, Deadline). -spec get(options(), mg_core:id()) -> storage_machine() | throws(). get(Options, ID) -> {_, StorageMachine} = - mg_core_utils:throw_if_undefined( + mg_utils:throw_if_undefined( get_storage_machine(Options, ID), {logic, machine_not_found} ), @@ -465,7 +465,7 @@ all_statuses() -> -spec call_(options(), mg_core:id(), _, maybe(request_context()), deadline()) -> _ | no_return(). call_(Options, ID, Call, ReqCtx, Deadline) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( mg_core_workers_manager:call(manager_options(Options), ID, Call, ReqCtx, Deadline) ). @@ -963,7 +963,7 @@ handle_transient_exception(_Reason, State) -> State. 
-spec handle_exception(Exception, ReqCtx, Deadline, state()) -> state() when - Exception :: mg_core_utils:exception(), + Exception :: mg_utils:exception(), ReqCtx :: request_context(), Deadline :: deadline(). handle_exception(Exception, ReqCtx, Deadline, State) -> @@ -1067,7 +1067,7 @@ handle_notification_processed(NotificationID, State = #{notifications_processed ) -> processor_result(). call_processor(Impact, ProcessingCtx, ReqCtx, Deadline, State) -> #{options := Options, id := ID, storage_machine := #{state := MachineState}} = State, - mg_core_utils:apply_mod_opts( + mg_utils:apply_mod_opts( get_options(processor, Options), process_machine, [ID, Impact, ProcessingCtx, ReqCtx, Deadline, MachineState] @@ -1081,7 +1081,7 @@ notification_child_spec(#{}) -> -spec processor_child_spec(options()) -> supervisor:child_spec(). processor_child_spec(Options) -> - mg_core_utils:apply_mod_opts_if_defined( + mg_utils:apply_mod_opts_if_defined( get_options(processor, Options), processor_child_spec, undefined @@ -1417,7 +1417,7 @@ manager_options(Options = #{namespace := NS, worker := ManagerOptions, pulse := -spec storage_options(options()) -> mg_core_storage:options(). storage_options(#{namespace := NS, storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?MODULE, machines}, pulse => Handler}}. -spec notification_options(options()) -> mg_core_notification:options(). @@ -1585,6 +1585,6 @@ do_with_retry(Options = #{namespace := NS}, ID, Fun, RetryStrategy, ReqCtx, Beat %% logging %% --spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(#{pulse := Handler}, Beat) -> - ok = mg_core_pulse:handle_beat(Handler, Beat). + ok = mpulse:handle_beat(Handler, Beat). 
diff --git a/apps/mg_core/src/mg_core_notification.erl b/apps/mg_core/src/mg_core_notification.erl index d961bc7f..cf60c4bd 100644 --- a/apps/mg_core/src/mg_core_notification.erl +++ b/apps/mg_core/src/mg_core_notification.erl @@ -17,7 +17,7 @@ -type context() :: mg_core_storage:context(). -type options() :: #{ namespace := mg_core:ns(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), storage := storage_options() }. @@ -29,7 +29,7 @@ %% Internal types % FIXME like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -type ts() :: genlib_time:ts(). %% @@ -43,7 +43,7 @@ child_spec(Options, ChildID) -> start => {genlib_adhoc_supervisor, start_link, [ #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_storage:child_spec(storage_options(Options), storage) ]) ]}, @@ -124,5 +124,5 @@ data_to_opaque(#{ -spec storage_options(options()) -> mg_core_storage:options(). storage_options(#{namespace := NS, storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?MODULE, notifications}, pulse => Handler}}. diff --git a/apps/mg_core/src/mg_core_otel.erl b/apps/mg_core/src/mg_core_otel.erl index ae7b2f81..e918f8bc 100644 --- a/apps/mg_core/src/mg_core_otel.erl +++ b/apps/mg_core/src/mg_core_otel.erl @@ -97,7 +97,7 @@ add_event(Name, Attributes) -> _ = otel_span:add_event(otel_tracer:current_span_ctx(), Name, Attributes), ok. --spec record_exception(mg_core_utils:exception(), opentelemetry:attributes_map()) -> ok. +-spec record_exception(mg_utils:exception(), opentelemetry:attributes_map()) -> ok. record_exception({Class, Reason, Stacktrace}, Attributes) -> _ = otel_span:record_exception(otel_tracer:current_span_ctx(), Class, Reason, Stacktrace, Attributes), ok. 
diff --git a/apps/mg_core/src/mg_core_procreg.erl b/apps/mg_core/src/mg_core_procreg.erl index 71231693..007ab90d 100644 --- a/apps/mg_core/src/mg_core_procreg.erl +++ b/apps/mg_core/src/mg_core_procreg.erl @@ -20,11 +20,11 @@ -type name() :: term(). -type name_pattern() :: ets:match_pattern(). --type ref() :: mg_core_utils:gen_ref(). --type reg_name() :: mg_core_utils:gen_reg_name(). +-type ref() :: mg_utils:gen_ref(). +-type reg_name() :: mg_utils:gen_reg_name(). -type procreg_options() :: term(). --type options() :: mg_core_utils:mod_opts(procreg_options()). +-type options() :: mg_utils:mod_opts(procreg_options()). -export_type([name/0]). -export_type([name_pattern/0]). @@ -61,15 +61,15 @@ -spec ref(options(), name()) -> ref(). ref(Options, Name) -> - mg_core_utils:apply_mod_opts(Options, ref, [Name]). + mg_utils:apply_mod_opts(Options, ref, [Name]). -spec reg_name(options(), name()) -> reg_name(). reg_name(Options, Name) -> - mg_core_utils:apply_mod_opts(Options, reg_name, [Name]). + mg_utils:apply_mod_opts(Options, reg_name, [Name]). -spec select(options(), name_pattern()) -> [{name(), pid()}]. select(Options, NamePattern) -> - mg_core_utils:apply_mod_opts(Options, select, [NamePattern]). + mg_utils:apply_mod_opts(Options, select, [NamePattern]). -spec call(options(), name(), _Call) -> _Reply. call(Options, Name, Call) -> @@ -77,11 +77,11 @@ call(Options, Name, Call) -> -spec call(options(), name(), _Call, timeout()) -> _Reply. call(Options, Name, Call, Timeout) -> - mg_core_utils:apply_mod_opts(Options, call, [ref(Options, Name), Call, Timeout]). + mg_utils:apply_mod_opts(Options, call, [ref(Options, Name), Call, Timeout]). -spec start_link(options(), name(), module(), _Args, list()) -> start_link_ret(). 
start_link(Options, Name, Module, Args, Opts) -> - mg_core_utils:apply_mod_opts( + mg_utils:apply_mod_opts( Options, start_link, [reg_name(Options, Name), Module, Args, Opts] diff --git a/apps/mg_core/src/mg_core_pulse.erl b/apps/mg_core/src/mg_core_pulse.erl deleted file mode 100644 index d41e8e12..00000000 --- a/apps/mg_core/src/mg_core_pulse.erl +++ /dev/null @@ -1,87 +0,0 @@ -%%% -%%% Copyright 2024 Valitydev -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% --module(mg_core_pulse). - --include_lib("mg_core/include/pulse.hrl"). - --behaviour(mg_skd_pulse). - -%% API --export_type([beat/0]). --export_type([handler/0]). --export([handle_beat/2]). - --callback handle_beat(Options :: any(), beat()) -> ok. 
- -%% -%% API -%% --type beat() :: - mg_skd_pulse:beat() - % Timer - | #mg_core_timer_lifecycle_created{} - | #mg_core_timer_lifecycle_rescheduled{} - | #mg_core_timer_lifecycle_rescheduling_error{} - | #mg_core_timer_lifecycle_removed{} - % Timer handling - | #mg_core_timer_process_started{} - | #mg_core_timer_process_finished{} - % Machine process state - | #mg_core_machine_lifecycle_created{} - | #mg_core_machine_lifecycle_removed{} - | #mg_core_machine_lifecycle_loaded{} - | #mg_core_machine_lifecycle_unloaded{} - | #mg_core_machine_lifecycle_committed_suicide{} - | #mg_core_machine_lifecycle_failed{} - | #mg_core_machine_lifecycle_repaired{} - | #mg_core_machine_lifecycle_loading_error{} - | #mg_core_machine_lifecycle_transient_error{} - % Machine call handling - | #mg_core_machine_process_started{} - | #mg_core_machine_process_finished{} - | #mg_core_machine_process_transient_error{} - % Machine notification - | #mg_core_machine_notification_created{} - | #mg_core_machine_notification_delivered{} - | #mg_core_machine_notification_delivery_error{} - % Machine worker handling - | #mg_core_worker_call_attempt{} - | #mg_core_worker_start_attempt{} - % Storage calls - | #mg_core_storage_get_start{} - | #mg_core_storage_get_finish{} - | #mg_core_storage_put_start{} - | #mg_core_storage_put_finish{} - | #mg_core_storage_search_start{} - | #mg_core_storage_search_finish{} - | #mg_core_storage_delete_start{} - | #mg_core_storage_delete_finish{}. - --type handler() :: mg_core_utils:mod_opts() | undefined. - --spec handle_beat(handler(), any()) -> ok. -handle_beat(undefined, _Beat) -> - ok; -handle_beat(Handler, Beat) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(Handler), - try - ok = Mod:handle_beat(Options, Beat) - catch - Class:Reason:ST -> - Stacktrace = genlib_format:format_stacktrace(ST), - Msg = "Pulse handler ~p failed at beat ~p: ~p:~p ~s", - ok = logger:error(Msg, [{Mod, Options}, Beat, Class, Reason, Stacktrace]) - end. 
diff --git a/apps/mg_core/src/mg_core_pulse_otel.erl b/apps/mg_core/src/mg_core_pulse_otel.erl index 4d40298d..d24e0bf4 100644 --- a/apps/mg_core/src/mg_core_pulse_otel.erl +++ b/apps/mg_core/src/mg_core_pulse_otel.erl @@ -4,7 +4,7 @@ -include_lib("opentelemetry_api/include/opentelemetry.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). @@ -12,7 +12,8 @@ -type options() :: map(). -type beat() :: - mg_core_pulse:beat() + mg_core:beat() + | mg_skd:beat() | mg_skd_scanner:beat(). -export_type([options/0]). diff --git a/apps/mg_core/src/mg_core_queue_interrupted.erl b/apps/mg_core/src/mg_core_queue_interrupted.erl index 6bd32388..23642479 100644 --- a/apps/mg_core/src/mg_core_queue_interrupted.erl +++ b/apps/mg_core/src/mg_core_queue_interrupted.erl @@ -27,7 +27,7 @@ -type milliseconds() :: non_neg_integer(). -type options() :: #{ - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), min_scan_delay => milliseconds(), rescan_delay => milliseconds(), diff --git a/apps/mg_core/src/mg_core_queue_notifications.erl b/apps/mg_core/src/mg_core_queue_notifications.erl index 5e55f4d9..067cba36 100644 --- a/apps/mg_core/src/mg_core_queue_notifications.erl +++ b/apps/mg_core/src/mg_core_queue_notifications.erl @@ -35,7 +35,7 @@ -type milliseconds() :: non_neg_integer(). -type options() :: #{ scheduler_id := mg_skd:id(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), notification := mg_core_notification:options(), % how many seconds behind real time we are @@ -209,7 +209,7 @@ get_reschedule_time(Options) -> options(), mg_core:id(), mg_core_notification:id(), - mg_core_utils:exception(), + mg_utils:exception(), fail_action() ) -> ok. emit_delivery_error_beat(Options, MachineID, NotificationID, Exception, Action) -> @@ -233,6 +233,6 @@ emit_delivered_beat(Options, MachineID, NotificationID) -> notification_id = NotificationID }). 
--spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(Options, Beat) -> - ok = mg_core_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). + ok = mpulse:handle_beat(maps:get(pulse, Options, undefined), Beat). diff --git a/apps/mg_core/src/mg_core_queue_timer.erl b/apps/mg_core/src/mg_core_queue_timer.erl index 53a693b4..eeba39ba 100644 --- a/apps/mg_core/src/mg_core_queue_timer.erl +++ b/apps/mg_core/src/mg_core_queue_timer.erl @@ -30,7 +30,7 @@ -type seconds() :: non_neg_integer(). -type milliseconds() :: non_neg_integer(). -type options() :: #{ - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), timer_queue := waiting | retrying, lookahead => seconds(), diff --git a/apps/mg_core/src/mg_core_storage.erl b/apps/mg_core/src/mg_core_storage.erl index 322326e2..85f0efaf 100644 --- a/apps/mg_core/src/mg_core_storage.erl +++ b/apps/mg_core/src/mg_core_storage.erl @@ -77,7 +77,7 @@ %% -type name() :: term(). --type opaque() :: mg_skd_utils:opaque(). +-type opaque() :: mg_utils:opaque(). -type key() :: binary(). -type value() :: opaque(). -type kv() :: {key(), value()}. @@ -104,12 +104,12 @@ -type storage_options() :: #{ name := name(), - pulse := mg_core_pulse:handler(), - sidecar => mg_core_utils:mod_opts(), + pulse := mpulse:handler(), + sidecar => mg_utils:mod_opts(), batching => batching_options(), atom() => any() }. --type options() :: mg_core_utils:mod_opts(storage_options()). +-type options() :: mg_utils:mod_opts(storage_options()). -type batching_options() :: #{ % How many storage requests may be served concurrently at most? @@ -145,12 +145,12 @@ %% --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). 
start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ - mg_core_utils:apply_mod_opts_if_defined(Options, child_spec, undefined, [storage]), + mg_utils:lists_compact([ + mg_utils:apply_mod_opts_if_defined(Options, child_spec, undefined, [storage]), sidecar_child_spec(Options, sidecar) ]) ). @@ -202,7 +202,7 @@ add_batch_request(Request = {search, _}, Batch) -> -spec run_batch(options(), batch()) -> [{request(), response()}]. run_batch(Options, Batch) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), genlib_pmap:map( fun(Request) -> {Request, do_request(Options, Request)} @@ -224,10 +224,10 @@ construct_pmap_options(Options) -> -spec do_request(options(), request()) -> response(). do_request(Options, Request) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), StartTimestamp = erlang:monotonic_time(), ok = emit_beat_start(Request, StorageOptions), - Result = mg_core_utils:apply_mod_opts(Options, do_request, [Request]), + Result = mg_utils:apply_mod_opts(Options, do_request, [Request]), FinishTimestamp = erlang:monotonic_time(), Duration = FinishTimestamp - StartTimestamp, ok = emit_beat_finish(Request, StorageOptions, Duration), @@ -275,10 +275,10 @@ binary_to_opaque(Binary) -> -spec sidecar_child_spec(options(), term()) -> supervisor:child_spec() | undefined. sidecar_child_spec(Options, ChildID) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), case maps:find(sidecar, StorageOptions) of {ok, Sidecar} -> - mg_core_utils:apply_mod_opts(Sidecar, child_spec, [Options, ChildID]); + mg_utils:apply_mod_opts(Sidecar, child_spec, [Options, ChildID]); error -> undefined end. 
@@ -289,40 +289,40 @@ sidecar_child_spec(Options, ChildID) -> -spec emit_beat_start(mg_core_storage:request(), storage_options()) -> ok. emit_beat_start({get, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_get_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_get_start{ name = Name }); emit_beat_start({put, _, _, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_put_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_put_start{ name = Name }); emit_beat_start({search, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_search_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_search_start{ name = Name }); emit_beat_start({delete, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_delete_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_delete_start{ name = Name }). -spec emit_beat_finish(mg_core_storage:request(), storage_options(), duration()) -> ok. 
emit_beat_finish({get, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_get_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_get_finish{ name = Name, duration = Duration }); emit_beat_finish({put, _, _, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_put_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_put_finish{ name = Name, duration = Duration }); emit_beat_finish({search, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_search_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_search_finish{ name = Name, duration = Duration }); emit_beat_finish({delete, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_delete_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_delete_finish{ name = Name, duration = Duration }). diff --git a/apps/mg_core/src/mg_core_storage_memory.erl b/apps/mg_core/src/mg_core_storage_memory.erl index 0c704da5..656634cd 100644 --- a/apps/mg_core/src/mg_core_storage_memory.erl +++ b/apps/mg_core/src/mg_core_storage_memory.erl @@ -36,7 +36,7 @@ %% %% internal API %% --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> gen_server:start_link(reg_name(get_name(Options)), ?MODULE, Options, []). @@ -48,7 +48,7 @@ start_link(Options) -> undefined | #{ name := name(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), existing_storage_name => name(), random_transient_fail => random_fail_policy() }. @@ -92,7 +92,7 @@ get_name(#{name := Name}) -> -type search_result() :: [{mg_core_storage:index_value(), mg_core_storage:key()}] | [mg_core_storage:key()]. --spec init(options()) -> mg_core_utils:gen_server_init_ret(state()). 
+-spec init(options()) -> mg_utils:gen_server_init_ret(state()). init(Options) -> {ok, #{ options => Options, @@ -100,8 +100,8 @@ init(Options) -> indexes => #{} }}. --spec handle_call(_Call, mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()) | no_return(). +-spec handle_call(_Call, mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()) | no_return(). handle_call({put, Key, Context, Value, IndexesUpdates}, _From, State) -> {Resp, NewState} = do_put(Key, Context, Value, IndexesUpdates, State), {reply, Resp, NewState}; @@ -131,7 +131,7 @@ handle_info(Info, State) -> _ = erlang:exit({'unexpected info received', Info}), {noreply, State}. --spec code_change(_, state(), _) -> mg_core_utils:gen_server_code_change_ret(state()). +-spec code_change(_, state(), _) -> mg_utils:gen_server_code_change_ret(state()). code_change(_, State, _) -> {ok, State}. @@ -380,11 +380,11 @@ start_from_elem(Item, [_ | Tail]) -> %% Registry utils --spec ref(name()) -> mg_core_utils:gen_ref(). +-spec ref(name()) -> mg_utils:gen_ref(). ref(Name) -> {via, gproc, gproc_key(Name)}. --spec reg_name(name()) -> mg_core_utils:gen_reg_name(). +-spec reg_name(name()) -> mg_utils:gen_reg_name(). reg_name(Name) -> {via, gproc, gproc_key(Name)}. diff --git a/apps/mg_core/src/mg_core_worker.erl b/apps/mg_core/src/mg_core_worker.erl index 794d8f03..b30fab88 100644 --- a/apps/mg_core/src/mg_core_worker.erl +++ b/apps/mg_core/src/mg_core_worker.erl @@ -48,7 +48,7 @@ {{reply, _Reply} | noreply, _State}. -type options() :: #{ - worker => mg_core_utils:mod_opts(), + worker => mg_utils:mod_opts(), registry => mg_core_procreg:options(), hibernate_timeout => pos_integer(), unload_timeout => pos_integer(), @@ -64,7 +64,7 @@ -type call_msg() :: {call, mg_core_deadline:deadline(), call_payload(), req_ctx()}. --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). -define(WRAP_ID(NS, ID), {?MODULE, {NS, ID}}). 
-define(DEFAULT_SHUTDOWN, brutal_kill). @@ -78,7 +78,7 @@ child_spec(ChildID, Options) -> shutdown => shutdown_timeout(Options, ?DEFAULT_SHUTDOWN) }. --spec start_link(options(), mg_core:ns(), mg_core:id(), req_ctx()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options(), mg_core:ns(), mg_core:id(), req_ctx()) -> mg_utils:gen_start_ret(). start_link(Options, NS, ID, ReqCtx) -> mg_core_procreg:start_link( procreg_options(Options), @@ -98,7 +98,7 @@ start_link(Options, NS, ID, ReqCtx) -> pulse() ) -> _Result | {error, _}. call(Options, NS, ID, Call, ReqCtx, Deadline, Pulse) -> - ok = mg_core_pulse:handle_beat(Pulse, #mg_core_worker_call_attempt{ + ok = mpulse:handle_beat(Pulse, #mg_core_worker_call_attempt{ namespace = NS, machine_id = ID, request_context = ReqCtx, @@ -114,7 +114,7 @@ call(Options, NS, ID, Call, ReqCtx, Deadline, Pulse) -> %% for testing -spec brutal_kill(options(), mg_core:ns(), mg_core:id()) -> ok. brutal_kill(Options, NS, ID) -> - case mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)) of + case mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)) of undefined -> ok; Pid -> @@ -130,8 +130,8 @@ reply(CallCtx, Reply) -> -spec get_call_queue(options(), mg_core:ns(), mg_core:id()) -> [_Call]. get_call_queue(Options, NS, ID) -> - Pid = mg_core_utils:exit_if_undefined( - mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), + Pid = mg_utils:exit_if_undefined( + mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), noproc ), [Call || {'$gen_call', _, {call, _Deadline, Call, _ReqCtx}} <- get_call_messages(Pid)]. @@ -143,7 +143,7 @@ get_call_messages(Pid) -> -spec is_alive(options(), mg_core:ns(), mg_core:id()) -> boolean(). is_alive(Options, NS, ID) -> - Pid = mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), + Pid = mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), Pid =/= undefined andalso erlang:is_process_alive(Pid). 
% TODO nonuniform interface @@ -167,12 +167,12 @@ list(Procreg, NS) -> unload_timeout => timeout() }. --spec init(_) -> mg_core_utils:gen_server_init_ret(state()). +-spec init(_) -> mg_utils:gen_server_init_ret(state()). init({ID, Options = #{worker := WorkerModOpts}, ReqCtx}) -> _ = process_flag(trap_exit, true), HibernateTimeout = maps:get(hibernate_timeout, Options, 5 * 1000), UnloadTimeout = maps:get(unload_timeout, Options, 60 * 1000), - {Mod, Args} = mg_core_utils:separate_mod_opts(WorkerModOpts), + {Mod, Args} = mg_utils:separate_mod_opts(WorkerModOpts), State = #{ id => ID, mod => Mod, @@ -183,8 +183,8 @@ init({ID, Options = #{worker := WorkerModOpts}, ReqCtx}) -> }, {ok, schedule_unload_timer(State)}. --spec handle_call(call_msg(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(call_msg(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). % загрузка делается отдельно и лениво, чтобы не блокировать этим супервизор, % т.к. у него легко может начать расти очередь @@ -225,12 +225,12 @@ handle_call(Call, From, State) -> ok = logger:error("unexpected gen_server call received: ~p from ~p", [Call, From]), {noreply, State, hibernate_timeout(State)}. --spec handle_cast(_Cast, state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(_Cast, state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast(Cast, State) -> ok = logger:error("unexpected gen_server cast received: ~p", [Cast]), {noreply, State, hibernate_timeout(State)}. --spec handle_info(_Info, state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(_Info, state()) -> mg_utils:gen_server_handle_info_ret(state()). handle_info(timeout, State) -> {noreply, State, hibernate}; handle_info( @@ -251,7 +251,7 @@ handle_info(Info, State) -> ok = logger:error("unexpected gen_server info ~p", [Info]), {noreply, State, hibernate_timeout(State)}. 
--spec code_change(_, state(), _) -> mg_core_utils:gen_server_code_change_ret(state()). +-spec code_change(_, state(), _) -> mg_utils:gen_server_code_change_ret(state()). code_change(_, State, _) -> {ok, State}. diff --git a/apps/mg_core/src/mg_core_workers_manager.erl b/apps/mg_core/src/mg_core_workers_manager.erl index aeaa1a8f..fd25dc68 100644 --- a/apps/mg_core/src/mg_core_workers_manager.erl +++ b/apps/mg_core/src/mg_core_workers_manager.erl @@ -46,12 +46,12 @@ %% fixed for name and pulse, registry and worker_options -type options() :: #{ name := name(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), registry := mg_core_procreg:options(), message_queue_len_limit => queue_limit(), % all but `registry` worker_options := mg_core_worker:options(), - sidecar => mg_core_utils:mod_opts() + sidecar => mg_utils:mod_opts() }. -type queue_limit() :: non_neg_integer(). @@ -60,14 +60,14 @@ message_queue_len_limit => queue_limit(), % all but `registry` worker_options => mg_core_worker:options(), - sidecar => mg_core_utils:mod_opts() + sidecar => mg_utils:mod_opts() }. %% Internal types -type id() :: mg_core:id(). -type name() :: mg_core:ns(). -type req_ctx() :: mg_core:request_context(). --type gen_ref() :: mg_core_utils:gen_ref(). +-type gen_ref() :: mg_utils:gen_ref(). -type maybe(T) :: T | undefined. -type deadline() :: mg_core_deadline:deadline(). @@ -87,11 +87,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ manager_child_spec(Options), sidecar_child_spec(Options) ]) @@ -112,7 +112,7 @@ manager_child_spec(Options) -> -spec sidecar_child_spec(options()) -> supervisor:child_spec() | undefined. 
sidecar_child_spec(#{sidecar := Sidecar} = Options) -> - mg_core_utils:apply_mod_opts(Sidecar, child_spec, [Options, sidecar]); + mg_utils:apply_mod_opts(Sidecar, child_spec, [Options, sidecar]); sidecar_child_spec(#{}) -> undefined. @@ -219,8 +219,8 @@ start_child(Options, ID, ReqCtx) -> SelfRef = self_ref(Options), #{name := Name, pulse := Pulse} = Options, MsgQueueLimit = message_queue_len_limit(Options), - MsgQueueLen = mg_core_utils:msg_queue_len(SelfRef), - ok = mg_core_pulse:handle_beat(Pulse, #mg_core_worker_start_attempt{ + MsgQueueLen = mg_utils:msg_queue_len(SelfRef), + ok = mpulse:handle_beat(Pulse, #mg_core_worker_start_attempt{ namespace = Name, machine_id = ID, request_context = ReqCtx, @@ -251,7 +251,7 @@ message_queue_len_limit(Options) -> self_ref(Options) -> {via, gproc, gproc_key(Options)}. --spec self_reg_name(options()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(options()) -> mg_utils:gen_reg_name(). self_reg_name(Options) -> {via, gproc, gproc_key(Options)}. diff --git a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl index 05e7d0b3..a1e59d43 100644 --- a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl @@ -119,7 +119,7 @@ increment_fail_count() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -144,6 +144,6 @@ automaton_options() -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_core/test/mg_core_events_machine_SUITE.erl b/apps/mg_core/test/mg_core_events_machine_SUITE.erl index 1be41886..06fa9669 100644 --- a/apps/mg_core/test/mg_core_events_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_machine_SUITE.erl @@ -398,7 +398,7 @@ start_automaton(ProcessorOptions, NS) -> -spec start_automaton(mg_core_events_machine:options()) -> {pid(), mg_core_events_machine:options()}. start_automaton(Options) -> - {mg_core_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. + {mg_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -570,7 +570,7 @@ decode(Value) -> -include("pulse.hrl"). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat = #mg_core_machine_lifecycle_failed{}) -> ct:pal("~p", [Beat]); handle_beat(_, Beat = #mg_core_machine_lifecycle_transient_error{}) -> diff --git a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl index d241b726..ff70cee6 100644 --- a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl @@ -175,7 +175,7 @@ start_automaton(ProcessorOptions, NS) -> -spec start_automaton(mg_core_events_machine:options()) -> {pid(), mg_core_events_machine:options()}. start_automaton(Options) -> - {mg_core_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. + {mg_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -281,7 +281,7 @@ decode(Value) -> -include("pulse.hrl"). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. 
handle_beat(_, Beat = #mg_core_machine_lifecycle_failed{}) -> ct:pal("~p", [Beat]); handle_beat(_, Beat = #mg_core_machine_lifecycle_transient_error{}) -> diff --git a/apps/mg_core/test/mg_core_events_stash_SUITE.erl b/apps/mg_core/test/mg_core_events_stash_SUITE.erl index 0cf1dc47..06435c04 100644 --- a/apps/mg_core/test/mg_core_events_stash_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_stash_SUITE.erl @@ -211,7 +211,7 @@ dummy_sink_handler(_Events) -> %% Pulse handler --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). @@ -219,7 +219,7 @@ handle_beat(_, Beat) -> -spec start_automaton(options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( mg_core_events_machine:start_link(events_machine_options(Options)) ). diff --git a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl index 8f46a688..2b8786c8 100644 --- a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl +++ b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl @@ -187,7 +187,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -239,6 +239,6 @@ automaton_options_wo_shedulers(NS) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl index d91407c3..ddbd98b5 100644 --- a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl +++ b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl @@ -113,7 +113,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -145,7 +145,7 @@ automaton_options(NS) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> no_return(). +-spec handle_beat(_, mpulse:beat()) -> no_return(). handle_beat(_, _Event) -> erlang:error(logging_oops). diff --git a/apps/mg_core/test/mg_core_interrupted_SUITE.erl b/apps/mg_core/test/mg_core_interrupted_SUITE.erl index 4ebbcbbf..3db76725 100644 --- a/apps/mg_core/test/mg_core_interrupted_SUITE.erl +++ b/apps/mg_core/test/mg_core_interrupted_SUITE.erl @@ -147,7 +147,7 @@ process_machine(_, _, {call, answer}, _, ?REQ_CTX, _, State) -> %% -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -182,7 +182,7 @@ automaton_options(NS, StorageName) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, {squad, _}) -> ok; handle_beat(_, Beat) -> diff --git a/apps/mg_core/test/mg_core_machine_SUITE.erl b/apps/mg_core/test/mg_core_machine_SUITE.erl index e7f0253c..871a10b2 100644 --- a/apps/mg_core/test/mg_core_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_SUITE.erl @@ -218,7 +218,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). 
start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -249,6 +249,6 @@ automaton_options(C) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl index 94859fcc..cd89286a 100644 --- a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl @@ -294,7 +294,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -324,7 +324,7 @@ automaton_options() -> lists_random(List) -> lists:nth(rand:uniform(length(List)), List). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(Options, Beat) -> ok = mg_core_pulse_otel:handle_beat(Options, Beat), %% NOTE для отладки может понадобится diff --git a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl index 3c539813..f07efe32 100644 --- a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl @@ -279,7 +279,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -320,6 +320,6 @@ notification_options() -> storage => mg_core_storage_memory }. 
--spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_notification_SUITE.erl b/apps/mg_core/test/mg_core_notification_SUITE.erl index 421116ae..007a2b6d 100644 --- a/apps/mg_core/test/mg_core_notification_SUITE.erl +++ b/apps/mg_core/test/mg_core_notification_SUITE.erl @@ -191,6 +191,6 @@ pass_saved_cfg(C) -> get_cfg(Key, C) -> test_server:lookup_config(Key, C). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_storages_SUITE.erl b/apps/mg_core/test/mg_core_storages_SUITE.erl index d7f71c00..04e66738 100644 --- a/apps/mg_core/test/mg_core_storages_SUITE.erl +++ b/apps/mg_core/test/mg_core_storages_SUITE.erl @@ -360,7 +360,7 @@ storage_options(memory, _) -> -spec start_storage(mg_core_storage:options()) -> pid(). start_storage(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, [mg_core_storage:child_spec(Options, storage)] @@ -372,6 +372,6 @@ stop_storage(Pid) -> ok = proc_lib:stop(Pid, normal, 5000), ok. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl index 82ffb634..78267091 100644 --- a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl @@ -183,7 +183,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. 
stop_automaton(Pid) -> @@ -219,7 +219,7 @@ automaton_options(NS, RetryPolicy) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_workers_SUITE.erl b/apps/mg_core/test/mg_core_workers_SUITE.erl index 5e739650..1f68e605 100644 --- a/apps/mg_core/test/mg_core_workers_SUITE.erl +++ b/apps/mg_core/test/mg_core_workers_SUITE.erl @@ -547,7 +547,7 @@ try_unlink(#{}) -> %% -spec start_workers(_Options) -> pid(). start_workers(Options) -> - mg_core_utils:throw_if_error(mg_core_workers_manager:start_link(Options)). + mg_utils:throw_if_error(mg_core_workers_manager:start_link(Options)). -spec stop_workers(pid()) -> ok. stop_workers(Pid) -> diff --git a/apps/mg_cth/src/mg_cth.app.src b/apps/mg_cth/src/mg_cth.app.src index 0d65a4a2..34f44b09 100644 --- a/apps/mg_cth/src/mg_cth.app.src +++ b/apps/mg_cth/src/mg_cth.app.src @@ -6,6 +6,7 @@ kernel, stdlib, genlib, + mg_utils, mg_core, mg_es_machine, mg_es_kafka, diff --git a/apps/mg_cth/src/mg_cth.erl b/apps/mg_cth/src/mg_cth.erl index b27e8aac..3e36a08c 100644 --- a/apps/mg_cth/src/mg_cth.erl +++ b/apps/mg_cth/src/mg_cth.erl @@ -132,7 +132,7 @@ assert_wait_expected(Expected, Fun, Strategy) when is_function(Fun, 0) -> end end. --spec build_storage(mg_core:ns(), mg_core_utils:mod_opts()) -> mg_core_utils:mod_opts(). +-spec build_storage(mg_core:ns(), mg_utils:mod_opts()) -> mg_utils:mod_opts(). build_storage(NS, Module) when is_atom(Module) -> build_storage(NS, {Module, #{}}); build_storage(NS, {Module, Options}) -> diff --git a/apps/mg_cth/src/mg_cth_pulse.erl b/apps/mg_cth/src/mg_cth_pulse.erl index f0ed2789..36998506 100644 --- a/apps/mg_cth/src/mg_cth_pulse.erl +++ b/apps/mg_cth/src/mg_cth_pulse.erl @@ -16,8 +16,8 @@ -module(mg_cth_pulse). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -export([handle_beat/2]). 
%% diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl index 8ba0c1da..42f3fb29 100644 --- a/apps/mg_es_kafka/src/mg_event_sink_kafka.erl +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl @@ -28,7 +28,7 @@ name := atom(), topic := brod:topic(), client := brod:client(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), encoder := encoder() }. @@ -53,7 +53,7 @@ add_events(Options, NS, MachineID, Events, ReqCtx, Deadline) -> EncodeTimestamp = erlang:monotonic_time(), {ok, Partition, Offset} = produce(Client, Topic, event_key(NS, MachineID), Batch), FinishTimestamp = erlang:monotonic_time(), - ok = mg_core_pulse:handle_beat(Pulse, #mg_event_sink_kafka_sent{ + ok = mpulse:handle_beat(Pulse, #mg_event_sink_kafka_sent{ name = Name, namespace = NS, machine_id = MachineID, diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl index feadea91..83405f39 100644 --- a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl @@ -4,12 +4,12 @@ -export([setup/0]). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -export([handle_beat/2]). %% internal types --type beat() :: #mg_event_sink_kafka_sent{} | mg_core_pulse:beat(). +-type beat() :: #mg_event_sink_kafka_sent{} | mg_core:beat(). -type options() :: #{}. -type metric_name() :: prometheus_metric:name(). -type metric_label_value() :: term(). diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl index 2082985d..e7efa13c 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl @@ -150,7 +150,7 @@ event_sink_options() -> end }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. 
handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl index ac6f3899..2ce29c05 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl @@ -299,7 +299,7 @@ event_sink_options() -> end }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_es_machine/src/mg_event_sink_machine.erl b/apps/mg_es_machine/src/mg_event_sink_machine.erl index 3a760b50..3d379826 100644 --- a/apps/mg_es_machine/src/mg_event_sink_machine.erl +++ b/apps/mg_es_machine/src/mg_event_sink_machine.erl @@ -49,7 +49,7 @@ machine_id := mg_core:id(), storage := storage_options(), worker := mg_core_workers_manager:ns_options(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), events_storage := mg_core_storage:options(), default_processing_timeout := timeout() }. @@ -57,12 +57,12 @@ namespace := mg_core:ns(), storage := storage_options(), worker := mg_core_workers_manager:ns_options(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), events_storage := storage_options(), default_processing_timeout := timeout() }. % like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -spec child_spec(ns_options(), atom()) -> supervisor:child_spec(). child_spec(Options, ChildID) -> @@ -73,11 +73,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(ns_options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(ns_options()) -> mg_utils:gen_start_ret(). 
start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_machine:child_spec(machine_options(Options), automaton), mg_core_storage:child_spec(events_storage_options(Options), events_storage) ]) @@ -226,7 +226,7 @@ machine_options( } ) -> #{ - namespace => mg_core_utils:concatenate_namespaces(Namespace, <<"machines">>), + namespace => mg_utils:concatenate_namespaces(Namespace, <<"machines">>), processor => {?MODULE, Options}, storage => Storage, worker => Worker, @@ -235,7 +235,7 @@ machine_options( -spec events_storage_options(ns_options()) -> mg_core_storage:options(). events_storage_options(#{namespace := NS, events_storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?MODULE, events}, pulse => Handler}}. %% diff --git a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl index ddac1ce4..e462a0bc 100644 --- a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl +++ b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl @@ -136,7 +136,7 @@ get_history(_C) -> -spec start_event_sink(mg_event_sink_machine:ns_options()) -> pid(). start_event_sink(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, [mg_event_sink_machine:child_spec(Options, event_sink)] @@ -164,6 +164,6 @@ event_sink_options() -> machine_id => ?ES_ID }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_riak/src/mg_riak.app.src b/apps/mg_riak/src/mg_riak.app.src index da0470ac..d6219edf 100644 --- a/apps/mg_riak/src/mg_riak.app.src +++ b/apps/mg_riak/src/mg_riak.app.src @@ -24,6 +24,7 @@ gproc, genlib, prometheus, + mg_utils, mg_core, riakc, pooler, diff --git a/apps/mg_riak/src/mg_riak_prometheus.erl b/apps/mg_riak/src/mg_riak_prometheus.erl index bea49a0e..1e163077 100644 --- a/apps/mg_riak/src/mg_riak_prometheus.erl +++ b/apps/mg_riak/src/mg_riak_prometheus.erl @@ -37,7 +37,7 @@ child_spec(_Options, Storage, ChildID) -> -spec init(mg_core_storage:options()) -> genlib_gen:supervisor_ret(). init(Storage) -> - {mg_riak_storage, StorageOptions} = mg_core_utils:separate_mod_opts(Storage), + {mg_riak_storage, StorageOptions} = mg_utils:separate_mod_opts(Storage), true = gproc:add_local_property(?PROPNAME, StorageOptions), % NOTE % We only care about keeping gproc property live through this supervisor process. diff --git a/apps/mg_riak/src/mg_riak_pulse.erl b/apps/mg_riak/src/mg_riak_pulse.erl index a4636dd2..80438a34 100644 --- a/apps/mg_riak/src/mg_riak_pulse.erl +++ b/apps/mg_riak/src/mg_riak_pulse.erl @@ -2,6 +2,8 @@ -include_lib("mg_riak/include/pulse.hrl"). +-behaviour(mpulse). + %% API -export_type([beat/0]). -export([handle_beat/2]). @@ -24,6 +26,6 @@ | #mg_riak_connection_pool_connection_killed{} | #mg_riak_connection_pool_error{}. --spec handle_beat(any(), beat() | _OtherBeat) -> ok. +-spec handle_beat(any(), beat() | mpulse:beat()) -> ok. handle_beat(_Options, _Beat) -> ok. diff --git a/apps/mg_riak/src/mg_riak_pulse_prometheus.erl b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl index aa9bead3..ce3287b5 100644 --- a/apps/mg_riak/src/mg_riak_pulse_prometheus.erl +++ b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl @@ -19,6 +19,8 @@ -include_lib("mg_riak/include/pulse.hrl"). -export([setup/0]). + +-behaviour(mpulse). -export([handle_beat/2]). 
%% internal types @@ -31,7 +33,7 @@ %% mg_pulse handler %% --spec handle_beat(options(), beat() | _OtherBeat) -> ok. +-spec handle_beat(options(), beat() | mpulse:beat()) -> ok. handle_beat(_Options, Beat) -> ok = dispatch_metrics(Beat). diff --git a/apps/mg_riak/src/mg_riak_storage.erl b/apps/mg_riak/src/mg_riak_storage.erl index 9ddfcf3d..c2e573f7 100644 --- a/apps/mg_riak/src/mg_riak_storage.erl +++ b/apps/mg_riak/src/mg_riak_storage.erl @@ -73,7 +73,7 @@ port := inet:port_number(), bucket := bucket(), pool_options := pool_options(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), resolve_timeout => timeout(), connect_timeout => timeout(), request_timeout => timeout(), @@ -96,7 +96,7 @@ {return_terms, boolean()} | {term_regex, binary()}. -type range_index_opts() :: [index_opt() | range_index_opt()]. --type client_ref() :: mg_core_utils:gen_ref(). +-type client_ref() :: mg_utils:gen_ref(). %% See https://github.com/seth/pooler/blob/master/src/pooler_config.erl for pool option details -type pool_options() :: #{ @@ -111,7 +111,7 @@ -type pool_name() :: atom(). -type pulse_options() :: #{ name := mg_core_storage:name(), - pulse := mg_core_pulse:handler() + pulse := mpulse:handler() }. %% Duration is measured in native units @@ -146,12 +146,12 @@ pool_utilization(Options) -> %% internal API %% --spec start_client(options()) -> mg_core_utils:gen_start_ret(). +-spec start_client(options()) -> mg_utils:gen_start_ret(). start_client(#{port := Port} = Options) -> IP = get_riak_addr(Options), riakc_pb_socket:start_link(IP, Port, [{connect_timeout, get_option(connect_timeout, Options)}]). --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). 
start_link(Options) -> PoolName = construct_pool_name(Options), PoolConfig = pooler_config:list_to_pool(make_pool_config(PoolName, Options)), @@ -597,7 +597,7 @@ pulse_options(PoolNameString) -> -spec update_or_create([binary()], number(), pooler_metric_type(), []) -> ok. update_or_create([<<"pooler">>, PoolNameString, <<"error_no_members_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, #mg_riak_connection_pool_state_reached{ name = Name, @@ -606,7 +606,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"error_no_members_count">>], _ ); update_or_create([<<"pooler">>, PoolNameString, <<"queue_max_reached">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, #mg_riak_connection_pool_state_reached{ name = Name, @@ -615,7 +615,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"queue_max_reached">>], _, _Co ); update_or_create([<<"pooler">>, PoolNameString, <<"starting_member_timeout">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, #mg_riak_connection_pool_error{ name = Name, @@ -624,7 +624,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"starting_member_timeout">>], ); update_or_create([<<"pooler">>, PoolNameString, <<"killed_free_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, #mg_riak_connection_pool_connection_killed{ name = Name, @@ -633,7 +633,7 @@ update_or_create([<<"pooler">>, PoolNameString, <<"killed_free_count">>], _, _Co ); update_or_create([<<"pooler">>, PoolNameString, <<"killed_in_use_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + 
mpulse:handle_beat( Handler, #mg_riak_connection_pool_connection_killed{ name = Name, @@ -649,40 +649,40 @@ update_or_create(_MetricKey, _Value, _Type, []) -> -spec emit_beat_start(mg_core_storage:request(), options()) -> ok. emit_beat_start({get, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_get_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_get_start{ name = Name }); emit_beat_start({put, _, _, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_put_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_put_start{ name = Name }); emit_beat_start({search, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_search_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_search_start{ name = Name }); emit_beat_start({delete, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_delete_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_delete_start{ name = Name }). -spec emit_beat_finish(mg_core_storage:request(), options(), duration()) -> ok. 
emit_beat_finish({get, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_get_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_get_finish{ name = Name, duration = Duration }); emit_beat_finish({put, _, _, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_put_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_put_finish{ name = Name, duration = Duration }); emit_beat_finish({search, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_search_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_search_finish{ name = Name, duration = Duration }); emit_beat_finish({delete, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_riak_client_delete_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_delete_finish{ name = Name, duration = Duration }). diff --git a/apps/mg_riak/test/mg_riak_storage_SUITE.erl b/apps/mg_riak/test/mg_riak_storage_SUITE.erl index dc43be82..00156a29 100644 --- a/apps/mg_riak/test/mg_riak_storage_SUITE.erl +++ b/apps/mg_riak/test/mg_riak_storage_SUITE.erl @@ -497,7 +497,7 @@ riak_options(Namespace, PoolOptions) -> -spec start_storage(mg_core_storage:options()) -> pid(). start_storage(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, [mg_core_storage:child_spec(Options, storage)] @@ -509,6 +509,6 @@ stop_storage(Pid) -> ok = proc_lib:stop(Pid, normal, 5000), ok. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_scheduler/include/pulse.hrl b/apps/mg_scheduler/include/pulse.hrl index fee46873..d30be2c4 100644 --- a/apps/mg_scheduler/include/pulse.hrl +++ b/apps/mg_scheduler/include/pulse.hrl @@ -1,7 +1,7 @@ %% Scheduler -record(mg_skd_search_success, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), delay :: mg_skd_scanner:scan_delay(), tasks :: [mg_skd_task:task()], @@ -11,50 +11,50 @@ }). -record(mg_skd_search_error, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), - exception :: mg_skd_utils:exception() + exception :: mg_utils:exception() }). -record(mg_skd_task_error, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), - exception :: mg_skd_utils:exception(), - machine_id :: mg_skd_utils:id() | undefined + exception :: mg_utils:exception(), + machine_id :: mg_utils:id() | undefined }). -record(mg_skd_task_add_error, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), - exception :: mg_skd_utils:exception(), - machine_id :: mg_skd_utils:id(), - request_context :: mg_skd_utils:request_context() + exception :: mg_utils:exception(), + machine_id :: mg_utils:id(), + request_context :: mg_utils:request_context() }). -record(mg_skd_new_tasks, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), new_tasks_count :: non_neg_integer() }). -record(mg_skd_task_started, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), - machine_id :: mg_skd_utils:id() | undefined, + machine_id :: mg_utils:id() | undefined, task_delay :: timeout() }). 
-record(mg_skd_task_finished, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), - machine_id :: mg_skd_utils:id() | undefined, + machine_id :: mg_utils:id() | undefined, task_delay :: timeout(), % in native units process_duration :: non_neg_integer() }). -record(mg_skd_quota_reserved, { - namespace :: mg_skd_utils:ns(), + namespace :: mg_utils:ns(), scheduler_name :: mg_skd:name(), active_tasks :: non_neg_integer(), waiting_tasks :: non_neg_integer(), diff --git a/apps/mg_scheduler/src/mg_scheduler.app.src b/apps/mg_scheduler/src/mg_scheduler.app.src index a3daa1b9..133d0547 100644 --- a/apps/mg_scheduler/src/mg_scheduler.app.src +++ b/apps/mg_scheduler/src/mg_scheduler.app.src @@ -8,6 +8,7 @@ genlib, gproc, gen_squad, + mg_utils, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_scheduler/src/mg_skd.erl b/apps/mg_scheduler/src/mg_skd.erl index 17233ea3..6ddbc649 100644 --- a/apps/mg_scheduler/src/mg_skd.erl +++ b/apps/mg_scheduler/src/mg_skd.erl @@ -16,6 +16,8 @@ -module(mg_skd). +-include_lib("mg_scheduler/include/pulse.hrl"). + -export([child_spec/3]). -export([start_link/2]). @@ -30,17 +32,31 @@ -export([handle_cast/2]). -export([handle_call/3]). +%% Beats +-type beat() :: + % Scheduler handling + #mg_skd_task_add_error{} + | #mg_skd_search_success{} + | #mg_skd_search_error{} + | #mg_skd_task_error{} + | #mg_skd_new_tasks{} + | #mg_skd_task_started{} + | #mg_skd_task_finished{} + | #mg_skd_quota_reserved{}. + +-export_type([beat/0]). + %% Types -type options() :: #{ start_interval => non_neg_integer(), capacity := non_neg_integer(), quota_name := mg_skd_quota_worker:name(), quota_share => mg_skd_quota:share(), - pulse => mg_skd_pulse:handler() + pulse => mpulse:handler() }. -type name() :: atom(). --type id() :: {name(), mg_skd_utils:ns()}. +-type id() :: {name(), mg_utils:ns()}. -type task_id() :: mg_skd_task:id(). -type task() :: mg_skd_task:task(). 
@@ -54,7 +70,7 @@ %% Internal types -record(state, { id :: id(), - pulse :: mg_skd_pulse:handler(), + pulse :: mpulse:handler(), capacity :: non_neg_integer(), quota_name :: mg_skd_quota_worker:name(), quota_share :: mg_skd_quota:share(), @@ -99,7 +115,7 @@ child_spec(ID, Options, ChildID) -> type => worker }. --spec start_link(id(), options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_utils:gen_start_ret(). start_link(ID, Options) -> gen_server:start_link(self_reg_name(ID), ?MODULE, {ID, Options}, []). @@ -117,7 +133,7 @@ distribute_tasks(Pid, Tasks) when is_pid(Pid) -> %% gen_server callbacks --spec init({id(), options()}) -> mg_skd_utils:gen_server_init_ret(state()). +-spec init({id(), options()}) -> mg_utils:gen_server_init_ret(state()). init({ID, Options}) -> {ok, TimerRef} = timer:send_interval(maps:get(start_interval, Options, 1000), start), {ok, #state{ @@ -133,8 +149,8 @@ init({ID, Options}) -> timer = TimerRef }}. --spec handle_call(Call :: any(), mg_skd_utils:gen_server_from(), state()) -> - mg_skd_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). handle_call(inquire, _From, State) -> Status = #{ pid => self(), @@ -150,7 +166,7 @@ handle_call(Call, From, State) -> -type cast() :: {tasks, [task()]}. --spec handle_cast(cast(), state()) -> mg_skd_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(cast(), state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast({tasks, Tasks}, State0) -> State1 = add_tasks(Tasks, State0), State2 = maybe_update_reserved(State1), @@ -164,7 +180,7 @@ handle_cast(Cast, State) -> {'DOWN', monitor(), process, pid(), _Info} | start. --spec handle_info(info(), state()) -> mg_skd_utils:gen_server_handle_info_ret(state()). +-spec handle_info(info(), state()) -> mg_utils:gen_server_handle_info_ret(state()). 
handle_info({'DOWN', Monitor, process, _Object, _Info}, State0) -> State1 = forget_about_task(Monitor, State0), State2 = start_new_tasks(State1), @@ -181,6 +197,7 @@ handle_info(Info, State) -> -spec self_reg_name(id()) -> mg_skd_procreg:reg_name(). self_reg_name(ID) -> + %% TODO Decouple `mg_core'/`mg_skd' with `mg_procreg'. mg_skd_procreg:reg_name(mg_skd_procreg_gproc, {?MODULE, ID}). -spec self_ref(id()) -> mg_skd_procreg:ref(). @@ -337,11 +354,9 @@ get_waiting_task_count(#state{waiting_tasks = WaitingTasks}) -> %% logging --include_lib("mg_scheduler/include/pulse.hrl"). - --spec emit_beat(mg_skd_pulse:handler(), mg_skd_pulse:beat()) -> ok. +-spec emit_beat(mpulse:handler(), mpulse:beat()) -> ok. emit_beat(Handler, Beat) -> - ok = mg_skd_pulse:handle_beat(Handler, Beat). + ok = mpulse:handle_beat(Handler, Beat). -spec emit_new_tasks_beat(non_neg_integer(), state()) -> ok. emit_new_tasks_beat(NewTasksCount, #state{pulse = Pulse, id = {Name, NS}}) -> diff --git a/apps/mg_scheduler/src/mg_skd_pulse.erl b/apps/mg_scheduler/src/mg_skd_pulse.erl deleted file mode 100644 index c932a515..00000000 --- a/apps/mg_scheduler/src/mg_skd_pulse.erl +++ /dev/null @@ -1,25 +0,0 @@ --module(mg_skd_pulse). - --include_lib("mg_scheduler/include/pulse.hrl"). - -%% API --export_type([beat/0]). --export_type([handler/0]). - --callback handle_beat(handler(), beat() | any()) -> ok. - -%% -%% API -%% --type beat() :: - % Scheduler handling - #mg_skd_task_add_error{} - | #mg_skd_search_success{} - | #mg_skd_search_error{} - | #mg_skd_task_error{} - | #mg_skd_new_tasks{} - | #mg_skd_task_started{} - | #mg_skd_task_finished{} - | #mg_skd_quota_reserved{}. - --type handler() :: mg_skd_utils:mod_opts() | undefined. 
diff --git a/apps/mg_scheduler/src/mg_skd_quota_manager.erl b/apps/mg_scheduler/src/mg_skd_quota_manager.erl index f995d9bb..857f141d 100644 --- a/apps/mg_scheduler/src/mg_skd_quota_manager.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_manager.erl @@ -41,7 +41,7 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_one}, diff --git a/apps/mg_scheduler/src/mg_skd_quota_worker.erl b/apps/mg_scheduler/src/mg_skd_quota_worker.erl index 6cfa0ea8..898be921 100644 --- a/apps/mg_scheduler/src/mg_skd_quota_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_worker.erl @@ -86,7 +86,7 @@ child_spec(Options, ChildID) -> shutdown => 5000 }. --spec start_link(options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(#{name := Name} = Options) -> gen_server:start_link(self_reg_name(Name), ?MODULE, Options, []). @@ -99,7 +99,7 @@ reserve(ClientOptions, Usage, Expectation, Name) -> %% gen_server callbacks --spec init(options()) -> mg_skd_utils:gen_server_init_ret(state()). +-spec init(options()) -> mg_utils:gen_server_init_ret(state()). init(Options) -> #{limit := Limit} = Options, Interval = maps:get(update_interval, Options, ?DEFAULT_UPDATE_INTERVAL), @@ -112,8 +112,8 @@ init(Options) -> timer = erlang:send_after(Interval, self(), ?UPDATE_MESSAGE) }}. --spec handle_call(Call :: any(), mg_skd_utils:gen_server_from(), state()) -> - mg_skd_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). 
handle_call({reserve, ClientOptions, Usage, Expectation}, {Pid, _Tag}, State0) -> State1 = ensure_is_registered(ClientOptions, Pid, State0), {ok, NewReserved, NewQuota} = mg_skd_quota:reserve( @@ -127,12 +127,12 @@ handle_call(Call, From, State) -> ok = logger:error("unexpected gen_server call received: ~p from ~p", [Call, From]), {noreply, State}. --spec handle_cast(Cast :: any(), state()) -> mg_skd_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(Cast :: any(), state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast(Cast, State) -> ok = logger:error("unexpected gen_server cast received: ~p", [Cast]), {noreply, State}. --spec handle_info(Info :: any(), state()) -> mg_skd_utils:gen_server_handle_info_ret(state()). +-spec handle_info(Info :: any(), state()) -> mg_utils:gen_server_handle_info_ret(state()). handle_info(?UPDATE_MESSAGE, State) -> {ok, NewQuota} = mg_skd_quota:recalculate_targets(State#state.quota), {noreply, restart_timer(?UPDATE_MESSAGE, State#state{quota = NewQuota})}; @@ -143,7 +143,7 @@ handle_info(Info, State) -> {noreply, State}. -spec code_change(OldVsn :: any(), state(), Extra :: any()) -> - mg_skd_utils:gen_server_code_change_ret(state()). + mg_utils:gen_server_code_change_ret(state()). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -187,11 +187,11 @@ forget_about_client(Monitor, State) -> % Worker registration --spec self_ref(name()) -> mg_skd_utils:gen_ref(). +-spec self_ref(name()) -> mg_utils:gen_ref(). self_ref(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. --spec self_reg_name(name()) -> mg_skd_utils:gen_reg_name(). +-spec self_reg_name(name()) -> mg_utils:gen_reg_name(). self_reg_name(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. 
diff --git a/apps/mg_scheduler/src/mg_skd_scanner.erl b/apps/mg_scheduler/src/mg_skd_scanner.erl index 4a06eac0..b31e2c9d 100644 --- a/apps/mg_scheduler/src/mg_skd_scanner.erl +++ b/apps/mg_scheduler/src/mg_skd_scanner.erl @@ -40,7 +40,7 @@ scan_ahead => scan_ahead(), retry_scan_delay => scan_delay(), squad_opts => gen_squad:opts(), - pulse => mg_skd_pulse:handler() + pulse => mpulse:handler() }. -export_type([options/0]). @@ -58,7 +58,7 @@ -type queue_state() :: any(). -type queue_options() :: any(). --type queue_handler() :: mg_skd_utils:mod_opts(queue_options()). +-type queue_handler() :: mg_utils:mod_opts(queue_options()). -callback child_spec(queue_options(), atom()) -> supervisor:child_spec() | undefined. -callback init(queue_options()) -> {ok, queue_state()}. @@ -100,7 +100,7 @@ -spec child_spec(scheduler_id(), options(), _ChildID) -> supervisor:child_spec(). child_spec(SchedulerID, Options, ChildID) -> Flags = #{strategy => rest_for_one}, - ChildSpecs = mg_skd_utils:lists_compact([ + ChildSpecs = mg_utils:lists_compact([ handler_child_spec(Options, {ChildID, handler}), #{ id => {ChildID, scanner}, @@ -117,11 +117,11 @@ child_spec(SchedulerID, Options, ChildID) -> -spec handler_child_spec(options(), _ChildID) -> supervisor:child_spec() | undefined. handler_child_spec(#{queue_handler := Handler}, ChildID) -> - mg_skd_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). + mg_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). %% --spec start_link(scheduler_id(), options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_utils:gen_start_ret(). start_link(SchedulerID, Options) -> SquadOpts = maps:merge( maps:get(squad_opts, Options, #{}), @@ -139,7 +139,7 @@ start_link(SchedulerID, Options) -> -spec where_is(scheduler_id()) -> pid() | undefined. where_is(SchedulerID) -> - mg_skd_utils:gen_ref_to_pid(self_ref(SchedulerID)). + mg_utils:gen_ref_to_pid(self_ref(SchedulerID)). 
%% @@ -152,7 +152,7 @@ where_is(SchedulerID) -> scan_ahead :: scan_ahead(), retry_delay :: scan_delay(), timer :: reference() | undefined, - pulse :: mg_skd_pulse:handler() | undefined + pulse :: mpulse:handler() | undefined }). -type st() :: #st{}. @@ -196,7 +196,7 @@ handle_cast(Cast, Rank, _Squad, St) -> ), {noreply, St}. --spec handle_call(_Call, mg_skd_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. +-spec handle_call(_Call, mg_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. handle_call(Call, From, Rank, _Squad, St) -> ok = logger:error( "unexpected gen_squad call received: ~p, from ~p, rank ~p, state ~p", @@ -259,7 +259,7 @@ disseminate_tasks(Tasks, [_Scheduler = #{pid := Pid}], _Capacities, _St) -> mg_skd:distribute_tasks(Pid, Tasks); disseminate_tasks(Tasks, Schedulers, Capacities, _St) -> %% Partition tasks among known schedulers proportionally to their capacities - Partitions = mg_skd_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), + Partitions = mg_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), %% Distribute shares of tasks among schedulers, sending directly to pids maps:fold( fun(_Scheduler = #{pid := Pid}, TasksShare, _) -> @@ -324,13 +324,13 @@ cancel_timer(St) -> -spec init_handler(queue_handler()) -> queue_handler_state(). init_handler(Handler) -> - {ok, InitialState} = mg_skd_utils:apply_mod_opts(Handler, init), + {ok, InitialState} = mg_utils:apply_mod_opts(Handler, init), {Handler, InitialState}. -spec run_handler(queue_handler_state(), _Function :: atom(), _Args :: list()) -> {_Result, queue_handler_state()}. run_handler({Handler, State}, Function, Args) -> - {Result, NextState} = mg_skd_utils:apply_mod_opts(Handler, Function, Args ++ [State]), + {Result, NextState} = mg_utils:apply_mod_opts(Handler, Function, Args ++ [State]), {Result, {Handler, NextState}}. %% @@ -347,9 +347,9 @@ self_ref(SchedulerID) -> -include_lib("mg_scheduler/include/pulse.hrl"). 
--spec emit_scan_error_beat(mg_skd_utils:exception(), st()) -> ok. +-spec emit_scan_error_beat(mg_utils:exception(), st()) -> ok. emit_scan_error_beat(Exception, #st{pulse = Pulse, scheduler_id = {Name, NS}}) -> - mg_skd_pulse:handle_beat(Pulse, #mg_skd_search_error{ + mpulse:handle_beat(Pulse, #mg_skd_search_error{ namespace = NS, scheduler_name = Name, exception = Exception @@ -360,7 +360,7 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ pulse = Pulse, scheduler_id = {Name, NS} }) -> - mg_skd_pulse:handle_beat(Pulse, #mg_skd_search_success{ + mpulse:handle_beat(Pulse, #mg_skd_search_success{ namespace = NS, scheduler_name = Name, delay = Delay, @@ -371,8 +371,8 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ %% --spec handle_beat({mg_skd_pulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. +-spec handle_beat({mpulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. handle_beat({Handler, {Name, NS}}, Beat) -> Producer = queue_scanner, Extra = [{scheduler_type, Name}, {namespace, NS}], - mg_skd_pulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). + mpulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). diff --git a/apps/mg_scheduler/src/mg_skd_sup.erl b/apps/mg_scheduler/src/mg_skd_sup.erl index 514e34e7..a34e590e 100644 --- a/apps/mg_scheduler/src/mg_skd_sup.erl +++ b/apps/mg_scheduler/src/mg_skd_sup.erl @@ -31,9 +31,9 @@ retry_scan_delay => mg_skd_scanner:scan_delay(), squad_opts => gen_squad:opts(), % workers - task_handler := mg_skd_utils:mod_opts(), + task_handler := mg_utils:mod_opts(), % common - pulse => mg_skd_pulse:handler() + pulse => mpulse:handler() }. -export_type([options/0]). @@ -52,7 +52,7 @@ child_spec(ID, Options, ChildID) -> type => supervisor }. --spec start_link(id(), options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_utils:gen_start_ret(). 
start_link(SchedulerID, Options) -> ManagerOptions = maps:with( [start_interval, capacity, quota_name, quota_share, pulse], @@ -68,7 +68,7 @@ start_link(SchedulerID, Options) -> ), genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_skd_utils:lists_compact([ + mg_utils:lists_compact([ mg_skd_scanner:child_spec(SchedulerID, ScannerOptions, queue), mg_skd_worker:child_spec(SchedulerID, WorkerOptions, tasks), mg_skd:child_spec(SchedulerID, ManagerOptions, manager) diff --git a/apps/mg_scheduler/src/mg_skd_task.erl b/apps/mg_scheduler/src/mg_skd_task.erl index 6d98dc59..6465d86e 100644 --- a/apps/mg_scheduler/src/mg_skd_task.erl +++ b/apps/mg_scheduler/src/mg_skd_task.erl @@ -24,7 +24,7 @@ -type task(TaskID, TaskPayload) :: #{ id := TaskID, target_time := target_time(), - machine_id := mg_skd_utils:id(), + machine_id := mg_utils:id(), payload => TaskPayload }. diff --git a/apps/mg_scheduler/src/mg_skd_utils.erl b/apps/mg_scheduler/src/mg_skd_utils.erl deleted file mode 100644 index 035f8733..00000000 --- a/apps/mg_scheduler/src/mg_skd_utils.erl +++ /dev/null @@ -1,126 +0,0 @@ --module(mg_skd_utils). - -%% OTP - --export_type([reason/0]). --export_type([gen_timeout/0]). --export_type([gen_start_ret/0]). --export_type([gen_ref/0]). --export_type([gen_reg_name/0]). --export_type([gen_server_from/0]). --export_type([gen_server_init_ret/1]). --export_type([gen_server_handle_call_ret/1]). --export_type([gen_server_handle_cast_ret/1]). --export_type([gen_server_handle_info_ret/1]). --export_type([gen_server_code_change_ret/1]). --export_type([supervisor_ret/0]). - -%% - --export([separate_mod_opts/1]). --export([lists_compact/1]). - -%% - --type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. --type ns() :: binary(). --type id() :: binary(). --type request_context() :: opaque(). - --export_type([opaque/0]). --export_type([ns/0]). --export_type([id/0]). --export_type([request_context/0]). 
- --type exception() :: {exit | error | throw, term(), list()}. - --export_type([exception/0]). - --type mod_opts() :: mod_opts(term()). --type mod_opts(Options) :: {module(), Options} | module(). - --export_type([mod_opts/0]). --export_type([mod_opts/1]). - -%% OTP - --type reason() :: - normal - | shutdown - | {shutdown, _} - | _. --type gen_timeout() :: - 'hibernate' - | timeout(). - --type gen_start_ret() :: - {ok, pid()} - | ignore - | {error, _}. - --type gen_ref() :: - atom() - | {atom(), node()} - | {global, atom()} - | {via, atom(), term()} - | pid(). --type gen_reg_name() :: - {local, atom()} - | {global, term()} - | {via, module(), term()}. - --type gen_server_from() :: {pid(), _}. - --type gen_server_init_ret(State) :: - ignore - | {ok, State} - | {stop, reason()} - | {ok, State, gen_timeout()}. - --type gen_server_handle_call_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {reply, _Reply, State} - | {stop, reason(), State} - | {reply, _Reply, State, gen_timeout()} - | {stop, reason(), _Reply, State}. - --type gen_server_handle_cast_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {stop, reason(), State}. - --type gen_server_handle_info_ret(State) :: - {noreply, State} - | {noreply, State, gen_timeout()} - | {stop, reason(), State}. - --type gen_server_code_change_ret(State) :: - {ok, State} - | {error, _}. - --type supervisor_ret() :: - ignore - | {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}. - -%% - --spec separate_mod_opts(mod_opts()) -> {module(), _Arg}. -separate_mod_opts(ModOpts) -> - separate_mod_opts(ModOpts, undefined). - --spec separate_mod_opts(mod_opts(Defaults), Defaults) -> {module(), Defaults}. -separate_mod_opts(ModOpts = {_, _}, _) -> - ModOpts; -separate_mod_opts(Mod, Default) -> - {Mod, Default}. - --spec lists_compact(list(T)) -> list(T). -lists_compact(List) -> - lists:filter( - fun - (undefined) -> false; - (_) -> true - end, - List - ). 
diff --git a/apps/mg_scheduler/src/mg_skd_worker.erl b/apps/mg_scheduler/src/mg_skd_worker.erl index 06a5c402..c4e1aa00 100644 --- a/apps/mg_scheduler/src/mg_skd_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_worker.erl @@ -35,8 +35,8 @@ -type maybe_span() :: opentelemetry:span_ctx() | undefined. -type options() :: #{ - task_handler := mg_skd_utils:mod_opts(), - pulse => mg_skd_pulse:handler() + task_handler := mg_utils:mod_opts(), + pulse => mpulse:handler() }. -type monitor() :: reference(). @@ -54,7 +54,7 @@ child_spec(SchedulerID, Options, ChildID) -> type => supervisor }. --spec start_link(scheduler_id(), options()) -> mg_skd_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_utils:gen_start_ret(). start_link(SchedulerID, Options) -> genlib_adhoc_supervisor:start_link( self_reg_name(SchedulerID), @@ -78,7 +78,7 @@ start_task(SchedulerID, Task, SpanCtx) -> Error end. --spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_skd_utils:gen_start_ret(). +-spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_utils:gen_start_ret(). do_start_task(SchedulerID, Options, Task, SpanCtx) -> proc_lib:start_link(?MODULE, execute, [SchedulerID, Options, Task, SpanCtx]). @@ -91,7 +91,7 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> ok = emit_start_beat(Task, SchedulerID, Options), ok = try - ok = mg_skd_utils:apply_mod_opts(Handler, execute_task, [Task]), + ok = mg_utils:apply_mod_opts(Handler, execute_task, [Task]), End = erlang:monotonic_time(), ok = emit_finish_beat(Task, Start, End, SchedulerID, Options) catch @@ -104,11 +104,11 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> % Process registration --spec self_ref(scheduler_id()) -> mg_skd_utils:gen_ref(). +-spec self_ref(scheduler_id()) -> mg_utils:gen_ref(). self_ref(ID) -> mg_skd_procreg:ref(mg_skd_procreg_gproc, wrap_id(ID)). --spec self_reg_name(scheduler_id()) -> mg_skd_utils:gen_reg_name(). 
+-spec self_reg_name(scheduler_id()) -> mg_utils:gen_reg_name(). self_reg_name(ID) -> mg_skd_procreg:reg_name(mg_skd_procreg_gproc, wrap_id(ID)). @@ -118,9 +118,9 @@ wrap_id(ID) -> %% logging --spec emit_beat(options(), mg_skd_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(Options, Beat) -> - ok = mg_skd_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). + ok = mpulse:handle_beat(maps:get(pulse, Options, undefined), Beat). -spec get_delay(task()) -> timeout(). get_delay(#{target_time := Target}) -> @@ -147,7 +147,7 @@ emit_finish_beat(Task, StartedAt, FinishedAt, {Name, NS}, Options) -> process_duration = FinishedAt - StartedAt }). --spec emit_error_beat(task(), mg_skd_utils:exception(), scheduler_id(), options()) -> ok. +-spec emit_error_beat(task(), mg_utils:exception(), scheduler_id(), options()) -> ok. emit_error_beat(Task, Exception, {Name, NS}, Options) -> emit_beat(Options, #mg_skd_task_error{ namespace = NS, diff --git a/apps/mg_utils/src/mg_utils.app.src b/apps/mg_utils/src/mg_utils.app.src new file mode 100644 index 00000000..2efde35f --- /dev/null +++ b/apps/mg_utils/src/mg_utils.app.src @@ -0,0 +1,15 @@ +{application, mg_utils, [ + {description, "Machinegun utils library"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_utils.erl b/apps/mg_utils/src/mg_utils.erl similarity index 83% rename from apps/mg_core/src/mg_core_utils.erl rename to apps/mg_utils/src/mg_utils.erl index 4fea8c6b..5e5eeb85 100644 --- a/apps/mg_core/src/mg_core_utils.erl +++ b/apps/mg_utils/src/mg_utils.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2017 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ %%% То, чего не хватает в OTP. 
%%% TODO перенести в genlib %%% --module(mg_core_utils). +-module(mg_utils). %% API %% OTP @@ -75,23 +75,81 @@ -export([take_defined/1]). +%% FIXME Minor crutch for `concatenate_namespaces/2' and scheduler beats +-type ns() :: binary(). +-export_type([ns/0]). + +-type id() :: binary(). +-export_type([id/0]). + +-export_type([opaque/0]). +-type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. + +-export_type([request_context/0]). +-type request_context() :: opaque(). + %% %% API %% OTP %% -%% TODO Refactor or move somewhere else. --type reason() :: mg_skd_utils:reason(). --type gen_timeout() :: mg_skd_utils:gen_timeout(). --type gen_start_ret() :: mg_skd_utils:gen_start_ret(). --type gen_ref() :: mg_skd_utils:gen_ref(). --type gen_reg_name() :: mg_skd_utils:gen_reg_name(). --type gen_server_from() :: mg_skd_utils:gen_server_from(). --type gen_server_init_ret(State) :: mg_skd_utils:gen_server_init_ret(State). --type gen_server_handle_call_ret(State) :: mg_skd_utils:gen_server_handle_call_ret(State). --type gen_server_handle_cast_ret(State) :: mg_skd_utils:gen_server_handle_cast_ret(State). --type gen_server_handle_info_ret(State) :: mg_skd_utils:gen_server_handle_info_ret(State). --type gen_server_code_change_ret(State) :: mg_skd_utils:gen_server_code_change_ret(State). --type supervisor_ret() :: mg_skd_utils:supervisor_ret(). +-type reason() :: + normal + | shutdown + | {shutdown, _} + | _. +-type gen_timeout() :: + 'hibernate' + | timeout(). + +-type gen_start_ret() :: + {ok, pid()} + | ignore + | {error, _}. + +-type gen_ref() :: + atom() + | {atom(), node()} + | {global, atom()} + | {via, atom(), term()} + | pid(). +-type gen_reg_name() :: + {local, atom()} + | {global, term()} + | {via, module(), term()}. + +-type gen_server_from() :: {pid(), _}. + +-type gen_server_init_ret(State) :: + ignore + | {ok, State} + | {stop, reason()} + | {ok, State, gen_timeout()}. 
+ +-type gen_server_handle_call_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {reply, _Reply, State} + | {stop, reason(), State} + | {reply, _Reply, State, gen_timeout()} + | {stop, reason(), _Reply, State}. + +-type gen_server_handle_cast_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {stop, reason(), State}. + +-type gen_server_handle_info_ret(State) :: + {noreply, State} + | {noreply, State, gen_timeout()} + | {stop, reason(), State}. + +-type gen_server_code_change_ret(State) :: + {ok, State} + | {error, _}. + +-type supervisor_ret() :: + ignore + | {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}. -spec gen_reg_name_to_ref(gen_reg_name()) -> gen_ref(). gen_reg_name_to_ref({local, Name}) -> Name; @@ -297,7 +355,7 @@ lists_compact(List) -> List ). --spec concatenate_namespaces(mg_core:ns(), mg_core:ns()) -> mg_core:ns(). +-spec concatenate_namespaces(ns(), ns()) -> ns(). concatenate_namespaces(NamespaceA, NamespaceB) -> <>. diff --git a/apps/mg_utils/src/mpulse.erl b/apps/mg_utils/src/mpulse.erl new file mode 100644 index 00000000..b9497070 --- /dev/null +++ b/apps/mg_utils/src/mpulse.erl @@ -0,0 +1,44 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% +-module(mpulse). + +%% API +-export_type([beat/0]). +-export_type([handler/0]). +-export([handle_beat/2]). + +-callback handle_beat(Options :: any(), beat()) -> ok. 
+ +%% +%% API +%% +-type beat() :: tuple() | atom() | any(). + +-type handler() :: mg_utils:mod_opts() | undefined. + +-spec handle_beat(handler(), any()) -> ok. +handle_beat(undefined, _Beat) -> + ok; +handle_beat(Handler, Beat) -> + {Mod, Options} = mg_utils:separate_mod_opts(Handler), + try + ok = Mod:handle_beat(Options, Beat) + catch + Class:Reason:ST -> + Stacktrace = genlib_format:format_stacktrace(ST), + Msg = "Pulse handler ~p failed at beat ~p: ~p:~p ~s", + ok = logger:error(Msg, [{Mod, Options}, Beat, Class, Reason, Stacktrace]) + end. diff --git a/apps/mg_woody/include/pulse.hrl b/apps/mg_woody/include/pulse.hrl index b4df40cf..344f3971 100644 --- a/apps/mg_woody/include/pulse.hrl +++ b/apps/mg_woody/include/pulse.hrl @@ -3,7 +3,7 @@ machine_id :: mg_core_events_machine:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(woody_event, { diff --git a/apps/mg_woody/src/mg_woody.app.src b/apps/mg_woody/src/mg_woody.app.src index dfce7df8..1745099d 100644 --- a/apps/mg_woody/src/mg_woody.app.src +++ b/apps/mg_woody/src/mg_woody.app.src @@ -24,6 +24,7 @@ genlib, mg_proto, woody, + mg_utils, mg_core, mg_es_machine, opentelemetry_api diff --git a/apps/mg_woody/src/mg_woody_automaton.erl b/apps/mg_woody/src/mg_woody_automaton.erl index 681b51c4..f8b84eb0 100644 --- a/apps/mg_woody/src/mg_woody_automaton.erl +++ b/apps/mg_woody/src/mg_woody_automaton.erl @@ -237,7 +237,7 @@ get_ns_options(Namespace, Options) -> throw({logic, namespace_not_found}) end. --spec pulse(mg_core:ns(), options()) -> mg_core_pulse:handler(). +-spec pulse(mg_core:ns(), options()) -> mpulse:handler(). 
pulse(Namespace, Options) -> try get_machine_options(Namespace, Options) of #{machines := #{pulse := Pulse}} -> @@ -265,7 +265,7 @@ default_processing_timeout(Namespace, Options) -> simplify_core_machine(Machine = #{status := Status}) -> Machine#{status => simplify_machine_status(Status)}. --spec exception_to_string(mg_core_utils:exception()) -> binary(). +-spec exception_to_string(mg_utils:exception()) -> binary(). exception_to_string(Exception) -> iolist_to_binary(genlib_format:format_exception(Exception)). diff --git a/apps/mg_woody/src/mg_woody_event_handler.erl b/apps/mg_woody/src/mg_woody_event_handler.erl index d31b0f1b..b949211d 100644 --- a/apps/mg_woody/src/mg_woody_event_handler.erl +++ b/apps/mg_woody/src/mg_woody_event_handler.erl @@ -29,9 +29,9 @@ Event :: woody_event_handler:event(), RpcID :: woody:rpc_id(), EventMeta :: woody_event_handler:event_meta(), - PulseHandler :: mg_core_pulse:handler(). + PulseHandler :: mpulse:handler(). handle_event(Event, RpcID, EventMeta, PulseHandler) -> - mg_core_pulse:handle_beat(PulseHandler, #woody_event{ + mpulse:handle_beat(PulseHandler, #woody_event{ event = Event, rpc_id = RpcID, event_meta = EventMeta diff --git a/apps/mg_woody/src/mg_woody_event_sink.erl b/apps/mg_woody/src/mg_woody_event_sink.erl index 71b0970e..2f13e110 100644 --- a/apps/mg_woody/src/mg_woody_event_sink.erl +++ b/apps/mg_woody/src/mg_woody_event_sink.erl @@ -110,6 +110,6 @@ check_event_sink(AvaliableEventSinks, EventSinkID) -> throw({logic, event_sink_not_found}) end. --spec pulse(mg_event_sink_machine:ns_options()) -> mg_core_pulse:handler(). +-spec pulse(mg_event_sink_machine:ns_options()) -> mpulse:handler(). pulse(#{pulse := Pulse}) -> Pulse. 
diff --git a/apps/mg_woody/src/mg_woody_life_sink.erl b/apps/mg_woody/src/mg_woody_life_sink.erl index 7838e840..6a4bd733 100644 --- a/apps/mg_woody/src/mg_woody_life_sink.erl +++ b/apps/mg_woody/src/mg_woody_life_sink.erl @@ -41,7 +41,7 @@ -type machine_lifecycle_failed_event() :: event(machine_lifecycle_failed, #{ occurred_at := timestamp_ns(), - exception := mg_core_utils:exception() + exception := mg_utils:exception() }). -type machine_lifecycle_repaired_event() :: event(machine_lifecycle_repaired, #{ @@ -105,7 +105,7 @@ serialize_data({machine_lifecycle_repaired, _}) -> serialize_data({machine_lifecycle_removed, _}) -> {machine, {removed, #mg_lifesink_MachineLifecycleRemovedEvent{}}}. --spec exception_to_string(mg_core_utils:exception()) -> binary(). +-spec exception_to_string(mg_utils:exception()) -> binary(). exception_to_string(Exception) -> iolist_to_binary(genlib_format:format_exception(Exception)). diff --git a/apps/mg_woody/src/mg_woody_packer.erl b/apps/mg_woody/src/mg_woody_packer.erl index 66b72544..dc125956 100644 --- a/apps/mg_woody/src/mg_woody_packer.erl +++ b/apps/mg_woody/src/mg_woody_packer.erl @@ -356,7 +356,7 @@ unpack(state_change, MachineStateChange) -> } = MachineStateChange, { unpack(aux_state, AuxState), - unpack({list, event_body}, mg_core_utils:take_defined([EventBodies, []])) + unpack({list, event_body}, mg_utils:take_defined([EventBodies, []])) }; unpack(signal, {timeout, #mg_stateproc_TimeoutSignal{}}) -> timeout; diff --git a/apps/mg_woody/src/mg_woody_pulse_otel.erl b/apps/mg_woody/src/mg_woody_pulse_otel.erl index 7edc1aac..e79915b4 100644 --- a/apps/mg_woody/src/mg_woody_pulse_otel.erl +++ b/apps/mg_woody/src/mg_woody_pulse_otel.erl @@ -3,7 +3,7 @@ -include_lib("mg_woody/include/pulse.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). 
@@ -13,7 +13,8 @@ -type beat() :: #woody_event{} | #woody_request_handle_error{} - | mg_core_pulse:beat() + | mg_core:beat() + | mg_skd:beat() | mg_skd_scanner:beat(). -export_type([options/0]). diff --git a/apps/mg_woody/src/mg_woody_utils.erl b/apps/mg_woody/src/mg_woody_utils.erl index 86b7427e..0b0ab2cc 100644 --- a/apps/mg_woody/src/mg_woody_utils.erl +++ b/apps/mg_woody/src/mg_woody_utils.erl @@ -38,7 +38,7 @@ deadline => mg_core_deadline:deadline(), request_context := mg_core:request_context() }. --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). %% %% Woody @@ -58,7 +58,7 @@ handle_error(Ctx, F, Pulse) -> machine_id := ID, request_context := ReqCtx } = Ctx, - ok = mg_core_pulse:handle_beat(Pulse, #woody_request_handle_error{ + ok = mpulse:handle_beat(Pulse, #woody_request_handle_error{ namespace = NS, machine_id = ID, request_context = ReqCtx, diff --git a/elvis.config b/elvis.config index b076488b..fb74f980 100644 --- a/elvis.config +++ b/elvis.config @@ -55,7 +55,7 @@ {elvis_style, invalid_dynamic_call, #{ ignore => [ % Working with generic registries. 
- mg_core_utils + mg_utils ] }}, {elvis_style, no_debug_call, #{ From 31fe5b1220734906af6f17b7c63e77fb09a249c2 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 2 May 2024 12:43:36 +0300 Subject: [PATCH 10/31] Extracts procreg into separate lib and fixes types deps --- .../test/mg_prometheus_metric_SUITE.erl | 2 +- apps/mg_conf/src/mg_conf.erl | 2 +- apps/mg_core/src/mg_core.app.src | 1 + apps/mg_core/src/mg_core_worker.erl | 16 ++++++++-------- apps/mg_core/src/mg_core_workers_manager.erl | 4 ++-- .../test/mg_core_continuation_retry_SUITE.erl | 2 +- .../test/mg_core_events_machine_SUITE.erl | 2 +- .../test/mg_core_events_modernizer_SUITE.erl | 2 +- .../test/mg_core_events_stash_SUITE.erl | 4 ++-- .../test/mg_core_instant_timer_task_SUITE.erl | 4 ++-- .../mg_core_internal_events_logging_SUITE.erl | 2 +- .../mg_core/test/mg_core_interrupted_SUITE.erl | 2 +- apps/mg_core/test/mg_core_machine_SUITE.erl | 4 ++-- .../test/mg_core_machine_full_test_SUITE.erl | 2 +- .../mg_core_machine_notification_SUITE.erl | 2 +- .../mg_core/test/mg_core_timer_retry_SUITE.erl | 2 +- apps/mg_core/test/mg_core_workers_SUITE.erl | 4 ++-- .../test/mg_event_sink_machine_SUITE.erl | 2 +- apps/mg_procreg/src/mg_procreg.app.src | 17 +++++++++++++++++ .../src/mg_procreg.erl} | 4 ++-- .../src/mg_procreg_global.erl} | 16 ++++++++-------- .../src/mg_procreg_gproc.erl} | 18 +++++++++--------- apps/mg_scheduler/src/mg_scheduler.app.src | 1 + apps/mg_scheduler/src/mg_skd.erl | 8 ++++---- apps/mg_scheduler/src/mg_skd_scanner.erl | 8 ++++---- apps/mg_scheduler/src/mg_skd_worker.erl | 4 ++-- .../test/mg_modernizer_tests_SUITE.erl | 2 +- apps/mg_woody/test/mg_stress_SUITE.erl | 2 +- apps/mg_woody/test/mg_woody_tests_SUITE.erl | 6 +++--- config/config.yaml | 5 +++-- rel_scripts/configurator.escript | 11 +++++++---- 31 files changed, 92 insertions(+), 69 deletions(-) create mode 100644 apps/mg_procreg/src/mg_procreg.app.src rename apps/{mg_core/src/mg_core_procreg.erl => 
mg_procreg/src/mg_procreg.erl} (97%) rename apps/{mg_core/src/mg_core_procreg_global.erl => mg_procreg/src/mg_procreg_global.erl} (86%) rename apps/{mg_core/src/mg_core_procreg_gproc.erl => mg_procreg/src/mg_procreg_gproc.erl} (69%) diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 358bf9cd..09364c19 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -550,7 +550,7 @@ mg_config() -> {namespaces, #{}}, {event_sink_ns, #{ storage => mg_core_storage_memory, - registry => mg_core_procreg_global + registry => mg_procreg_global }}, {pulse, {mg_pulse, #{}}} ]. diff --git a/apps/mg_conf/src/mg_conf.erl b/apps/mg_conf/src/mg_conf.erl index 8af853da..b495dcec 100644 --- a/apps/mg_conf/src/mg_conf.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -209,7 +209,7 @@ event_sink_namespace_options(#{storage := Storage} = EventSinkNS, Pulse) -> worker_manager_options(Config) -> maps:merge( #{ - registry => mg_core_procreg_gproc + registry => mg_procreg_gproc }, maps:get(worker, Config, #{}) ). 
diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index 21d5be2a..d21f48ab 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -28,6 +28,7 @@ snowflake, mg_scheduler, mg_utils, + mg_procreg, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_core/src/mg_core_worker.erl b/apps/mg_core/src/mg_core_worker.erl index b30fab88..d58feff5 100644 --- a/apps/mg_core/src/mg_core_worker.erl +++ b/apps/mg_core/src/mg_core_worker.erl @@ -49,7 +49,7 @@ -type options() :: #{ worker => mg_utils:mod_opts(), - registry => mg_core_procreg:options(), + registry => mg_procreg:options(), hibernate_timeout => pos_integer(), unload_timeout => pos_integer(), shutdown_timeout => timeout() @@ -80,7 +80,7 @@ child_spec(ChildID, Options) -> -spec start_link(options(), mg_core:ns(), mg_core:id(), req_ctx()) -> mg_utils:gen_start_ret(). start_link(Options, NS, ID, ReqCtx) -> - mg_core_procreg:start_link( + mg_procreg:start_link( procreg_options(Options), ?WRAP_ID(NS, ID), ?MODULE, @@ -104,7 +104,7 @@ call(Options, NS, ID, Call, ReqCtx, Deadline, Pulse) -> request_context = ReqCtx, deadline = Deadline }), - mg_core_procreg:call( + mg_procreg:call( procreg_options(Options), ?WRAP_ID(NS, ID), {call, Deadline, Call, ReqCtx}, @@ -147,11 +147,11 @@ is_alive(Options, NS, ID) -> Pid =/= undefined andalso erlang:is_process_alive(Pid). % TODO nonuniform interface --spec list(mg_core_procreg:options(), mg_core:ns()) -> [{mg_core:ns(), mg_core:id(), pid()}]. +-spec list(mg_procreg:options(), mg_core:ns()) -> [{mg_core:ns(), mg_core:id(), pid()}]. list(Procreg, NS) -> [ {NS, ID, Pid} - || {?WRAP_ID(_, ID), Pid} <- mg_core_procreg:select(Procreg, ?WRAP_ID(NS, '$1')) + || {?WRAP_ID(_, ID), Pid} <- mg_procreg:select(Procreg, ?WRAP_ID(NS, '$1')) ]. %% @@ -300,10 +300,10 @@ schedule_unload_timer(State = #{unload_tref := UnloadTRef}) -> start_timer(State) -> erlang:start_timer(unload_timeout(State), erlang:self(), unload). 
--spec self_ref(options(), mg_core:ns(), mg_core:id()) -> mg_core_procreg:ref(). +-spec self_ref(options(), mg_core:ns(), mg_core:id()) -> mg_procreg:ref(). self_ref(Options, NS, ID) -> - mg_core_procreg:ref(procreg_options(Options), ?WRAP_ID(NS, ID)). + mg_procreg:ref(procreg_options(Options), ?WRAP_ID(NS, ID)). --spec procreg_options(options()) -> mg_core_procreg:options(). +-spec procreg_options(options()) -> mg_procreg:options(). procreg_options(#{registry := ProcregOptions}) -> ProcregOptions. diff --git a/apps/mg_core/src/mg_core_workers_manager.erl b/apps/mg_core/src/mg_core_workers_manager.erl index fd25dc68..9953a171 100644 --- a/apps/mg_core/src/mg_core_workers_manager.erl +++ b/apps/mg_core/src/mg_core_workers_manager.erl @@ -47,7 +47,7 @@ -type options() :: #{ name := name(), pulse := mpulse:handler(), - registry := mg_core_procreg:options(), + registry := mg_procreg:options(), message_queue_len_limit => queue_limit(), % all but `registry` worker_options := mg_core_worker:options(), @@ -56,7 +56,7 @@ -type queue_limit() :: non_neg_integer(). 
-type ns_options() :: #{ - registry => mg_core_procreg:options(), + registry => mg_procreg:options(), message_queue_len_limit => queue_limit(), % all but `registry` worker_options => mg_core_worker:options(), diff --git a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl index a1e59d43..76042066 100644 --- a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl @@ -132,7 +132,7 @@ automaton_options() -> namespace => ?MH_NS, processor => ?MODULE, storage => mg_core_storage_memory, - worker => #{registry => mg_core_procreg_global}, + worker => #{registry => mg_procreg_global}, pulse => ?MODULE, notification => #{ namespace => ?MH_NS, diff --git a/apps/mg_core/test/mg_core_events_machine_SUITE.erl b/apps/mg_core/test/mg_core_events_machine_SUITE.erl index 06fa9669..221dc45a 100644 --- a/apps/mg_core/test/mg_core_events_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_machine_SUITE.erl @@ -439,7 +439,7 @@ events_machine_options(Base, StorageOptions, ProcessorOptions, NS) -> namespace => NS, storage => mg_cth:build_storage(NS, Storage), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl index ff70cee6..55c38a1d 100644 --- a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl @@ -195,7 +195,7 @@ events_machine_options(ProcessorOptions, NS) -> namespace => NS, storage => mg_cth:build_storage(NS, Storage), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_events_stash_SUITE.erl b/apps/mg_core/test/mg_core_events_stash_SUITE.erl index 06435c04..5e929270 100644 --- 
a/apps/mg_core/test/mg_core_events_stash_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_stash_SUITE.erl @@ -231,7 +231,7 @@ stop_automaton(Pid) -> -spec events_machine_options(options()) -> mg_core_events_machine:options(). events_machine_options(Options) -> NS = maps:get(namespace, Options), - Scheduler = #{registry => mg_core_procreg_global, interval => 100}, + Scheduler = #{registry => mg_procreg_global, interval => 100}, #{ namespace => NS, processor => maps:get(processor, Options), @@ -242,7 +242,7 @@ events_machine_options(Options) -> existing_storage_name => ?MODULE }}, worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl index 2b8786c8..36ae60d5 100644 --- a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl +++ b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl @@ -204,7 +204,7 @@ automaton_options(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -226,7 +226,7 @@ automaton_options_wo_shedulers(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl index ddbd98b5..4e4e852a 100644 --- a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl +++ b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl @@ -127,7 +127,7 @@ automaton_options(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => 
mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_interrupted_SUITE.erl b/apps/mg_core/test/mg_core_interrupted_SUITE.erl index 3db76725..1c15c5f8 100644 --- a/apps/mg_core/test/mg_core_interrupted_SUITE.erl +++ b/apps/mg_core/test/mg_core_interrupted_SUITE.erl @@ -169,7 +169,7 @@ automaton_options(NS, StorageName) -> }} ), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_machine_SUITE.erl b/apps/mg_core/test/mg_core_machine_SUITE.erl index 871a10b2..10c76964 100644 --- a/apps/mg_core/test/mg_core_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_SUITE.erl @@ -78,9 +78,9 @@ end_per_suite(C) -> -spec init_per_group(group_name(), config()) -> config(). init_per_group(with_gproc, C) -> - [{registry, mg_core_procreg_gproc} | C]; + [{registry, mg_procreg_gproc} | C]; init_per_group(with_global, C) -> - [{registry, mg_core_procreg_global} | C]; + [{registry, mg_procreg_global} | C]; init_per_group(base, C) -> C. 
diff --git a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl index cd89286a..2ed26d6a 100644 --- a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl @@ -310,7 +310,7 @@ automaton_options() -> storage => mg_core_storage_memory, worker => #{ %% Use 'global' process registry - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl index f07efe32..999d350d 100644 --- a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl @@ -296,7 +296,7 @@ automaton_options(_C) -> processor => ?MODULE, storage => mg_core_storage_memory, worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => notification_options(), notification_processing_timeout => 500, diff --git a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl index 78267091..799f9a35 100644 --- a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl @@ -201,7 +201,7 @@ automaton_options(NS, RetryPolicy) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_workers_SUITE.erl b/apps/mg_core/test/mg_core_workers_SUITE.erl index 1f68e605..44dfa142 100644 --- a/apps/mg_core/test/mg_core_workers_SUITE.erl +++ b/apps/mg_core/test/mg_core_workers_SUITE.erl @@ -110,7 +110,7 @@ end_per_suite(C) -> -spec init_per_group(group_name(), config()) -> config(). 
init_per_group(with_global, C) -> [ - {registry, mg_core_procreg_global}, + {registry, mg_procreg_global}, {load_pressure, 100}, {runner_retry_strategy, #{ noproc => genlib_retry:linear(3, 100), @@ -120,7 +120,7 @@ init_per_group(with_global, C) -> ]; init_per_group(with_gproc, C) -> [ - {registry, mg_core_procreg_gproc}, + {registry, mg_procreg_gproc}, {load_pressure, 100}, {runner_retry_strategy, #{ noproc => genlib_retry:linear(3, 100), diff --git a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl index e462a0bc..71a1c71c 100644 --- a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl +++ b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl @@ -149,7 +149,7 @@ event_sink_ns_options() -> namespace => ?ES_ID, storage => mg_core_storage_memory, worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, pulse => ?MODULE, default_processing_timeout => 1000, diff --git a/apps/mg_procreg/src/mg_procreg.app.src b/apps/mg_procreg/src/mg_procreg.app.src new file mode 100644 index 00000000..d7480419 --- /dev/null +++ b/apps/mg_procreg/src/mg_procreg.app.src @@ -0,0 +1,17 @@ +{application, mg_procreg, [ + {description, "Machinegun process registry lib"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + gproc, + mg_utils + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_procreg.erl b/apps/mg_procreg/src/mg_procreg.erl similarity index 97% rename from apps/mg_core/src/mg_core_procreg.erl rename to apps/mg_procreg/src/mg_procreg.erl index 007ab90d..8d2827ba 100644 --- a/apps/mg_core/src/mg_core_procreg.erl +++ b/apps/mg_procreg/src/mg_procreg.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_procreg). +-module(mg_procreg). % Any term sans ephemeral ones, like `reference()`s / `pid()`s / `fun()`s. -type name() :: term(). diff --git a/apps/mg_core/src/mg_core_procreg_global.erl b/apps/mg_procreg/src/mg_procreg_global.erl similarity index 86% rename from apps/mg_core/src/mg_core_procreg_global.erl rename to apps/mg_procreg/src/mg_procreg_global.erl index 6e9e6d97..c7a8e200 100644 --- a/apps/mg_core/src/mg_core_procreg_global.erl +++ b/apps/mg_procreg/src/mg_procreg_global.erl @@ -1,8 +1,8 @@ --module(mg_core_procreg_global). +-module(mg_procreg_global). %% --behaviour(mg_core_procreg). +-behaviour(mg_procreg). -export([ref/2]). -export([reg_name/2]). @@ -15,15 +15,15 @@ %% --spec ref(options(), mg_core_procreg:name()) -> mg_core_procreg:ref(). +-spec ref(options(), mg_procreg:name()) -> mg_procreg:ref(). ref(_Options, Name) -> {global, Name}. --spec reg_name(options(), mg_core_procreg:name()) -> mg_core_procreg:reg_name(). +-spec reg_name(options(), mg_procreg:name()) -> mg_procreg:reg_name(). reg_name(Options, Name) -> ref(Options, Name). --spec select(options(), mg_core_procreg:name_pattern()) -> [{mg_core_procreg:name(), pid()}]. +-spec select(options(), mg_procreg:name_pattern()) -> [{mg_procreg:name(), pid()}]. select(_Options, NamePattern) -> lists:foldl( fun(Name, Acc) -> @@ -36,12 +36,12 @@ select(_Options, NamePattern) -> global:registered_names() ). --spec start_link(options(), mg_core_procreg:reg_name(), module(), _Args, list()) -> - mg_core_procreg:start_link_ret(). +-spec start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> + mg_procreg:start_link_ret(). start_link(_Options, RegName, Module, Args, Opts) -> gen_server:start_link(RegName, Module, Args, Opts). --spec call(options(), mg_core_procreg:ref(), _Call, timeout()) -> _Reply. +-spec call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. 
call(_Options, Ref, Call, Timeout) -> gen_server:call(Ref, Call, Timeout). diff --git a/apps/mg_core/src/mg_core_procreg_gproc.erl b/apps/mg_procreg/src/mg_procreg_gproc.erl similarity index 69% rename from apps/mg_core/src/mg_core_procreg_gproc.erl rename to apps/mg_procreg/src/mg_procreg_gproc.erl index a9a9e2e9..074f0dbe 100644 --- a/apps/mg_core/src/mg_core_procreg_gproc.erl +++ b/apps/mg_procreg/src/mg_procreg_gproc.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,11 +14,11 @@ %%% limitations under the License. %%% --module(mg_core_procreg_gproc). +-module(mg_procreg_gproc). %% --behaviour(mg_core_procreg). +-behaviour(mg_procreg). -export([ref/2]). -export([reg_name/2]). @@ -31,24 +31,24 @@ %% --spec ref(options(), mg_core_procreg:name()) -> mg_core_procreg:ref(). +-spec ref(options(), mg_procreg:name()) -> mg_procreg:ref(). ref(_Options, Name) -> {via, gproc, {n, l, Name}}. --spec reg_name(options(), mg_core_procreg:name()) -> mg_core_procreg:reg_name(). +-spec reg_name(options(), mg_procreg:name()) -> mg_procreg:reg_name(). reg_name(Options, Name) -> ref(Options, Name). --spec select(options(), mg_core_procreg:name_pattern()) -> [{mg_core_procreg:name(), pid()}]. +-spec select(options(), mg_procreg:name_pattern()) -> [{mg_procreg:name(), pid()}]. select(_Options, NamePattern) -> MatchSpec = [{{{n, l, NamePattern}, '_', '_'}, [], ['$$']}], [{Name, Pid} || [{n, l, Name}, Pid, _] <- gproc:select(MatchSpec)]. --spec start_link(options(), mg_core_procreg:reg_name(), module(), _Args, list()) -> - mg_core_procreg:start_link_ret(). +-spec start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> + mg_procreg:start_link_ret(). start_link(_Options, RegName, Module, Args, Opts) -> gen_server:start_link(RegName, Module, Args, Opts). 
--spec call(options(), mg_core_procreg:ref(), _Call, timeout()) -> _Reply. +-spec call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. call(_Options, Ref, Call, Timeout) -> gen_server:call(Ref, Call, Timeout). diff --git a/apps/mg_scheduler/src/mg_scheduler.app.src b/apps/mg_scheduler/src/mg_scheduler.app.src index 133d0547..6f640c39 100644 --- a/apps/mg_scheduler/src/mg_scheduler.app.src +++ b/apps/mg_scheduler/src/mg_scheduler.app.src @@ -9,6 +9,7 @@ gproc, gen_squad, mg_utils, + mg_procreg, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_scheduler/src/mg_skd.erl b/apps/mg_scheduler/src/mg_skd.erl index 6ddbc649..80711196 100644 --- a/apps/mg_scheduler/src/mg_skd.erl +++ b/apps/mg_scheduler/src/mg_skd.erl @@ -195,14 +195,14 @@ handle_info(Info, State) -> % Process registration --spec self_reg_name(id()) -> mg_skd_procreg:reg_name(). +-spec self_reg_name(id()) -> mg_procreg:reg_name(). self_reg_name(ID) -> %% TODO Decouple `mg_core'/`mg_skd' with `mg_procreg'. - mg_skd_procreg:reg_name(mg_skd_procreg_gproc, {?MODULE, ID}). + mg_procreg:reg_name(mg_procreg_gproc, {?MODULE, ID}). --spec self_ref(id()) -> mg_skd_procreg:ref(). +-spec self_ref(id()) -> mg_procreg:ref(). self_ref(ID) -> - mg_skd_procreg:ref(mg_skd_procreg_gproc, {?MODULE, ID}). + mg_procreg:ref(mg_procreg_gproc, {?MODULE, ID}). % Helpers diff --git a/apps/mg_scheduler/src/mg_skd_scanner.erl b/apps/mg_scheduler/src/mg_skd_scanner.erl index b31e2c9d..1276eacb 100644 --- a/apps/mg_scheduler/src/mg_skd_scanner.erl +++ b/apps/mg_scheduler/src/mg_skd_scanner.erl @@ -335,13 +335,13 @@ run_handler({Handler, State}, Function, Args) -> %% --spec self_reg_name(scheduler_id()) -> mg_skd_procreg:reg_name(). +-spec self_reg_name(scheduler_id()) -> mg_procreg:reg_name(). self_reg_name(SchedulerID) -> - mg_skd_procreg:reg_name(mg_skd_procreg_gproc, {?MODULE, SchedulerID}). + mg_procreg:reg_name(mg_procreg_gproc, {?MODULE, SchedulerID}). --spec self_ref(scheduler_id()) -> mg_skd_procreg:ref(). 
+-spec self_ref(scheduler_id()) -> mg_procreg:ref(). self_ref(SchedulerID) -> - mg_skd_procreg:ref(mg_skd_procreg_gproc, {?MODULE, SchedulerID}). + mg_procreg:ref(mg_procreg_gproc, {?MODULE, SchedulerID}). %% diff --git a/apps/mg_scheduler/src/mg_skd_worker.erl b/apps/mg_scheduler/src/mg_skd_worker.erl index c4e1aa00..73ef1644 100644 --- a/apps/mg_scheduler/src/mg_skd_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_worker.erl @@ -106,11 +106,11 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> -spec self_ref(scheduler_id()) -> mg_utils:gen_ref(). self_ref(ID) -> - mg_skd_procreg:ref(mg_skd_procreg_gproc, wrap_id(ID)). + mg_procreg:ref(mg_procreg_gproc, wrap_id(ID)). -spec self_reg_name(scheduler_id()) -> mg_utils:gen_reg_name(). self_reg_name(ID) -> - mg_skd_procreg:reg_name(mg_skd_procreg_gproc, wrap_id(ID)). + mg_procreg:reg_name(mg_procreg_gproc, wrap_id(ID)). -spec wrap_id(scheduler_id()) -> term(). wrap_id(ID) -> diff --git a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl index 3662a022..0caed6a3 100644 --- a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl @@ -178,7 +178,7 @@ mg_woody_config(Name, C) -> retries => #{}, event_stash_size => 0, worker => #{ - registry => mg_core_procreg_global, + registry => mg_procreg_global, sidecar => mg_cth_worker } }, diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index 7212ca8b..34206798 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -127,7 +127,7 @@ mg_woody_config(_C) -> ], event_stash_size => 10, worker => #{ - registry => mg_core_procreg_global, + registry => mg_procreg_global, sidecar => mg_cth_worker } } diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index 3a67094a..e64ba68e 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl 
+++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -364,7 +364,7 @@ mg_woody_config(C) -> }} ], worker => #{ - registry => mg_core_procreg_global, + registry => mg_procreg_global, sidecar => mg_cth_worker } } @@ -738,7 +738,7 @@ config_with_multiple_event_sinks(_C) -> {mg_event_sink_machine, #{name => default, machine_id => <<"SingleES">>}} ], worker => #{ - registry => mg_core_procreg_global, + registry => mg_procreg_global, sidecar => mg_cth_worker } }, @@ -766,7 +766,7 @@ config_with_multiple_event_sinks(_C) -> }} ], worker => #{ - registry => mg_core_procreg_global, + registry => mg_procreg_global, sidecar => mg_cth_worker } } diff --git a/config/config.yaml b/config/config.yaml index 9d9d1e56..882ab981 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -136,9 +136,10 @@ cluster: # optional, default value 5000 ms reconnect_timeout: 5000 -# if undefined then 'mg_core_procreg_gproc' will be used +# TODO Use aliases, not actual module names. +# if undefined then 'mg_procreg_gproc' will be used process_registry: - module: mg_core_procreg_global + module: mg_procreg_global limits: process_heap: 2M # heap limit diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index d6903875..ad6bd622 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -270,8 +270,8 @@ health_check(YamlConfig) -> health_check_fun(_YamlConfig) -> %% TODO Review necessity of that configuration handle - %% case ?C:conf([process_registry, module], YamlConfig, <<"mg_core_procreg_global">>) of - %% <<"mg_core_procreg_global">> -> global + %% case ?C:conf([process_registry, module], YamlConfig, <<"mg_procreg_global">>) of + %% <<"mg_procreg_global">> -> global %% end. global. @@ -565,11 +565,14 @@ event_sink(kafka, Name, ESYamlConfig) -> }}. 
procreg(YamlConfig) -> - % Use process_registry if it's set up or gproc otherwise + %% Use process_registry if it's set up or gproc otherwise + %% TODO Add support for aliases for procreg modules. It's + %% improper to expose internal module name in yaml + %% configuration file. conf_with( [process_registry], YamlConfig, - mg_core_procreg_gproc, + mg_procreg_gproc, fun(ProcRegYamlConfig) -> ?C:atom(?C:conf([module], ProcRegYamlConfig)) end ). From 6fc6bf424ef67a93e909ca332e38e6c1cb3e4505 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 2 May 2024 20:03:10 +0300 Subject: [PATCH 11/31] Refactors procreg to simplify impl modules --- apps/mg_procreg/src/mg_procreg.erl | 48 ++++++++++++++++++++--- apps/mg_procreg/src/mg_procreg_global.erl | 12 ------ apps/mg_procreg/src/mg_procreg_gproc.erl | 12 ------ apps/mg_scheduler/src/mg_skd.erl | 1 - apps/mg_utils/src/mg_utils.erl | 24 +++++++++++- 5 files changed, 64 insertions(+), 33 deletions(-) diff --git a/apps/mg_procreg/src/mg_procreg.erl b/apps/mg_procreg/src/mg_procreg.erl index 8d2827ba..16d97ba2 100644 --- a/apps/mg_procreg/src/mg_procreg.erl +++ b/apps/mg_procreg/src/mg_procreg.erl @@ -57,6 +57,11 @@ -callback start_link(procreg_options(), reg_name(), module(), _Args, list()) -> start_link_ret(). +-optional_callbacks([ + call/4, + start_link/5 +]). + %% -spec ref(options(), name()) -> ref(). @@ -67,22 +72,53 @@ ref(Options, Name) -> reg_name(Options, Name) -> mg_utils:apply_mod_opts(Options, reg_name, [Name]). +%% TODO Review usage of this function -spec select(options(), name_pattern()) -> [{name(), pid()}]. select(Options, NamePattern) -> mg_utils:apply_mod_opts(Options, select, [NamePattern]). +%% @doc Functions `call/3', `call/4' and `start_link/5' wrap according +%% `gen_server:call/4' and `gen_server:start_link/4' calls to a +%% worker's gen_server implementation. 
+%% +%% Direct interaction with a process registry implementation is +%% expected to be performed by separate module with an actual process +%% registry "behaviour". +%% In general this behaviour requires `ref/2' +%% (for referencing pid) and `reg_name/2' (for registering new +%% reference for pid) callbacks that return via-tuple: +%% +%% {via, RegMod :: module(), ViaName :: term()} +%% +%% Register the gen_server process with the registry represented +%% by RegMod. The RegMod callback is to export the functions +%% register_name/2, unregister_name/1, whereis_name/1, and send/2, +%% which are to behave like the corresponding functions in +%% global. Thus, {via,global,GlobalName} is a valid reference +%% equivalent to {global,GlobalName}. +%% +%% @end -spec call(options(), name(), _Call) -> _Reply. call(Options, Name, Call) -> call(Options, Name, Call, 5000). -spec call(options(), name(), _Call, timeout()) -> _Reply. call(Options, Name, Call, Timeout) -> - mg_utils:apply_mod_opts(Options, call, [ref(Options, Name), Call, Timeout]). + CallArgs = [ref(Options, Name), Call, Timeout], + mg_utils:apply_mod_opts_with_fallback(Options, call, fun gen_call/4, CallArgs). -spec start_link(options(), name(), module(), _Args, list()) -> start_link_ret(). start_link(Options, Name, Module, Args, Opts) -> - mg_utils:apply_mod_opts( - Options, - start_link, - [reg_name(Options, Name), Module, Args, Opts] - ). + StartArgs = [reg_name(Options, Name), Module, Args, Opts], + mg_utils:apply_mod_opts_with_fallback(Options, start_link, fun gen_start_link/5, StartArgs). + +%% Internal + +-spec gen_start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> + mg_procreg:start_link_ret(). +gen_start_link(_Options, RegName, Module, Args, Opts) -> + gen_server:start_link(RegName, Module, Args, Opts). + +-spec gen_call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. +gen_call(_Options, Ref, Call, Timeout) -> + gen_server:call(Ref, Call, Timeout). 
diff --git a/apps/mg_procreg/src/mg_procreg_global.erl b/apps/mg_procreg/src/mg_procreg_global.erl index c7a8e200..d12f4fa3 100644 --- a/apps/mg_procreg/src/mg_procreg_global.erl +++ b/apps/mg_procreg/src/mg_procreg_global.erl @@ -8,9 +8,6 @@ -export([reg_name/2]). -export([select/2]). --export([start_link/5]). --export([call/4]). - -type options() :: undefined. %% @@ -36,15 +33,6 @@ select(_Options, NamePattern) -> global:registered_names() ). --spec start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> - mg_procreg:start_link_ret(). -start_link(_Options, RegName, Module, Args, Opts) -> - gen_server:start_link(RegName, Module, Args, Opts). - --spec call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. -call(_Options, Ref, Call, Timeout) -> - gen_server:call(Ref, Call, Timeout). - %% Internal functions -spec match(term(), term()) -> boolean(). diff --git a/apps/mg_procreg/src/mg_procreg_gproc.erl b/apps/mg_procreg/src/mg_procreg_gproc.erl index 074f0dbe..027b84f5 100644 --- a/apps/mg_procreg/src/mg_procreg_gproc.erl +++ b/apps/mg_procreg/src/mg_procreg_gproc.erl @@ -24,9 +24,6 @@ -export([reg_name/2]). -export([select/2]). --export([start_link/5]). --export([call/4]). - -type options() :: undefined. %% @@ -43,12 +40,3 @@ reg_name(Options, Name) -> select(_Options, NamePattern) -> MatchSpec = [{{{n, l, NamePattern}, '_', '_'}, [], ['$$']}], [{Name, Pid} || [{n, l, Name}, Pid, _] <- gproc:select(MatchSpec)]. - --spec start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> - mg_procreg:start_link_ret(). -start_link(_Options, RegName, Module, Args, Opts) -> - gen_server:start_link(RegName, Module, Args, Opts). - --spec call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. -call(_Options, Ref, Call, Timeout) -> - gen_server:call(Ref, Call, Timeout). 
diff --git a/apps/mg_scheduler/src/mg_skd.erl b/apps/mg_scheduler/src/mg_skd.erl index 80711196..7c3ea07f 100644 --- a/apps/mg_scheduler/src/mg_skd.erl +++ b/apps/mg_scheduler/src/mg_skd.erl @@ -197,7 +197,6 @@ handle_info(Info, State) -> -spec self_reg_name(id()) -> mg_procreg:reg_name(). self_reg_name(ID) -> - %% TODO Decouple `mg_core'/`mg_skd' with `mg_procreg'. mg_procreg:reg_name(mg_procreg_gproc, {?MODULE, ID}). -spec self_ref(id()) -> mg_procreg:ref(). diff --git a/apps/mg_utils/src/mg_utils.erl b/apps/mg_utils/src/mg_utils.erl index 5e5eeb85..c8d92856 100644 --- a/apps/mg_utils/src/mg_utils.erl +++ b/apps/mg_utils/src/mg_utils.erl @@ -54,6 +54,7 @@ -export([apply_mod_opts/3]). -export([apply_mod_opts_if_defined/3]). -export([apply_mod_opts_if_defined/4]). +-export([apply_mod_opts_with_fallback/4]). -export([separate_mod_opts/1]). -export([separate_mod_opts/2]). @@ -238,14 +239,33 @@ apply_mod_opts_if_defined(ModOpts, Function, Default) -> -spec apply_mod_opts_if_defined(mod_opts(), atom(), _Default, list(_Arg)) -> _Result. apply_mod_opts_if_defined(ModOpts, Function, Default, Args) -> + case prepare_applicable_mod_opts(ModOpts, Function, Args) of + {ok, {Mod, Function, FunctionArgs}} -> + erlang:apply(Mod, Function, FunctionArgs); + {error, {undefined, _FunctionArgs}} -> + Default + end. + +-spec apply_mod_opts_with_fallback(mod_opts(), atom(), Fallback :: fun(), list(_Arg)) -> _Result. +apply_mod_opts_with_fallback(ModOpts, Function, Fallback, Args) -> + case prepare_applicable_mod_opts(ModOpts, Function, Args) of + {ok, {Mod, Function, FunctionArgs}} -> + erlang:apply(Mod, Function, FunctionArgs); + {error, {undefined, FunctionArgs}} -> + erlang:apply(Fallback, FunctionArgs) + end. + +-spec prepare_applicable_mod_opts(mod_opts(), atom(), list(_Arg)) -> + {ok, MFArgs :: {module(), atom(), list(_Arg)}} | {error, {undefined, list(_Arg)}}. 
+prepare_applicable_mod_opts(ModOpts, Function, Args) -> {Mod, Arg} = separate_mod_opts(ModOpts), FunctionArgs = [Arg | Args], ok = maybe_load_module(Mod), case erlang:function_exported(Mod, Function, length(FunctionArgs)) of true -> - erlang:apply(Mod, Function, FunctionArgs); + {ok, {Mod, Function, FunctionArgs}}; false -> - Default + {error, {undefined, FunctionArgs}} end. -spec maybe_load_module(module()) -> ok. From 4c40efecd7233a1ef90c66af47ed88ed4c12d823 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 21 May 2024 13:25:04 +0300 Subject: [PATCH 12/31] Removes eventsink machine and cleans up deps --- Makefile | 5 - apps/machinegun/src/machinegun.app.src | 1 - .../test/mg_prometheus_metric_SUITE.erl | 4 - apps/machinegun/test/mg_tests_SUITE.erl | 8 - apps/mg_conf/src/mg_conf.app.src | 1 - apps/mg_conf/src/mg_conf.erl | 78 +---- apps/mg_cth/src/mg_cth.app.src | 1 - ...g_cth_configurator.erl => mg_cth_conf.erl} | 3 +- apps/mg_cth/src/mg_cth_processor.erl | 2 +- apps/mg_es_machine/rebar.config | 3 - apps/mg_es_machine/src/mg_es_machine.app.src | 16 - .../src/mg_event_sink_machine.erl | 299 ------------------ .../test/mg_event_sink_machine_SUITE.erl | 169 ---------- apps/mg_woody/src/mg_woody.app.src | 1 - apps/mg_woody/src/mg_woody.erl | 7 +- apps/mg_woody/src/mg_woody_event_sink.erl | 66 +--- apps/mg_woody/src/mg_woody_packer.erl | 25 -- apps/mg_woody/src/mg_woody_utils.erl | 1 - apps/mg_woody/test/mg_event_sink_client.erl | 51 --- .../test/mg_modernizer_tests_SUITE.erl | 4 - apps/mg_woody/test/mg_stress_SUITE.erl | 13 +- apps/mg_woody/test/mg_woody_tests_SUITE.erl | 152 +-------- config/config.yaml | 19 -- docs/c4.dsl | 10 - rebar.lock | 8 +- rel_scripts/configurator.escript | 15 - 26 files changed, 37 insertions(+), 925 deletions(-) rename apps/mg_cth/src/{mg_cth_configurator.erl => mg_cth_conf.erl} (83%) delete mode 100644 apps/mg_es_machine/rebar.config delete mode 100644 apps/mg_es_machine/src/mg_es_machine.app.src delete mode 100644 
apps/mg_es_machine/src/mg_event_sink_machine.erl delete mode 100644 apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl delete mode 100644 apps/mg_woody/test/mg_event_sink_client.erl diff --git a/Makefile b/Makefile index 622758d7..9a150cd1 100644 --- a/Makefile +++ b/Makefile @@ -107,9 +107,4 @@ cover-report: $(REBAR) cover test-configurator: - $(MAKE) $(FILE_PERMISSIONS) ERL_LIBS=_build/default/lib ./rel_scripts/configurator.escript config/config.yaml config - -FILE_PERMISSIONS = $(patsubst %,%.target,$(wildcard config/*._perms)) -$(FILE_PERMISSIONS): config/%._perms.target: config/%._perms - chmod $$(cat $^) config/$* diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index 03beff5c..8f236ffb 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -30,7 +30,6 @@ mg_core, mg_riak, mg_es_kafka, - mg_es_machine, mg_woody, opentelemetry_api, opentelemetry_exporter, diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 09364c19..ad4354c3 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -548,10 +548,6 @@ mg_config() -> [ {woody_server, #{ip => {0, 0, 0, 0}, port => 8022}}, {namespaces, #{}}, - {event_sink_ns, #{ - storage => mg_core_storage_memory, - registry => mg_procreg_global - }}, {pulse, {mg_pulse, #{}}} ]. 
diff --git a/apps/machinegun/test/mg_tests_SUITE.erl b/apps/machinegun/test/mg_tests_SUITE.erl index 3f2c6175..a487529b 100644 --- a/apps/machinegun/test/mg_tests_SUITE.erl +++ b/apps/machinegun/test/mg_tests_SUITE.erl @@ -247,10 +247,6 @@ mg_config(#{endpoint := {IP, Port}}, C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_event_sink_machine, #{ - name => machine, - machine_id => ?ES_ID - }}, {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, @@ -259,10 +255,6 @@ mg_config(#{endpoint := {IP, Port}}, C) -> ] } }}, - {event_sink_ns, #{ - storage => mg_core_storage_memory, - default_processing_timeout => 5000 - }}, {pulse, {mg_pulse, #{}}} ]. diff --git a/apps/mg_conf/src/mg_conf.app.src b/apps/mg_conf/src/mg_conf.app.src index b82b0e55..5bd86d93 100644 --- a/apps/mg_conf/src/mg_conf.app.src +++ b/apps/mg_conf/src/mg_conf.app.src @@ -10,7 +10,6 @@ mg_core, mg_riak, mg_es_kafka, - mg_es_machine, mg_woody ]}, {env, []}, diff --git a/apps/mg_conf/src/mg_conf.erl b/apps/mg_conf/src/mg_conf.erl index b495dcec..e6828692 100644 --- a/apps/mg_conf/src/mg_conf.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -21,24 +21,16 @@ event_stash_size := non_neg_integer() }. --type event_sink_ns() :: #{ - default_processing_timeout := timeout(), - storage => mg_core_storage:options(), - worker => mg_core_worker:options() -}. - -type namespaces() :: #{mg_core:ns() => events_machines()}. -type config() :: #{ woody_server := mg_woody:woody_server(), - event_sink_ns := event_sink_ns(), namespaces := namespaces(), quotas => [mg_skd_quota_worker:options()], pulse := pulse(), health_check => erl_health:check() }. --export_type([event_sink_ns/0]). -export_type([namespaces/0]). -export_type([config/0]). 
@@ -50,7 +42,6 @@ construct_child_specs( #{ woody_server := WoodyServer, - event_sink_ns := EventSinkNS, namespaces := Namespaces, pulse := Pulse } = Config, @@ -61,14 +52,12 @@ construct_child_specs( ClusterOpts = maps:get(cluster, Config, #{}), QuotasChildSpec = quotas_child_specs(Quotas, quota), - EventSinkChildSpec = event_sink_ns_child_spec(EventSinkNS, event_sink, Pulse), - EventMachinesChildSpec = events_machines_child_specs(Namespaces, EventSinkNS, Pulse), + EventMachinesChildSpec = events_machines_child_specs(Namespaces, Pulse), WoodyServerChildSpec = mg_woody:child_spec( woody_server, #{ pulse => Pulse, - automaton => api_automaton_options(Namespaces, EventSinkNS, Pulse), - event_sink => api_event_sink_options(Namespaces, EventSinkNS, Pulse), + automaton => api_automaton_options(Namespaces, Pulse), woody_server => WoodyServer, additional_routes => AdditionalRoutes } @@ -77,7 +66,6 @@ construct_child_specs( lists:flatten([ QuotasChildSpec, - EventSinkChildSpec, EventMachinesChildSpec, ClusterSpec, WoodyServerChildSpec @@ -92,22 +80,16 @@ quotas_child_specs(Quotas, ChildID) -> || Options <- Quotas ]. --spec events_machines_child_specs(namespaces(), event_sink_ns(), pulse()) -> supervisor:child_spec(). -events_machines_child_specs(NSs, EventSinkNS, Pulse) -> - NsOptions = [ - events_machine_options(NS, NSs, EventSinkNS, Pulse) - || NS <- maps:keys(NSs) - ], +-spec events_machines_child_specs(namespaces(), pulse()) -> supervisor:child_spec(). +events_machines_child_specs(NSs, Pulse) -> + NsOptions = [events_machine_options(NS, NSs, Pulse) || NS <- maps:keys(NSs)], mg_conf_namespace_sup:child_spec(NsOptions, namespaces_sup). --spec events_machine_options(mg_core:ns(), namespaces(), event_sink_ns(), pulse()) -> mg_core_events_machine:options(). -events_machine_options(NS, NSs, EventSinkNS, Pulse) -> +-spec events_machine_options(mg_core:ns(), namespaces(), pulse()) -> mg_core_events_machine:options(). 
+events_machine_options(NS, NSs, Pulse) -> NSConfigs = maps:get(NS, NSs), #{processor := ProcessorConfig, storage := Storage} = NSConfigs, - EventSinks = [ - event_sink_options(SinkConfig, EventSinkNS, Pulse) - || SinkConfig <- maps:get(event_sinks, NSConfigs, []) - ], + EventSinks = [event_sink_options(SinkConfig, Pulse) || SinkConfig <- maps:get(event_sinks, NSConfigs, [])], EventsStorage = sub_storage_options(<<"events">>, Storage), #{ namespace => NS, @@ -147,14 +129,14 @@ machine_options(NS, Config, Pulse) -> suicide_probability => maps:get(suicide_probability, Config, undefined) }. --spec api_automaton_options(namespaces(), event_sink_ns(), pulse()) -> mg_woody_automaton:options(). -api_automaton_options(NSs, EventSinkNS, Pulse) -> +-spec api_automaton_options(namespaces(), pulse()) -> mg_woody_automaton:options(). +api_automaton_options(NSs, Pulse) -> maps:fold( fun(NS, ConfigNS, Options) -> Options#{ NS => maps:merge( #{ - machine => events_machine_options(NS, NSs, EventSinkNS, Pulse) + machine => events_machine_options(NS, NSs, Pulse) }, modernizer_options(maps:get(modernizer, ConfigNS, undefined), Pulse) ) @@ -164,47 +146,13 @@ api_automaton_options(NSs, EventSinkNS, Pulse) -> NSs ). --spec event_sink_options(mg_core_event_sink:handler(), event_sink_ns(), pulse()) -> mg_core_event_sink:handler(). -event_sink_options({mg_event_sink_machine, EventSinkConfig}, EvSinks, Pulse) -> - EventSinkNS = event_sink_namespace_options(EvSinks, Pulse), - {mg_event_sink_machine, maps:merge(EventSinkNS, EventSinkConfig)}; -event_sink_options({mg_event_sink_kafka, EventSinkConfig}, _Config, Pulse) -> +-spec event_sink_options(mg_core_event_sink:handler(), pulse()) -> mg_core_event_sink:handler(). +event_sink_options({mg_event_sink_kafka, EventSinkConfig}, Pulse) -> {mg_event_sink_kafka, EventSinkConfig#{ pulse => Pulse, encoder => fun mg_woody_event_sink:serialize/3 }}. --spec event_sink_ns_child_spec(event_sink_ns(), atom(), pulse()) -> supervisor:child_spec(). 
-event_sink_ns_child_spec(EventSinkNS, ChildID, Pulse) -> - mg_event_sink_machine:child_spec(event_sink_namespace_options(EventSinkNS, Pulse), ChildID). - --spec api_event_sink_options(namespaces(), event_sink_ns(), pulse()) -> mg_woody_event_sink:options(). -api_event_sink_options(NSs, EventSinkNS, Pulse) -> - EventSinkMachines = collect_event_sink_machines(NSs), - {EventSinkMachines, event_sink_namespace_options(EventSinkNS, Pulse)}. - --spec collect_event_sink_machines(namespaces()) -> [mg_core:id()]. -collect_event_sink_machines(NSs) -> - NSConfigs = maps:values(NSs), - EventSinks = ordsets:from_list([ - maps:get(machine_id, SinkConfig) - || NSConfig <- NSConfigs, {mg_event_sink_machine, SinkConfig} <- maps:get(event_sinks, NSConfig, []) - ]), - ordsets:to_list(EventSinks). - --spec event_sink_namespace_options(event_sink_ns(), pulse()) -> mg_event_sink_machine:ns_options(). -event_sink_namespace_options(#{storage := Storage} = EventSinkNS, Pulse) -> - NS = <<"_event_sinks">>, - MachinesStorage = sub_storage_options(<<"machines">>, Storage), - EventsStorage = sub_storage_options(<<"events">>, Storage), - EventSinkNS#{ - namespace => NS, - pulse => Pulse, - storage => MachinesStorage, - events_storage => EventsStorage, - worker => worker_manager_options(EventSinkNS) - }. - -spec worker_manager_options(map()) -> mg_core_workers_manager:ns_options(). 
worker_manager_options(Config) -> maps:merge( diff --git a/apps/mg_cth/src/mg_cth.app.src b/apps/mg_cth/src/mg_cth.app.src index 34f44b09..a73f6609 100644 --- a/apps/mg_cth/src/mg_cth.app.src +++ b/apps/mg_cth/src/mg_cth.app.src @@ -8,7 +8,6 @@ genlib, mg_utils, mg_core, - mg_es_machine, mg_es_kafka, mg_woody ]}, diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_conf.erl similarity index 83% rename from apps/mg_cth/src/mg_cth_configurator.erl rename to apps/mg_cth/src/mg_cth_conf.erl index 7e9737d9..b87e4e86 100644 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ b/apps/mg_cth/src/mg_cth_conf.erl @@ -1,10 +1,9 @@ --module(mg_cth_configurator). +-module(mg_cth_conf). -export([construct_child_specs/1]). -type config() :: #{ woody_server := mg_woody:woody_server(), - event_sink_ns := mg_conf:event_sink_ns(), namespaces := mg_conf:namespaces(), quotas => [mg_skd_quota_worker:options()] }. diff --git a/apps/mg_cth/src/mg_cth_processor.erl b/apps/mg_cth/src/mg_cth_processor.erl index 4c58339d..6c7d235b 100644 --- a/apps/mg_cth/src/mg_cth_processor.erl +++ b/apps/mg_cth/src/mg_cth_processor.erl @@ -103,7 +103,7 @@ start_link(ID, {Host, Port}, Options, MgConfig) -> ) } ) - | mg_cth_configurator:construct_child_specs(MgConfig) + | mg_cth_conf:construct_child_specs(MgConfig) ], case genlib_adhoc_supervisor:start_link(Flags, ChildsSpecs) of {ok, SupPid} -> diff --git a/apps/mg_es_machine/rebar.config b/apps/mg_es_machine/rebar.config deleted file mode 100644 index 02763852..00000000 --- a/apps/mg_es_machine/rebar.config +++ /dev/null @@ -1,3 +0,0 @@ -{deps, [ - {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} -]}. 
diff --git a/apps/mg_es_machine/src/mg_es_machine.app.src b/apps/mg_es_machine/src/mg_es_machine.app.src deleted file mode 100644 index f6d2383d..00000000 --- a/apps/mg_es_machine/src/mg_es_machine.app.src +++ /dev/null @@ -1,16 +0,0 @@ -{application, mg_es_machine, [ - {description, "Event sink machine implementation"}, - {vsn, "1"}, - {registered, []}, - {applications, [ - kernel, - stdlib, - genlib, - mg_core - ]}, - {env, []}, - {modules, []}, - {maintainers, []}, - {licenses, []}, - {links, []} -]}. diff --git a/apps/mg_es_machine/src/mg_event_sink_machine.erl b/apps/mg_es_machine/src/mg_event_sink_machine.erl deleted file mode 100644 index 3d379826..00000000 --- a/apps/mg_es_machine/src/mg_event_sink_machine.erl +++ /dev/null @@ -1,299 +0,0 @@ -%%% -%%% Copyright 2024 Valitydev -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% - --module(mg_event_sink_machine). - -%% API --export_type([event_body/0]). --export_type([options/0]). --export_type([storage_options/0]). --export_type([ns_options/0]). --export([child_spec/2]). --export([start_link/1]). --export([get_history/3]). --export([repair/4]). - -%% mg_core_event_sink handler --behaviour(mg_core_event_sink). --export([add_events/6]). - -%% mg_core_machine handler --behaviour(mg_core_machine). --export([process_machine/7]). - -%% -%% API -%% --type event_body() :: #{ - source_ns => mg_core:ns(), - source_id => mg_core:id(), - event => mg_core_events:event() -}. 
--type event() :: mg_core_events:event(event_body()). --type options() :: #{ - name := atom(), - namespace := mg_core:ns(), - machine_id := mg_core:id(), - storage := storage_options(), - worker := mg_core_workers_manager:ns_options(), - pulse := mpulse:handler(), - events_storage := mg_core_storage:options(), - default_processing_timeout := timeout() -}. --type ns_options() :: #{ - namespace := mg_core:ns(), - storage := storage_options(), - worker := mg_core_workers_manager:ns_options(), - pulse := mpulse:handler(), - events_storage := storage_options(), - default_processing_timeout := timeout() -}. -% like mg_core_storage:options() except `name` --type storage_options() :: mg_utils:mod_opts(map()). - --spec child_spec(ns_options(), atom()) -> supervisor:child_spec(). -child_spec(Options, ChildID) -> - #{ - id => ChildID, - start => {?MODULE, start_link, [Options]}, - restart => permanent, - type => supervisor - }. - --spec start_link(ns_options()) -> mg_utils:gen_start_ret(). -start_link(Options) -> - genlib_adhoc_supervisor:start_link( - #{strategy => one_for_all}, - mg_utils:lists_compact([ - mg_core_machine:child_spec(machine_options(Options), automaton), - mg_core_storage:child_spec(events_storage_options(Options), events_storage) - ]) - ). - --spec add_events( - options(), - mg_core:ns(), - mg_core:id(), - [mg_core_events:event()], - ReqCtx, - Deadline -) -> ok when - ReqCtx :: mg_core:request_context(), - Deadline :: mg_core_deadline:deadline(). -add_events( - #{machine_id := EventSinkID} = Options, - SourceNS, - SourceMachineID, - Events, - ReqCtx, - Deadline -) -> - NSOptions = maps:without([machine_id, name], Options), - ok = mg_core_machine:call_with_lazy_start( - machine_options(NSOptions), - EventSinkID, - {add_events, SourceNS, SourceMachineID, Events}, - ReqCtx, - Deadline, - undefined - ). - --spec get_history(ns_options(), mg_core:id(), mg_core_events:history_range()) -> [event()]. 
-get_history(Options, EventSinkID, HistoryRange) -> - #{events_range := EventsRange} = get_state(Options, EventSinkID), - StorageOptions = events_storage_options(Options), - Batch = mg_core_dirange:fold( - fun(EventID, Batch) -> - Key = mg_core_events:add_machine_id( - EventSinkID, - mg_core_events:event_id_to_key(EventID) - ), - mg_core_storage:add_batch_request({get, Key}, Batch) - end, - mg_core_storage:new_batch(), - mg_core_events:intersect_range(EventsRange, HistoryRange) - ), - BatchResults = mg_core_storage:run_batch(StorageOptions, Batch), - lists:map( - fun({{get, Key}, {_Context, Value}}) -> - kv_to_sink_event(EventSinkID, {Key, Value}) - end, - BatchResults - ). - --spec repair(ns_options(), mg_core:id(), mg_core:request_context(), mg_core_deadline:deadline()) -> - ok. -repair(Options, EventSinkID, ReqCtx, Deadline) -> - mg_core_machine:repair(machine_options(Options), EventSinkID, undefined, ReqCtx, Deadline). - -%% -%% mg_core_processor handler -%% --type state() :: #{ - events_range => mg_core_events:events_range() -}. - --spec process_machine(Options, EventSinkID, Impact, PCtx, ReqCtx, Deadline, PackedState) -> - Result -when - Options :: ns_options(), - EventSinkID :: mg_core:id(), - Impact :: mg_core_machine:processor_impact(), - PCtx :: mg_core_machine:processing_context(), - ReqCtx :: mg_core:request_context(), - Deadline :: mg_core_deadline:deadline(), - PackedState :: mg_core_machine:machine_state(), - Result :: mg_core_machine:processor_result(). -process_machine(Options, EventSinkID, Impact, _PCtx, _ReqCtx, _Deadline, PackedState) -> - State = - case {Impact, PackedState} of - {{init, _}, null} -> new_state(); - {_, _} -> opaque_to_state(PackedState) - end, - NewState = process_machine_(Options, EventSinkID, Impact, State), - {{reply, ok}, sleep, state_to_opaque(NewState)}. - --spec process_machine_(ns_options(), mg_core:id(), mg_core_machine:processor_impact(), state()) -> - state(). 
-process_machine_(_, _, {init, undefined}, State) -> - State; -process_machine_(_, _, {repair, undefined}, State) -> - State; -process_machine_( - Options, - EventSinkID, - {call, {add_events, SourceNS, SourceMachineID, Events}}, - State -) -> - {SinkEvents, NewState} = generate_sink_events(SourceNS, SourceMachineID, Events, State), - ok = store_sink_events(Options, EventSinkID, SinkEvents), - NewState. - -%% - --spec store_sink_events(ns_options(), mg_core:id(), [event()]) -> ok. -store_sink_events(Options, EventSinkID, SinkEvents) -> - lists:foreach( - fun(SinkEvent) -> - store_event(Options, EventSinkID, SinkEvent) - end, - SinkEvents - ). - --spec store_event(ns_options(), mg_core:id(), event()) -> ok. -store_event(Options, EventSinkID, SinkEvent) -> - {Key, Value} = sink_event_to_kv(EventSinkID, SinkEvent), - _ = mg_core_storage:put( - events_storage_options(Options), - Key, - undefined, - Value, - [] - ), - ok. - --spec get_state(ns_options(), mg_core:id()) -> state(). -get_state(Options, EventSinkID) -> - try - #{state := State} = mg_core_machine:get(machine_options(Options), EventSinkID), - opaque_to_state(State) - catch - throw:{logic, machine_not_found} -> - new_state() - end. - --spec new_state() -> state(). -new_state() -> - #{events_range => undefined}. - --spec machine_options(ns_options()) -> mg_core_machine:options(). -machine_options( - Options = #{ - namespace := Namespace, storage := Storage, worker := Worker, pulse := Pulse - } -) -> - #{ - namespace => mg_utils:concatenate_namespaces(Namespace, <<"machines">>), - processor => {?MODULE, Options}, - storage => Storage, - worker => Worker, - pulse => Pulse - }. - --spec events_storage_options(ns_options()) -> mg_core_storage:options(). -events_storage_options(#{namespace := NS, events_storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), - {Mod, Options#{name => {NS, ?MODULE, events}, pulse => Handler}}. 
- -%% - --spec generate_sink_events(mg_core:ns(), mg_core:id(), [mg_core_events:event()], state()) -> - {[event()], state()}. -generate_sink_events(SourceNS, SourceMachineID, Events, State = #{events_range := EventsRange}) -> - Bodies = [generate_sink_event_body(SourceNS, SourceMachineID, Event) || Event <- Events], - {SinkEvents, NewEventsRange} = mg_core_events:generate_events_with_range(Bodies, EventsRange), - {SinkEvents, State#{events_range := NewEventsRange}}. - --spec generate_sink_event_body(mg_core:ns(), mg_core:id(), mg_core_events:event()) -> event_body(). -generate_sink_event_body(SourceNS, SourceMachineID, Event) -> - #{ - source_ns => SourceNS, - source_id => SourceMachineID, - event => Event - }. - -%% -%% packer to opaque -%% --spec state_to_opaque(state()) -> mg_core_storage:opaque(). -state_to_opaque(#{events_range := EventsRange}) -> - [1, mg_core_events:events_range_to_opaque(EventsRange)]. - --spec opaque_to_state(mg_core_storage:opaque()) -> state(). -opaque_to_state([1, EventsRange]) -> - #{ - events_range => mg_core_events:opaque_to_events_range(EventsRange) - }. - --spec sink_event_body_to_opaque(Vsn :: integer(), event_body()) -> mg_core_storage:opaque(). -sink_event_body_to_opaque(_Vsn, #{ - source_ns := SourceNS, - source_id := SourceMachineID, - event := Event -}) -> - [1, SourceNS, SourceMachineID, mg_core_events:event_to_opaque(Event)]. - --spec opaque_to_sink_event_body(Vsn :: integer(), mg_core_storage:opaque()) -> event_body(). -opaque_to_sink_event_body(_Vsn, [1, SourceNS, SourceMachineID, Event]) -> - #{ - source_ns => SourceNS, - source_id => SourceMachineID, - event => mg_core_events:opaque_to_event(Event) - }. - --spec sink_event_to_kv(mg_core:id(), event()) -> mg_core_storage:kv(). -sink_event_to_kv(EventSinkID, Event) -> - mg_core_events:add_machine_id( - EventSinkID, - mg_core_events:event_to_kv(Event, fun sink_event_body_to_opaque/2) - ). - --spec kv_to_sink_event(mg_core:id(), mg_core_storage:kv()) -> event(). 
-kv_to_sink_event(EventSinkID, Kvs) -> - mg_core_events:kv_to_event( - mg_core_events:remove_machine_id(EventSinkID, Kvs), - fun opaque_to_sink_event_body/2 - ). diff --git a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl b/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl deleted file mode 100644 index 71a1c71c..00000000 --- a/apps/mg_es_machine/test/mg_event_sink_machine_SUITE.erl +++ /dev/null @@ -1,169 +0,0 @@ -%%% -%%% Copyright 2017 RBKmoney -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% - --module(mg_event_sink_machine_SUITE). --include_lib("stdlib/include/assert.hrl"). --include_lib("common_test/include/ct.hrl"). - -%% tests descriptions --export([all/0]). --export([groups/0]). --export([init_per_suite/1]). --export([end_per_suite/1]). - -%% tests --export([add_events_test/1]). --export([get_unexisted_event_test/1]). --export([not_idempotent_add_get_events_test/1]). - -%% Pulse --export([handle_beat/2]). - -%% -%% tests descriptions -%% --type group_name() :: atom(). --type test_name() :: atom(). --type config() :: [{atom(), _}]. - --spec all() -> [test_name() | {group, group_name()}]. -all() -> - [ - {group, main} - ]. - --spec groups() -> [{group_name(), list(_), [test_name()]}]. -groups() -> - [ - {main, [sequence], [ - add_events_test, - get_unexisted_event_test, - not_idempotent_add_get_events_test - ]} - ]. - -%% -%% starting/stopping -%% --spec init_per_suite(config()) -> config(). 
-init_per_suite(C) -> - % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_event_sink_machine, '_', '_'}, x), - Apps = mg_cth:start_applications([mg_core]), - Pid = start_event_sink(event_sink_ns_options()), - true = erlang:unlink(Pid), - {Events, _} = mg_core_events:generate_events_with_range( - [{#{}, Body} || Body <- [1, 2, 3]], - undefined - ), - [{apps, Apps}, {pid, Pid}, {events, Events} | C]. - --spec end_per_suite(config()) -> ok. -end_per_suite(C) -> - ok = proc_lib:stop(?config(pid, C)), - mg_cth:stop_applications(?config(apps, C)). - -%% -%% tests -%% --define(ES_ID, <<"event_sink_id">>). --define(SOURCE_NS, <<"source_ns">>). --define(SOURCE_ID, <<"source_id">>). - --spec add_events_test(config()) -> _. -add_events_test(C) -> - ?assertEqual(ok, add_events(C)). - --spec get_unexisted_event_test(config()) -> _. -get_unexisted_event_test(_C) -> - [] = mg_event_sink_machine:get_history( - event_sink_ns_options(), - ?ES_ID, - {42, undefined, forward} - ). - --spec not_idempotent_add_get_events_test(config()) -> _. -not_idempotent_add_get_events_test(C) -> - ?assertEqual(ok, add_events(C)), - ConfigEvents = [ - #{event => Event, source_ns => ?SOURCE_NS, source_id => ?SOURCE_ID} - || Event <- ?config(events, C) - ], - ExpectedEvents = lists:zip( - lists:seq(1, erlang:length(?config(events, C)) * 2), - ConfigEvents ++ ConfigEvents - ), - ?assertEqual(ExpectedEvents, get_history(C)). - -%% -%% utils -%% - --spec add_events(config()) -> _. -add_events(C) -> - mg_event_sink_machine:add_events( - event_sink_options(), - ?SOURCE_NS, - ?SOURCE_ID, - ?config(events, C), - null, - mg_core_deadline:default() - ). - --spec get_history(config()) -> _. -get_history(_C) -> - HRange = {undefined, undefined, forward}, - % _ = ct:pal("~p", [PreparedEvents]), - EventsSinkEvents = mg_event_sink_machine:get_history( - event_sink_ns_options(), - ?ES_ID, - HRange - ), - [{ID, Body} || #{id := ID, body := Body} <- EventsSinkEvents]. 
- --spec start_event_sink(mg_event_sink_machine:ns_options()) -> pid(). -start_event_sink(Options) -> - mg_utils:throw_if_error( - genlib_adhoc_supervisor:start_link( - #{strategy => one_for_all}, - [mg_event_sink_machine:child_spec(Options, event_sink)] - ) - ). - --spec event_sink_ns_options() -> mg_event_sink_machine:ns_options(). -event_sink_ns_options() -> - #{ - namespace => ?ES_ID, - storage => mg_core_storage_memory, - worker => #{ - registry => mg_procreg_global - }, - pulse => ?MODULE, - default_processing_timeout => 1000, - events_storage => mg_core_storage_memory - }. - --spec event_sink_options() -> mg_event_sink_machine:options(). -event_sink_options() -> - NSOptions = event_sink_ns_options(), - NSOptions#{ - name => machine, - machine_id => ?ES_ID - }. - --spec handle_beat(_, mpulse:beat()) -> ok. -handle_beat(_, Beat) -> - ct:pal("~p", [Beat]). diff --git a/apps/mg_woody/src/mg_woody.app.src b/apps/mg_woody/src/mg_woody.app.src index 1745099d..fcdede20 100644 --- a/apps/mg_woody/src/mg_woody.app.src +++ b/apps/mg_woody/src/mg_woody.app.src @@ -26,7 +26,6 @@ woody, mg_utils, mg_core, - mg_es_machine, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_woody/src/mg_woody.erl b/apps/mg_woody/src/mg_woody.erl index 7d3eb8db..f3373b44 100644 --- a/apps/mg_woody/src/mg_woody.erl +++ b/apps/mg_woody/src/mg_woody.erl @@ -40,12 +40,9 @@ -type automaton() :: mg_woody_automaton:options(). --type event_sink() :: mg_woody_event_sink:options(). - -type options() :: #{ pulse := module(), automaton := automaton(), - event_sink := event_sink(), woody_server := woody_server(), additional_routes => [woody_server_thrift_http_handler:route(any())] }. 
@@ -55,7 +52,6 @@ child_spec(ID, Options) -> #{ woody_server := WoodyConfig, automaton := Automaton, - event_sink := EventSink, pulse := PulseHandler } = Options, WoodyOptions = maps:merge( @@ -66,8 +62,7 @@ child_spec(ID, Options) -> port => maps:get(port, WoodyConfig), event_handler => {mg_woody_event_handler, PulseHandler}, handlers => [ - mg_woody_automaton:handler(Automaton), - mg_woody_event_sink:handler(EventSink) + mg_woody_automaton:handler(Automaton) ] }, genlib_map:compact(#{ diff --git a/apps/mg_woody/src/mg_woody_event_sink.erl b/apps/mg_woody/src/mg_woody_event_sink.erl index 2f13e110..06845bef 100644 --- a/apps/mg_woody/src/mg_woody_event_sink.erl +++ b/apps/mg_woody/src/mg_woody_event_sink.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 Valitydev +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -18,54 +18,7 @@ -include_lib("mg_proto/include/mg_proto_event_sink_thrift.hrl"). -%% API --export([handler/1]). -export([serialize/3]). --export_type([options/0]). - -%% woody handler --behaviour(woody_server_thrift_handler). --export([handle_function/4]). - -%% -%% API -%% --type options() :: {[mg_core:id()], mg_event_sink_machine:ns_options()}. - --spec handler(options()) -> mg_woody_utils:woody_handler(). -handler(Options) -> - {"/v1/event_sink", {{mg_proto_state_processing_thrift, 'EventSink'}, {?MODULE, Options}}}. - -%% -%% woody handler -%% --spec handle_function(woody:func(), woody:args(), woody_context:ctx(), options()) -> - {ok, _Result} | no_return(). 
- -handle_function('GetHistory', {EventSinkID, Range}, WoodyContext, {AvaliableEventSinks, Options}) -> - ReqCtx = mg_woody_utils:woody_context_to_opaque(WoodyContext), - DefaultTimeout = maps:get(default_processing_timeout, Options), - DefaultDeadline = mg_core_deadline:from_timeout(DefaultTimeout), - Deadline = mg_woody_utils:get_deadline(WoodyContext, DefaultDeadline), - SinkHistory = - mg_woody_utils:handle_error( - #{ - namespace => undefined, - machine_id => EventSinkID, - request_context => ReqCtx, - deadline => Deadline - }, - fun() -> - _ = check_event_sink(AvaliableEventSinks, EventSinkID), - mg_event_sink_machine:get_history( - Options, - EventSinkID, - mg_woody_packer:unpack(history_range, Range) - ) - end, - pulse(Options) - ), - {ok, mg_woody_packer:pack(sink_history, SinkHistory)}. %% %% event_sink events encoder @@ -96,20 +49,3 @@ serialize(SourceNS, SourceID, Event) -> {error, Reason} -> erlang:error({?MODULE, Reason}) end. - -%% -%% Internals -%% - --spec check_event_sink([mg_core:id()], mg_core:id()) -> ok | no_return(). -check_event_sink(AvaliableEventSinks, EventSinkID) -> - case lists:member(EventSinkID, AvaliableEventSinks) of - true -> - ok; - false -> - throw({logic, event_sink_not_found}) - end. - --spec pulse(mg_event_sink_machine:ns_options()) -> mpulse:handler(). -pulse(#{pulse := Pulse}) -> - Pulse. 
diff --git a/apps/mg_woody/src/mg_woody_packer.erl b/apps/mg_woody/src/mg_woody_packer.erl index dc125956..8a1c62a1 100644 --- a/apps/mg_woody/src/mg_woody_packer.erl +++ b/apps/mg_woody/src/mg_woody_packer.erl @@ -211,18 +211,6 @@ pack(machine_descriptor, {NS, Ref, Range}) -> ref = pack(ref, Ref), range = pack(history_range, Range) }; -pack(sink_event, #{ - id := ID, - body := #{source_ns := SourceNS, source_id := SourceID, event := Event} -}) -> - #mg_stateproc_SinkEvent{ - id = pack(event_id, ID), - source_id = pack(id, SourceID), - source_ns = pack(ns, SourceNS), - event = pack(event, Event) - }; -pack(sink_history, SinkHistory) -> - pack({list, sink_event}, SinkHistory); pack(Type, Value) -> erlang:error(badarg, [Type, Value]). @@ -413,19 +401,6 @@ unpack(history_range, #mg_stateproc_HistoryRange{ {unpack(event_id, After), unpack(integer, Limit), unpack(direction, Direction)}; unpack(machine_descriptor, #mg_stateproc_MachineDescriptor{ns = NS, ref = Ref, range = Range}) -> {unpack(ns, NS), unpack(ref, Ref), unpack(history_range, Range)}; -unpack(sink_event, SinkEvent) -> - #mg_stateproc_SinkEvent{id = ID, source_ns = SourceNS, source_id = SourceID, event = Event} = - SinkEvent, - #{ - id => unpack(id, ID), - body => #{ - source_ns => unpack(ns, SourceNS), - source_id => unpack(id, SourceID), - event => unpack(event, Event) - } - }; -unpack(sink_history, SinkHistory) -> - unpack({list, sink_event}, SinkHistory); unpack(Type, Value) -> erlang:error(badarg, [Type, Value]). 
diff --git a/apps/mg_woody/src/mg_woody_utils.erl b/apps/mg_woody/src/mg_woody_utils.erl index 0b0ab2cc..574f18be 100644 --- a/apps/mg_woody/src/mg_woody_utils.erl +++ b/apps/mg_woody/src/mg_woody_utils.erl @@ -88,7 +88,6 @@ handle_logic_error(machine_already_exist) -> #mg_stateproc_MachineAlreadyExists{ handle_logic_error(machine_failed) -> #mg_stateproc_MachineFailed{}; handle_logic_error(machine_already_working) -> #mg_stateproc_MachineAlreadyWorking{}; handle_logic_error(namespace_not_found) -> #mg_stateproc_NamespaceNotFound{}; -handle_logic_error(event_sink_not_found) -> #mg_stateproc_EventSinkNotFound{}; % TODO обработать случай создания машины c некорректным ID в рамках thrift handle_logic_error({invalid_machine_id, _}) -> #mg_stateproc_MachineNotFound{}. diff --git a/apps/mg_woody/test/mg_event_sink_client.erl b/apps/mg_woody/test/mg_event_sink_client.erl deleted file mode 100644 index 469f5b68..00000000 --- a/apps/mg_woody/test/mg_event_sink_client.erl +++ /dev/null @@ -1,51 +0,0 @@ -%%% -%%% Copyright 2020 Valitydev -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% - --module(mg_event_sink_client). - -%% API --export_type([options/0]). --export([get_history/3]). - -%% -%% API -%% --type options() :: URL :: string(). - --spec get_history(options(), mg_core:id(), mg_proto_state_processing_thrift:'HistoryRange'()) -> - mg_proto_state_processing_thrift:'SinkHistory'(). 
-get_history(BaseURL, EventSinkID, Range) -> - call_service(BaseURL, 'GetHistory', {EventSinkID, Range}). - -%% -%% local -%% --spec call_service(_BaseURL, atom(), woody:args()) -> _. -call_service(BaseURL, Function, Args) -> - WR = woody_client:call( - {{mg_proto_state_processing_thrift, 'EventSink'}, Function, Args}, - #{ - url => BaseURL ++ "/v1/event_sink", - event_handler => {mg_woody_event_handler, mg_cth_pulse} - }, - woody_context:new() - ), - case WR of - {ok, R} -> - R; - {exception, Exception} -> - erlang:throw(Exception) - end. diff --git a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl index 0caed6a3..b7c3c520 100644 --- a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl @@ -194,10 +194,6 @@ mg_woody_config(Name, C) -> } end ) - }, - event_sink_ns => #{ - storage => mg_core_storage_memory, - default_processing_timeout => 5000 } }. diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index 34206798..7971717f 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -26,7 +26,6 @@ -export([stress_test/1]). -define(NS, <<"NS">>). --define(ES_ID, <<"test_event_sink">>). -type test_name() :: atom(). -type config() :: [{atom(), _}]. @@ -41,6 +40,7 @@ all() -> init_per_suite(C) -> Config = mg_woody_config(C), Apps = mg_cth:start_applications([ + brod, {hackney, [{use_default_pool, false}]}, mg_woody, opentelemetry_exporter, @@ -83,7 +83,6 @@ init_per_suite(C) -> ns => ?NS, retry_strategy => genlib_retry:new_strategy({exponential, 5, 2, 1000}) }}, - {event_sink_options, "http://localhost:8022"}, {processor_pid, ProcessorPid} | C ]. 
@@ -123,7 +122,11 @@ mg_woody_config(_C) -> }, retries => #{}, event_sinks => [ - {mg_event_sink_machine, #{name => default, machine_id => ?ES_ID}} + {mg_event_sink_kafka, #{ + name => kafka, + topic => <<"mg_core_event_sink">>, + client => mg_cth:config(kafka_client_name) + }} ], event_stash_size => 10, worker => #{ @@ -131,10 +134,6 @@ mg_woody_config(_C) -> sidecar => mg_cth_worker } } - }, - event_sink_ns => #{ - storage => mg_core_storage_memory, - default_processing_timeout => 5000 } }. diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index e64ba68e..cb0cf4e5 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -70,14 +70,6 @@ -export([success_call_with_deadline/1]). -export([timeout_call_with_deadline/1]). -%% event_sink group tests --export([event_sink_get_empty_history/1]). --export([event_sink_get_not_empty_history/1]). --export([event_sink_get_last_event/1]). --export([event_sink_incorrect_event_id/1]). --export([event_sink_incorrect_sink_id/1]). --export([event_sink_lots_events_ordering/1]). - %% -export([config_with_multiple_event_sinks/1]). @@ -96,7 +88,6 @@ all() -> {group, history}, {group, repair}, {group, timers}, - {group, event_sink}, {group, deadline}, config_with_multiple_event_sinks ]. @@ -160,16 +151,6 @@ groups() -> machine_start, success_call_with_deadline, timeout_call_with_deadline - ]}, - - {event_sink, [sequence], [ - event_sink_get_empty_history, - event_sink_get_not_empty_history, - event_sink_get_last_event, - % TODO event_not_found - % event_sink_incorrect_event_id, - event_sink_incorrect_sink_id, - event_sink_lots_events_ordering ]} ]. @@ -235,7 +216,6 @@ init_per_group(C) -> ns => ?NS, retry_strategy => genlib_retry:linear(3, 1) }}, - {event_sink_options, "http://localhost:8022"}, {processor_pid, ProcessorPid} | C ]. 
@@ -353,10 +333,6 @@ mg_woody_config(C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_event_sink_machine, #{ - name => machine, - machine_id => ?ES_ID - }}, {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, @@ -368,10 +344,6 @@ mg_woody_config(C) -> sidecar => mg_cth_worker } } - }, - event_sink_ns => #{ - storage => mg_core_storage_memory, - default_processing_timeout => 5000 } }. @@ -627,95 +599,6 @@ success_call_with_deadline(C) -> <<"sleep">> = mg_cth_automaton_client:call(Options, ?ID, <<"sleep">>, Deadline). %% -%% event_sink group test -%% --spec event_sink_get_empty_history(config()) -> _. -event_sink_get_empty_history(C) -> - [] = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ - direction = forward - }). - --spec event_sink_get_not_empty_history(config()) -> _. -event_sink_get_not_empty_history(C) -> - ok = start_machine(C, ?ID), - - _ = create_events(3, C, ?ID), - - AllEvents = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ - direction = forward - }), - GeneratedEvents = [ - E - || E = #mg_stateproc_SinkEvent{ - source_id = ?ID, - source_ns = ?NS, - event = #mg_stateproc_Event{} - } <- AllEvents - ], - ?assert(erlang:length(GeneratedEvents) >= 3). - --spec event_sink_get_last_event(config()) -> _. -event_sink_get_last_event(C) -> - [ - #mg_stateproc_SinkEvent{ - id = 3, - source_id = _ID, - source_ns = _NS, - event = #mg_stateproc_Event{} - } - ] = - mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ - direction = backward, - limit = 1 - }). - --spec event_sink_incorrect_event_id(config()) -> _. -event_sink_incorrect_event_id(C) -> - #mg_stateproc_EventNotFound{} = - (catch mg_event_sink_client:get_history(es_opts(C), ?ES_ID, #mg_stateproc_HistoryRange{ - 'after' = 42 - })). - --spec event_sink_incorrect_sink_id(config()) -> _. 
-event_sink_incorrect_sink_id(C) -> - HRange = #mg_stateproc_HistoryRange{}, - #mg_stateproc_EventSinkNotFound{} = - (catch mg_event_sink_client:get_history(es_opts(C), <<"incorrect_event_sink_id">>, HRange)). - --spec event_sink_lots_events_ordering(config()) -> _. -event_sink_lots_events_ordering(C) -> - MachineID = genlib:unique(), - ok = start_machine(C, MachineID), - N = 20, - _ = create_events(N, C, MachineID), - - HRange = #mg_stateproc_HistoryRange{direction = forward}, - Events = mg_event_sink_client:get_history(es_opts(C), ?ES_ID, HRange), - % event_sink не гарантирует отсутствия дублей событий, но гарантирует - % сохранения порядка событий отдельной машины. - lists:foldl( - fun(Ev, LastEvIDMap) -> - #mg_stateproc_SinkEvent{ - source_id = Machine, - source_ns = NS, - event = Body - } = Ev, - Key = {NS, Machine}, - LastID = maps:get(Key, LastEvIDMap, 0), - case Body#mg_stateproc_Event.id of - ID when ID =:= LastID + 1 -> - LastEvIDMap#{Key => ID}; - ID when ID =< LastID -> - % Дубликат одного из уже известных событий - LastEvIDMap; - ID -> - % Нарушен порядок событий, получился пропуск - erlang:error({invalid_order, ID, LastID}, [Ev, LastEvIDMap]) - end - end, - #{}, - Events - ). -spec config_with_multiple_event_sinks(config()) -> _. 
config_with_multiple_event_sinks(_C) -> @@ -735,7 +618,11 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_event_sink_machine, #{name => default, machine_id => <<"SingleES">>}} + {mg_event_sink_kafka, #{ + name => kafka, + topic => <<"mg_core_event_sink">>, + client => mg_cth:config(kafka_client_name) + }} ], worker => #{ registry => mg_procreg_global, @@ -755,9 +642,10 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_event_sink_machine, #{ - name => machine, - machine_id => <<"SingleES">> + {mg_event_sink_kafka, #{ + name => kafka_other, + topic => <<"mg_core_event_sink_2">>, + client => mg_cth:config(kafka_client_name) }}, {mg_event_sink_kafka, #{ name => kafka, @@ -770,10 +658,6 @@ config_with_multiple_event_sinks(_C) -> sidecar => mg_cth_worker } } - }, - event_sink_ns => #{ - storage => mg_core_storage_memory, - default_processing_timeout => 5000 } }, Apps = mg_cth:start_applications([ @@ -783,7 +667,7 @@ config_with_multiple_event_sinks(_C) -> {ok, _Pid} = genlib_adhoc_supervisor:start_link( {local, mg_core_sup_does_nothing}, #{strategy => rest_for_one}, - mg_cth_configurator:construct_child_specs(Config) + mg_cth_conf:construct_child_specs(Config) ), ok = mg_cth:stop_applications(Apps). @@ -803,25 +687,9 @@ start_machine(C, ID, Args) -> ok end. --spec create_event(mg_core_storage:opaque(), config(), mg_core:id()) -> _. -create_event(Event, C, ID) -> - mg_cth_automaton_client:call(automaton_options(C), ID, Event). - --spec create_events(integer(), config(), mg_core:id()) -> _. -create_events(N, C, ID) -> - lists:foreach( - fun(I) -> - I = create_event([<<"event">>, I], C, ID) - end, - lists:seq(1, N) - ). - -spec automaton_options(config()) -> _. automaton_options(C) -> ?config(automaton_options, C). --spec es_opts(config()) -> _. -es_opts(C) -> ?config(event_sink_options, C). - -spec no_timeout_automaton_options(config()) -> _. 
no_timeout_automaton_options(C) -> Options0 = automaton_options(C), diff --git a/config/config.yaml b/config/config.yaml index 882ab981..7ee197ac 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -100,22 +100,6 @@ erlang: ipv6: true disable_dns_cache: false -# TODO Retire this option, rely upon OTEL env variables -# https://empayre.youtrack.cloud/issue/TD-838 -# Opentelemetry settings -# By default opentelemetry is disabled which is equivalent to -# "opentelemetry: disabled" -opentelemetry: - # TODO Describe sampling - # Name of the service to use in recording machinegun's spans - service_name: machinegun - # For now spans processed always in batches. - # We support only "otlp" traces exporter - exporter: - # Supports only "http/protobuf" or "grpc" - protocol: http/protobuf - endpoint: http://jaeger:4318 - # API server options. woody_server: ip: "::" @@ -176,9 +160,6 @@ namespaces: # only for testing, default 0 # suicide_probability: 0.1 event_sinks: - machine: - type: machine - machine_id: main_event_sink kafka: type: kafka client: default_kafka_client diff --git a/docs/c4.dsl b/docs/c4.dsl index cfdc6215..ecf1fd76 100644 --- a/docs/c4.dsl +++ b/docs/c4.dsl @@ -45,15 +45,6 @@ workspace { support_services -> this "Executes RPC" "http, thrift" } - mg_es_thrift = container "Event Sink Thrift API" "Thrift service for reading all events of all machine in strictly ordered fashion" "erlang, http, thrift" { - unknown -> this "Executes RPC" "http, thrift" - } - - mg_es_machine = container "Event sink collector machine" { - description "This process starts as a machine with internal processor. Other than that it is a machine with a namespace and its very own options." 
- mg_es_thrift -> this "Queries" "erlang" - } - mg_es_kafka = container "Machines Business Event Sink" "Machines business events publisher via kafka topic on per-namcespace basis and thirft serialization of its data" "erlang, kafka, thrift" { this -> eventstream "Produces" "thrift" } @@ -148,7 +139,6 @@ workspace { this -> mg_processor "Calls state processor" this -> mg_events_storage "Reads and writes" "erlang" this -> mg_es_kafka "Publishes business events" "erlang" - this -> mg_es_machine "Publishes business events" "erlang" } } } diff --git a/rebar.lock b/rebar.lock index 90b966f9..c6f57f77 100644 --- a/rebar.lock +++ b/rebar.lock @@ -35,9 +35,9 @@ {<<"metrics">>,{pkg,<<"metrics">>,<<"1.0.1">>},2}, {<<"mg_proto">>, {git,"https://github.com/valitydev/machinegun-proto", - {ref,"96f7f11b184c29d8b7e83cd7646f3f2c13662bda"}}, + {ref,"3decc8f8b13c9cd1701deab47781aacddd7dbc92"}}, 0}, - {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.2.0">>},2}, + {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.3.0">>},2}, {<<"msgpack">>, {git,"https://github.com/msgpack/msgpack-erlang", {ref,"9d56647ed77498c7655da39891c4985142697083"}}, @@ -112,7 +112,7 @@ {<<"jsx">>, <<"D12516BAA0BB23A59BB35DCCAF02A1BD08243FCBB9EFE24F2D9D056CCFF71268">>}, {<<"kafka_protocol">>, <<"FC696880C73483C8B032C4BB60F2873046035C7824E1EDCB924CFCE643CF23DD">>}, {<<"metrics">>, <<"25F094DEA2CDA98213CECC3AEFF09E940299D950904393B2A29D191C346A8486">>}, - {<<"mimerl">>, <<"67E2D3F571088D5CFD3E550C383094B47159F3EEE8FFA08E64106CDF5E981BE3">>}, + {<<"mimerl">>, <<"D0CD9FC04B9061F82490F6581E0128379830E78535E017F7780F37FEA7545726">>}, {<<"opentelemetry">>, <<"988AC3C26ACAC9720A1D4FB8D9DC52E95B45ECFEC2D5B5583276A09E8936BC5E">>}, {<<"opentelemetry_api">>, <<"7B69ED4F40025C005DE0B74FCE8C0549625D59CB4DF12D15C32FE6DC5076FF42">>}, {<<"opentelemetry_exporter">>, <<"1D8809C0D4F4ACF986405F7700ED11992BCBDB6A4915DD11921E80777FFA7167">>}, @@ -148,7 +148,7 @@ {<<"jsx">>, <<"0C5CC8FDC11B53CC25CF65AC6705AD39E54ECC56D1C22E4ADB8F5A53FB9427F3">>}, 
{<<"kafka_protocol">>, <<"687BFD9989998EC8FBBC3ED50D1239A6C07A7DC15B52914AD477413B89ECB621">>}, {<<"metrics">>, <<"69B09ADDDC4F74A40716AE54D140F93BEB0FB8978D8636EADED0C31B6F099F16">>}, - {<<"mimerl">>, <<"F278585650AA581986264638EBF698F8BB19DF297F66AD91B18910DFC6E19323">>}, + {<<"mimerl">>, <<"A1E15A50D1887217DE95F0B9B0793E32853F7C258A5CD227650889B38839FE9D">>}, {<<"opentelemetry">>, <<"8E09EDC26AAD11161509D7ECAD854A3285D88580F93B63B0B1CF0BAC332BFCC0">>}, {<<"opentelemetry_api">>, <<"6D7A27B7CAD2AD69A09CABF6670514CAFCEC717C8441BEB5C96322BAC3D05350">>}, {<<"opentelemetry_exporter">>, <<"2B40007F509D38361744882FD060A8841AF772AB83BB542AA5350908B303AD65">>}, diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index ad6bd622..7caac23b 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -216,7 +216,6 @@ machinegun(YamlConfig) -> {health_check, health_check(YamlConfig)}, {quotas, quotas(YamlConfig)}, {namespaces, namespaces(YamlConfig)}, - {event_sink_ns, event_sink_ns(YamlConfig)}, {pulse, pulse(YamlConfig)}, {cluster, cluster(YamlConfig)} ]. @@ -540,23 +539,9 @@ notification_scheduler(Share, Config) -> timeout(Name, Config, Default, Unit) -> ?C:time_interval(?C:conf([Name], Config, Default), Unit). -event_sink_ns(YamlConfig) -> - #{ - registry => procreg(YamlConfig), - storage => storage(<<"_event_sinks">>, YamlConfig), - worker => #{registry => procreg(YamlConfig)}, - duplicate_search_batch => 1000, - default_processing_timeout => ?C:milliseconds(<<"30s">>) - }. - event_sink({Name, ESYamlConfig}) -> event_sink(?C:atom(?C:conf([type], ESYamlConfig)), Name, ESYamlConfig). 
-event_sink(machine, Name, ESYamlConfig) -> - {mg_event_sink_machine, #{ - name => ?C:atom(Name), - machine_id => ?C:conf([machine_id], ESYamlConfig) - }}; event_sink(kafka, Name, ESYamlConfig) -> {mg_event_sink_kafka, #{ name => ?C:atom(Name), From 63e1624b41b059141f8f2f2692cd22d446cd85a1 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 28 May 2024 10:17:51 +0300 Subject: [PATCH 13/31] Minor reformat --- apps/mg_core/src/mg_core_otel.erl | 2 +- apps/mg_woody/src/mg_woody_processor.erl | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/apps/mg_core/src/mg_core_otel.erl b/apps/mg_core/src/mg_core_otel.erl index e918f8bc..f19d4c9b 100644 --- a/apps/mg_core/src/mg_core_otel.erl +++ b/apps/mg_core/src/mg_core_otel.erl @@ -47,7 +47,7 @@ pack_otel_stub(Ctx) -> -spec restore_otel_stub(otel_ctx:t(), packed_otel_stub()) -> otel_ctx:t(). restore_otel_stub(Ctx, [TraceID, SpanID, TraceFlags]) -> SpanCtx = otel_tracer:from_remote_span(binary_to_id(TraceID), binary_to_id(SpanID), TraceFlags), - %% NOTE Thus resored span context is considered being not recording and remote. + %% NOTE Thus restored span context is considered being remote and not recording. otel_tracer:set_current_span(Ctx, SpanCtx); restore_otel_stub(Ctx, _Other) -> Ctx. diff --git a/apps/mg_woody/src/mg_woody_processor.erl b/apps/mg_woody/src/mg_woody_processor.erl index 9c1381d0..9fbf03cb 100644 --- a/apps/mg_woody/src/mg_woody_processor.erl +++ b/apps/mg_woody/src/mg_woody_processor.erl @@ -101,11 +101,9 @@ call_processor(Options, ReqCtx, Deadline, Function, Args) -> % TODO сделать нормально! 
{ok, TRef} = timer:kill_after(call_duration_limit(Options, Deadline) + ?KILL_TIMEOUT), try - woody_client:call( - {{mg_proto_state_processing_thrift, 'Processor'}, Function, Args}, - Options, - mg_woody_utils:set_deadline(Deadline, request_context_to_woody_context(ReqCtx)) - ) + WoodyContext = mg_woody_utils:set_deadline(Deadline, request_context_to_woody_context(ReqCtx)), + Service = {mg_proto_state_processing_thrift, 'Processor'}, + woody_client:call({Service, Function, Args}, Options, WoodyContext) of {ok, _} = Result -> Result; From 6c00b7b91716d94ff27f8920cae25a58b11fa69f Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 21 Jun 2024 12:24:41 +0300 Subject: [PATCH 14/31] Reverts accidental kafka metric renaming --- .../src/mg_event_sink_kafka_prometheus_pulse.erl | 10 +++++----- .../test/mg_event_sink_kafka_prometheus_SUITE.erl | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl index 83405f39..1da1099e 100644 --- a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl @@ -31,13 +31,13 @@ handle_beat(_Options, Beat) -> setup() -> %% Event sink / kafka true = prometheus_counter:declare([ - {name, mg_event_sink_produced_total}, + {name, mg_events_sink_produced_total}, {registry, registry()}, {labels, [namespace, name]}, {help, "Total number of Machinegun event sink events."} ]), true = prometheus_histogram:declare([ - {name, mg_event_sink_kafka_produced_duration_seconds}, + {name, mg_events_sink_kafka_produced_duration_seconds}, {registry, registry()}, {labels, [namespace, name, action]}, {buckets, duration_buckets()}, @@ -56,9 +56,9 @@ dispatch_metrics(#mg_event_sink_kafka_sent{ encode_duration = EncodeDuration, send_duration = SendDuration }) -> - ok = inc(mg_event_sink_produced_total, [NS, Name]), - ok = 
observe(mg_event_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), - ok = observe(mg_event_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); + ok = inc(mg_events_sink_produced_total, [NS, Name]), + ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), + ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); % Unknown dispatch_metrics(_Beat) -> ok. diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl index 690d38d3..3d0e20a4 100644 --- a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl @@ -78,11 +78,11 @@ event_sink_kafka_sent_test(_C) -> partition = 0, offset = 0 }), - ?assertEqual(prometheus_counter:value(mg_event_sink_produced_total, [?NS, Name]), Counter), + ?assertEqual(prometheus_counter:value(mg_events_sink_produced_total, [?NS, Name]), Counter), {BucketsHits, _} = - prometheus_histogram:value(mg_event_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), + prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), {BucketsHits, _} = - prometheus_histogram:value(mg_event_sink_kafka_produced_duration_seconds, [?NS, Name, send]), + prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, send]), BucketHit = lists:nth(BucketIdx, BucketsHits), %% Check that bucket under index BucketIdx received one hit ?assertEqual(maps:get(BucketIdx, BucketAcc, 0) + 1, BucketHit), From f20fe6bc25013dd72fe7dd4d0e79e3845c880d45 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Mon, 24 Jun 2024 14:43:05 +0300 Subject: [PATCH 15/31] Removes obsolete module --- apps/mg_riak/src/mg_riak.erl | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 apps/mg_riak/src/mg_riak.erl diff --git 
a/apps/mg_riak/src/mg_riak.erl b/apps/mg_riak/src/mg_riak.erl deleted file mode 100644 index e4647aef..00000000 --- a/apps/mg_riak/src/mg_riak.erl +++ /dev/null @@ -1,3 +0,0 @@ --module(mg_riak). - -%% From 728e94172566d4cb6d17ebce8fbb60d9533cdada Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 27 Jun 2024 12:35:49 +0300 Subject: [PATCH 16/31] Initial load test setup - Setups mg cluster - Attempts an elixirization of a processor w/ protocol - Implements simple processor --- .dockerignore | 2 + example/.dockerignore | 0 example/Dockerfile | 18 +++ example/compose.yaml | 157 +++++++++++++++++++++++ example/load_processor.exs | 220 +++++++++++++++++++++++++++++++++ example/machinegun/config.yaml | 131 ++++++++++++++++++++ example/machinegun/cookie | 1 + example/riak/riak_user.conf | 8 ++ 8 files changed, 537 insertions(+) create mode 100644 example/.dockerignore create mode 100644 example/Dockerfile create mode 100644 example/compose.yaml create mode 100644 example/load_processor.exs create mode 100644 example/machinegun/config.yaml create mode 100644 example/machinegun/cookie create mode 100644 example/riak/riak_user.conf diff --git a/.dockerignore b/.dockerignore index 17c5a18e..c5e20fec 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,3 +5,5 @@ /.idea/ erl_crash.dump rebar3.crashdump + +/example/ \ No newline at end of file diff --git a/example/.dockerignore b/example/.dockerignore new file mode 100644 index 00000000..e69de29b diff --git a/example/Dockerfile b/example/Dockerfile new file mode 100644 index 00000000..72eb77d3 --- /dev/null +++ b/example/Dockerfile @@ -0,0 +1,18 @@ +ARG ELIXIR_VERSION +ARG OTP_VERSION + +FROM docker.io/library/elixir:${ELIXIR_VERSION}-otp-${OTP_VERSION} +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Install woorl +RUN wget -q -O- "https://github.com/valitydev/woorl/releases/download/1.8/woorl-1.8.tar.gz" \ + | tar -xvz -C /usr/local/bin/ + +# Set env +ENV CHARSET=UTF-8 +ENV LANG=C.UTF-8 + +RUN apt update && apt 
install -y inotify-tools && mix local.hex --force && mix local.rebar --force + +# Set runtime +CMD ["/bin/bash"] diff --git a/example/compose.yaml b/example/compose.yaml new file mode 100644 index 00000000..c08f4f82 --- /dev/null +++ b/example/compose.yaml @@ -0,0 +1,157 @@ +version: '3' +services: + + load-processor: + build: + dockerfile: Dockerfile + context: . + args: + OTP_VERSION: 26 + ELIXIR_VERSION: 1.16.2 + volumes: + - .:$PWD + environment: + OTEL_TRACES_EXPORTER: otlp + OTEL_TRACES_SAMPLER: parentbased_always_on + OTEL_EXPORTER_OTLP_PROTOCOL: http_protobuf + OTEL_EXPORTER_OTLP_ENDPOINT: http://jaeger:4318 + OTEL_SERVICE_NAME: load-processor + depends_on: + machinegun: + condition: service_healthy + working_dir: $PWD + command: /usr/local/bin/iex + # command: /usr/local/bin/elixir ./load_processor.exs + + machinegun: + build: + dockerfile: Dockerfile + context: .. + args: + SERVICE_NAME: machinegun + OTP_VERSION: 25.3 + REBAR_VERSION: 3.18 + THRIFT_VERSION: 0.14.2.2 + volumes: + - ./machinegun/config.yaml:/opt/machinegun/etc/config.yaml + - ./machinegun/cookie:/opt/machinegun/etc/cookie + healthcheck: + # For `ERL_DIST_PORT` see `dist_port` entry in `example/machinegun/config.yaml` + test: "ERL_DIST_PORT=31337 /opt/machinegun/bin/machinegun ping" + interval: 5s + timeout: 1s + retries: 20 + environment: + OTEL_TRACES_EXPORTER: otlp + OTEL_TRACES_SAMPLER: parentbased_always_off + OTEL_EXPORTER_OTLP_PROTOCOL: http_protobuf + OTEL_EXPORTER_OTLP_ENDPOINT: http://jaeger:4318 + depends_on: + jaeger: + condition: service_healthy + riakdb: + condition: service_started + member1: + condition: service_started + member2: + condition: service_started + kafka1: + condition: service_healthy + kafka2: + condition: service_healthy + kafka3: + condition: service_healthy + # See https://docs.docker.com/compose/compose-file/deploy/ + deploy: + mode: replicated + replicas: 5 + endpoint_mode: dnsrr + resources: + limits: + cpus: '0.2' + memory: 512M + reservations: + cpus: 
'0.1' + memory: 256M + restart_policy: + condition: on-failure + + riakdb: &member-node + image: docker.io/basho/riak-kv:ubuntu-2.2.3 + environment: + - CLUSTER_NAME=riakkv + - COORDINATOR_NODE=riakdb + labels: + - "com.basho.riak.cluster.name=riakkv" + volumes: + - ./riak/riak_user.conf:/etc/riak/user.conf:ro + - schemas:/etc/riak/schemas + member1: + <<: *member-node + links: + - riakdb + depends_on: + - riakdb + member2: + <<: *member-node + links: + - riakdb + depends_on: + - riakdb + + zookeeper: + image: docker.io/confluentinc/cp-zookeeper:5.1.2 + healthcheck: + test: echo ruok | nc 127.0.0.1 2181 || exit -1 + interval: 5s + timeout: 240s + retries: 50 + environment: + KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" + ZOOKEEPER_CLIENT_PORT: 2181 + + kafka1: &kafka-broker + image: docker.io/confluentinc/cp-kafka:5.1.2 + depends_on: + - zookeeper + healthcheck: + test: ["CMD", "kafka-topics", "--list", "--zookeeper", "zookeeper:2181"] + interval: 5s + timeout: 10s + retries: 5 + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092 + kafka2: + <<: *kafka-broker + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092 + kafka3: + <<: *kafka-broker + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092 + + jaeger: + image: jaegertracing/all-in-one:1.47 + environment: + - COLLECTOR_OTLP_ENABLED=true + healthcheck: + test: "/go/bin/all-in-one-linux status" + interval: 2s + timeout: 1s + retries: 20 + ports: + - 4317:4317 + - 4318:4318 + - 5778:5778 + - 14250:14250 + - 16686:16686 + +volumes: + schemas: + external: false diff --git a/example/load_processor.exs b/example/load_processor.exs new file mode 100644 index 00000000..e2608155 --- /dev/null +++ b/example/load_processor.exs @@ -0,0 +1,220 @@ +Mix.install([ + {:thrift, + git: 
"https://github.com/valitydev/elixir-thrift.git", + branch: "ft/subst-reserved-vars", + override: true}, + {:mg_proto, git: "https://github.com/valitydev/machinegun-proto", branch: "ft/elixir-support"}, + {:snowflake, git: "https://github.com/valitydev/snowflake.git", branch: "master"}, + {:genlib, git: "https://github.com/valitydev/genlib.git", branch: "master"} +]) + +defmodule LoadProcessor do + defmodule Utils do + @moduledoc false + alias MachinegunProto.MsgPack + + def pack(data) do + %MsgPack.Value{bin: :erlang.term_to_binary(data)} + end + + def unpack(%MsgPack.Value{bin: bin}) do + :erlang.binary_to_term(bin) + end + end + + defmodule ProcessorHandler do + @moduledoc false + alias Woody.Generated.MachinegunProto.StateProcessing.Processor + @behaviour Processor.Handler + + require Logger + + alias LoadProcessor.Utils + alias MachinegunProto.StateProcessing.{SignalArgs, CallArgs, RepairArgs} + alias MachinegunProto.StateProcessing.{Signal, InitSignal, TimeoutSignal, NotificationSignal} + alias MachinegunProto.StateProcessing.{SignalResult, CallResult, RepairResult, RepairFailed} + alias MachinegunProto.StateProcessing.{Content, HistoryRange, Direction} + alias MachinegunProto.StateProcessing.{Machine, MachineStatus, MachineStateChange} + alias MachinegunProto.StateProcessing.{ComplexAction, TimerAction, SetTimerAction} + alias MachinegunProto.Base.Timer + + def new(http_path, options \\ []) do + Processor.Handler.new(__MODULE__, http_path, options) + end + + @impl true + def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, _hdlops) do + result = + case signal do + %Signal{init: %InitSignal{arg: args}} -> + process_init(machine, args) + + %Signal{timeout: %TimeoutSignal{}} -> + process_timeout(machine) + + %Signal{notification: %NotificationSignal{arg: args}} -> + process_notification(machine, args) + + _uknown_signal -> + throw(:not_implemented) + end + + {:ok, result} + end + + @impl true + def process_call(%CallArgs{arg: _arg, machine: 
_machine}, _ctx, _hdlops) do + throw(:not_implemented) + end + + @impl true + def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlops) do + throw(:not_implemented) + end + + defp process_init(%Machine{id: id, ns: ns} = _machine, args) do + Logger.info("Starting '#{id}' of '#{ns}' with arguments: #{inspect(Utils.unpack(args))}") + + change = + %MachineStateChange{} + |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", "counter" => 0}) + |> put_events([:counter_incremented]) + + action = + %ComplexAction{} + |> set_timer(1) + + %SignalResult{change: change, action: action} + end + + defp process_timeout(%Machine{id: id, ns: ns} = machine) do + Logger.info("Timeouting machine #{id} of #{ns}") + + aux_state = get_aux_state(machine) + + change = + %MachineStateChange{} + |> put_aux_state(%{aux_state | "counter" => aux_state["counter"] + 1}) + |> put_events([]) + + action = + %ComplexAction{} + |> set_timer(1) + + %SignalResult{change: change, action: action} + end + + defp process_notification(_machine, _args) do + throw(:not_implemented) + end + + defp get_aux_state(%Machine{aux_state: %Content{format_version: 1, data: data}}) do + Utils.unpack(data) + end + + defp get_aux_state(_machine) do + nil + end + + defp put_aux_state(change, data, format_version \\ 1) do + %MachineStateChange{change | aux_state: to_content(data, format_version)} + end + + defp put_events(change, events, format_version \\ 1) do + wrapped_events = + events + |> Enum.map(&to_content(&1, format_version)) + + %MachineStateChange{change | events: wrapped_events} + end + + defp to_content(data, format_version) do + %Content{format_version: format_version, data: Utils.pack(data)} + end + + defp set_timer(action, timeout, deadline \\ nil, range \\ nil) do + timer = %SetTimerAction{ + timer: %Timer{timeout: timeout, deadline: deadline}, + range: maybe_full_range(range), + timeout: nil + } + + %ComplexAction{action | timer: %TimerAction{set_timer: timer}} + end + + defp 
maybe_full_range(nil) do + require Direction + %HistoryRange{after: nil, limit: nil, direction: Direction.forward()} + end + + defp maybe_full_range(range) do + range + end + end + + defmodule Machinery do + @moduledoc false + alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client + alias LoadProcessor.Utils + + defstruct url: nil, opts: nil + + def new(url, opts \\ nil) do + %__MODULE__{url: url, opts: opts} + end + + def start(%__MODULE__{url: url, opts: opts}, ns, id, args) do + Woody.Context.new() + |> Client.new(url, List.wrap(opts)) + |> Client.start(ns, id, Utils.pack(args)) + end + end + + defmodule WebHandler do + @moduledoc false + alias LoadProcessor.Machinery + + def init(req, state) do + result = + Machinery.new("http://machinegun:8022/v1/automaton") + |> Machinery.start("load-test", random_id(), "start please") + + IO.inspect(result) + + res = + :cowboy_req.reply(200, %{"content-type" => "text/plain"}, "Starting machine now\n", req) + + {:ok, res, state} + end + + def terminate(_, _, _) do + :ok + end + + defp random_id() do + <> = :snowflake.new() + :genlib_format.format_int_base(id, 62) + end + end +end + +require Logger + +alias Woody.Server.Http, as: Server + +endpoint = + Server.Endpoint.any(:inet) + |> Map.put(:port, 8022) + +handlers = [ + {"/", LoadProcessor.WebHandler, []}, + LoadProcessor.ProcessorHandler.new("/v1/stateproc", event_handler: Woody.EventHandler.Default) +] + +{:ok, _pid} = + Server.child_spec(LoadProcessor, endpoint, handlers) + |> List.wrap() + |> Supervisor.start_link(strategy: :one_for_one) + +Logger.info("Woody server now running on #{Server.endpoint(LoadProcessor)}") +Process.sleep(:infinity) diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml new file mode 100644 index 00000000..9c26e6fe --- /dev/null +++ b/example/machinegun/config.yaml @@ -0,0 +1,131 @@ +service_name: machinegun + +dist_node_name: + hostpart: ip + +dist_port: + mode: static + port: 31337 + +erlang: + 
secret_cookie_file: "/opt/machinegun/etc/cookie" + ipv6: false + disable_dns_cache: true + +woody_server: + ip: "::" + port: 8022 + http_keep_alive_timeout: 60s + shutdown_timeout: 5s + +cluster: + discovery: + type: dns + options: + domain_name: machinegun + sname: machinegun + reconnect_timeout: 5000 + +process_registry: + module: mg_core_procreg_global + +limits: + process_heap: 2M + memory: + type: cgroups # cgroups | total + value: 90% + scheduler_tasks: 5000 + +logging: + out_type: stdout + level: warning + formatter: + level_map: + 'emergency': 'ERROR' + 'alert': 'ERROR' + 'critical': 'ERROR' + 'error': 'ERROR' + 'warning': 'WARN' + 'notice': 'INFO' + 'info': 'INFO' + 'debug': 'DEBUG' + +namespaces: + load-test: + suicide_probability: 0.1 + event_sinks: + kafka: + type: kafka + client: default_kafka_client + topic: load-test + default_processing_timeout: 30s + timer_processing_timeout: 60s + reschedule_timeout: 60s + hibernate_timeout: 5s + shutdown_timeout: 5s + unload_timeout: 60s + processor: + url: http://load-processor:8022/v1/stateproc + pool_size: 50 + http_keep_alive_timeout: 10s + timers: + scan_interval: 1m + scan_limit: 1000 + capacity: 500 + min_scan_delay: 10s + overseer: + scan_interval: 60m + min_scan_delay: 5s + notification: + capacity: 1000 + scan_interval: 1m + min_scan_delay: 1s + scan_handicap: 10s + scan_cutoff: 4W + reschedule_time: 5s + event_stash_size: 5 + modernizer: + current_format_version: 1 + handler: + url: http://load-test:8022/v1/modernizer + pool_size: 50 + http_keep_alive_timeout: 10s + +snowflake_machine_id: 1 + +storage: + type: riak + host: riakdb + port: 8087 + pool: + size: 100 + queue_max: 1000 + connect_timeout: 5s + request_timeout: 10s + index_query_timeout: 10s + batch_concurrency_limit: 50 + +lifecycle_pulse: + topic: mg-lifecycle + client: default_kafka_client + +kafka: + default_kafka_client: + endpoints: + - host: "kafka1" + port: 9092 + - host: "kafka2" + port: 9092 + - host: "kafka3" + port: 9092 + 
producer: + compression: no_compression + partition_onwire_limit: 1 + ack_timeout: 10s + required_acks: all_isr + partition_buffer_limit: 256 + max_linger: 0ms + max_linger_count: 0 + max_batch_size: 1M + max_retries: 3 + retry_backoff: 500ms diff --git a/example/machinegun/cookie b/example/machinegun/cookie new file mode 100644 index 00000000..dfa851bc --- /dev/null +++ b/example/machinegun/cookie @@ -0,0 +1 @@ +load-test-machinegun \ No newline at end of file diff --git a/example/riak/riak_user.conf b/example/riak/riak_user.conf new file mode 100644 index 00000000..88f6dcbf --- /dev/null +++ b/example/riak/riak_user.conf @@ -0,0 +1,8 @@ +## Specifies the storage engine used for Riak's key-value data +## and secondary indexes (if supported). +## +## Default: bitcask +## +## Acceptable values: +## - one of: bitcask, leveldb, memory, multi, prefix_multi +storage_backend = leveldb From 99fe2b1e40b6ea459ecc9cd2d6e6d59f2fbc2f2e Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 28 Jun 2024 18:51:31 +0300 Subject: [PATCH 17/31] Fixes mg config --- example/compose.yaml | 4 +- example/load_processor.exs | 43 ++++-- example/machinegun/config.yaml | 240 ++++++++++++++++++--------------- 3 files changed, 167 insertions(+), 120 deletions(-) diff --git a/example/compose.yaml b/example/compose.yaml index c08f4f82..0aa5d322 100644 --- a/example/compose.yaml +++ b/example/compose.yaml @@ -39,7 +39,7 @@ services: # For `ERL_DIST_PORT` see `dist_port` entry in `example/machinegun/config.yaml` test: "ERL_DIST_PORT=31337 /opt/machinegun/bin/machinegun ping" interval: 5s - timeout: 1s + timeout: 5s retries: 20 environment: OTEL_TRACES_EXPORTER: otlp @@ -77,7 +77,7 @@ services: condition: on-failure riakdb: &member-node - image: docker.io/basho/riak-kv:ubuntu-2.2.3 + image: riak-valitydev:latest environment: - CLUSTER_NAME=riakkv - COORDINATOR_NODE=riakdb diff --git a/example/load_processor.exs b/example/load_processor.exs index e2608155..184cf695 100644 --- 
a/example/load_processor.exs +++ b/example/load_processor.exs @@ -63,8 +63,19 @@ defmodule LoadProcessor do end @impl true - def process_call(%CallArgs{arg: _arg, machine: _machine}, _ctx, _hdlops) do - throw(:not_implemented) + def process_call(%CallArgs{arg: args, machine: machine}, _ctx, _hdlops) do + args = Utils.unpack(args) + Logger.info("Calling machine #{machine.id} of #{machine.ns} with #{inspect(args)}") + + change = + %MachineStateChange{} + |> put_events([{:call_processed, args}]) + + action = + %ComplexAction{} + |> set_timer(0) + + %CallResult{response: Utils.pack("result"), change: change, action: action} end @impl true @@ -78,7 +89,7 @@ defmodule LoadProcessor do change = %MachineStateChange{} |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", "counter" => 0}) - |> put_events([:counter_incremented]) + |> put_events([:counter_created]) action = %ComplexAction{} @@ -90,12 +101,17 @@ defmodule LoadProcessor do defp process_timeout(%Machine{id: id, ns: ns} = machine) do Logger.info("Timeouting machine #{id} of #{ns}") - aux_state = get_aux_state(machine) + aux_state = + machine + |> get_aux_state() + |> Map.update!("counter", &(&1 + 1)) + + Logger.info("New aux state #{inspect(aux_state)}") change = %MachineStateChange{} - |> put_aux_state(%{aux_state | "counter" => aux_state["counter"] + 1}) - |> put_events([]) + |> put_aux_state(aux_state) + |> put_events([:counter_incremented]) action = %ComplexAction{} @@ -175,16 +191,19 @@ defmodule LoadProcessor do alias LoadProcessor.Machinery def init(req, state) do - result = + {:ok, machine} = Machinery.new("http://machinegun:8022/v1/automaton") |> Machinery.start("load-test", random_id(), "start please") - IO.inspect(result) - - res = - :cowboy_req.reply(200, %{"content-type" => "text/plain"}, "Starting machine now\n", req) + req = + :cowboy_req.reply( + 200, + %{"content-type" => "text/plain"}, + "Starting machine now\n#{inspect(machine)}\n", + req + ) - {:ok, res, state} + {:ok, req, state} end 
def terminate(_, _, _) do diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml index 9c26e6fe..82a95463 100644 --- a/example/machinegun/config.yaml +++ b/example/machinegun/config.yaml @@ -1,131 +1,159 @@ service_name: machinegun dist_node_name: - hostpart: ip + hostpart: ip dist_port: - mode: static - port: 31337 + mode: static + port: 31337 erlang: - secret_cookie_file: "/opt/machinegun/etc/cookie" - ipv6: false - disable_dns_cache: true + secret_cookie_file: "/opt/machinegun/etc/cookie" + ipv6: false + disable_dns_cache: true woody_server: - ip: "::" - port: 8022 - http_keep_alive_timeout: 60s - shutdown_timeout: 5s + ip: "::" + port: 8022 + max_concurrent_connections: 8000 + http_keep_alive_timeout: 3000ms + shutdown_timeout: 5s + +snowflake_machine_id: 1 + +storage: + type: riak + host: riakdb + port: 8087 + pool: + size: 100 + queue_max: 500 + connect_timeout: 500ms + request_timeout: 10s + index_query_timeout: 60s + batch_concurrency_limit: 10 cluster: - discovery: - type: dns - options: - domain_name: machinegun - sname: machinegun - reconnect_timeout: 5000 + discovery: + type: dns + options: + domain_name: machinegun + sname: machinegun + reconnect_timeout: 5000 process_registry: - module: mg_core_procreg_global + module: mg_core_procreg_global limits: - process_heap: 2M - memory: - type: cgroups # cgroups | total - value: 90% - scheduler_tasks: 5000 + process_heap: 2M + memory: + type: cgroups # cgroups | total + value: 90% + scheduler_tasks: 5000 logging: - out_type: stdout - level: warning - formatter: - level_map: - 'emergency': 'ERROR' - 'alert': 'ERROR' - 'critical': 'ERROR' - 'error': 'ERROR' - 'warning': 'WARN' - 'notice': 'INFO' - 'info': 'INFO' - 'debug': 'DEBUG' + out_type: stdout + level: warning + formatter: + level_map: + 'emergency': 'ERROR' + 'alert': 'ERROR' + 'critical': 'ERROR' + 'error': 'ERROR' + 'warning': 'WARN' + 'notice': 'INFO' + 'info': 'INFO' + 'debug': 'DEBUG' namespaces: - load-test: - 
suicide_probability: 0.1 - event_sinks: - kafka: - type: kafka - client: default_kafka_client - topic: load-test - default_processing_timeout: 30s - timer_processing_timeout: 60s - reschedule_timeout: 60s - hibernate_timeout: 5s - shutdown_timeout: 5s - unload_timeout: 60s - processor: - url: http://load-processor:8022/v1/stateproc - pool_size: 50 - http_keep_alive_timeout: 10s - timers: - scan_interval: 1m - scan_limit: 1000 - capacity: 500 - min_scan_delay: 10s - overseer: - scan_interval: 60m - min_scan_delay: 5s - notification: - capacity: 1000 - scan_interval: 1m - min_scan_delay: 1s - scan_handicap: 10s - scan_cutoff: 4W - reschedule_time: 5s - event_stash_size: 5 - modernizer: - current_format_version: 1 - handler: - url: http://load-test:8022/v1/modernizer - pool_size: 50 - http_keep_alive_timeout: 10s - -snowflake_machine_id: 1 - -storage: - type: riak - host: riakdb - port: 8087 - pool: - size: 100 - queue_max: 1000 - connect_timeout: 5s - request_timeout: 10s - index_query_timeout: 10s - batch_concurrency_limit: 50 + load-test: + # suicide_probability: 0.1 + retries: + storage: + type: exponential + max_retries: infinity + factor: 2 + timeout: 10ms + max_timeout: 60s + timers: + type: exponential + max_retries: 100 + factor: 2 + timeout: 2s + max_timeout: 30m + processor: + type: exponential + max_retries: + max_total_timeout: 1d + factor: 2 + timeout: 10ms + max_timeout: 60s + continuation: + type: exponential + max_retries: infinity + factor: 2 + timeout: 10ms + max_timeout: 60s + event_sinks: + kafka: + type: kafka + client: default_kafka_client + topic: load-test + default_processing_timeout: 30s + timer_processing_timeout: 60s + reschedule_timeout: 60s + hibernate_timeout: 5s + shutdown_timeout: 5s + unload_timeout: 1m + processor: + url: http://load-processor:8022/v1/stateproc + pool_size: 50 + http_keep_alive_timeout: 10s + overseer: + capacity: 1000 + min_scan_delay: 1s + scan_interval: 1m + timers: + scan_interval: 1m + scan_limit: 1000 + 
capacity: 1000 + min_scan_delay: 10s + notification: + capacity: 1000 + scan_interval: 1m + min_scan_delay: 1s + scan_handicap: 10s + scan_cutoff: 4W + reschedule_time: 5s + event_stash_size: 5 + modernizer: + current_format_version: 1 + handler: + url: http://load-test:8022/v1/modernizer + pool_size: 50 + http_keep_alive_timeout: 10s lifecycle_pulse: topic: mg-lifecycle client: default_kafka_client kafka: - default_kafka_client: - endpoints: - - host: "kafka1" - port: 9092 - - host: "kafka2" - port: 9092 - - host: "kafka3" - port: 9092 - producer: - compression: no_compression - partition_onwire_limit: 1 - ack_timeout: 10s - required_acks: all_isr - partition_buffer_limit: 256 - max_linger: 0ms - max_linger_count: 0 - max_batch_size: 1M - max_retries: 3 - retry_backoff: 500ms + default_kafka_client: + endpoints: + - host: "kafka1" + port: 9092 + - host: "kafka2" + port: 9092 + - host: "kafka3" + port: 9092 + producer: + compression: no_compression + partition_onwire_limit: 1 + ack_timeout: 10s + required_acks: all_isr + partition_buffer_limit: 256 + max_linger: 0ms + max_linger_count: 0 + max_batch_size: 1M + max_retries: 3 + retry_backoff: 500ms From 2484d4b4f9fed67b7454f1a0e560cb4cab3ec705 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 2 Jul 2024 18:26:20 +0300 Subject: [PATCH 18/31] Turns elixir processor modules into its own elixir application --- example/.formatter.exs | 4 + example/.gitignore | 26 ++ example/README.md | 3 + example/compose.yaml | 1 - example/config/config.exs | 3 + example/lib/load_processor.ex | 18 ++ example/lib/load_processor/application.ex | 42 +++ example/lib/load_processor/machinery.ex | 53 ++++ .../lib/load_processor/processor_handler.ex | 149 +++++++++++ .../lib/load_processor/stfu_woody_handler.ex | 34 +++ example/lib/load_processor/utils.ex | 20 ++ example/lib/load_processor/web_handler.ex | 29 +++ example/load_processor.exs | 239 ------------------ example/machinegun/simple_counter_flow.yaml | 10 + example/mix.exs | 55 
++++ example/mix.lock | 27 ++ example/test/load_processor_test.exs | 8 + example/test/test_helper.exs | 1 + 18 files changed, 482 insertions(+), 240 deletions(-) create mode 100644 example/.formatter.exs create mode 100644 example/.gitignore create mode 100644 example/README.md create mode 100644 example/config/config.exs create mode 100644 example/lib/load_processor.ex create mode 100644 example/lib/load_processor/application.ex create mode 100644 example/lib/load_processor/machinery.ex create mode 100644 example/lib/load_processor/processor_handler.ex create mode 100644 example/lib/load_processor/stfu_woody_handler.ex create mode 100644 example/lib/load_processor/utils.ex create mode 100644 example/lib/load_processor/web_handler.ex delete mode 100644 example/load_processor.exs create mode 100644 example/machinegun/simple_counter_flow.yaml create mode 100644 example/mix.exs create mode 100644 example/mix.lock create mode 100644 example/test/load_processor_test.exs create mode 100644 example/test/test_helper.exs diff --git a/example/.formatter.exs b/example/.formatter.exs new file mode 100644 index 00000000..d2cda26e --- /dev/null +++ b/example/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/example/.gitignore b/example/.gitignore new file mode 100644 index 00000000..6bf5bcb7 --- /dev/null +++ b/example/.gitignore @@ -0,0 +1,26 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). 
+*.ez + +# Ignore package tarball (built via "mix hex.build"). +load_processor-*.tar + +# Temporary files, for example, from tests. +/tmp/ diff --git a/example/README.md b/example/README.md new file mode 100644 index 00000000..935947fb --- /dev/null +++ b/example/README.md @@ -0,0 +1,3 @@ +# LoadProcessor + +**TODO: Add description** diff --git a/example/compose.yaml b/example/compose.yaml index 0aa5d322..2038e797 100644 --- a/example/compose.yaml +++ b/example/compose.yaml @@ -21,7 +21,6 @@ services: condition: service_healthy working_dir: $PWD command: /usr/local/bin/iex - # command: /usr/local/bin/elixir ./load_processor.exs machinegun: build: diff --git a/example/config/config.exs b/example/config/config.exs new file mode 100644 index 00000000..477c9079 --- /dev/null +++ b/example/config/config.exs @@ -0,0 +1,3 @@ +import Config + +config :logger, level: :info diff --git a/example/lib/load_processor.ex b/example/lib/load_processor.ex new file mode 100644 index 00000000..8081cb03 --- /dev/null +++ b/example/lib/load_processor.ex @@ -0,0 +1,18 @@ +defmodule LoadProcessor do + @moduledoc """ + Documentation for `LoadProcessor`. + """ + + @doc """ + Hello world. 
+ + ## Examples + + iex> LoadProcessor.hello() + :world + + """ + def hello do + :world + end +end diff --git a/example/lib/load_processor/application.ex b/example/lib/load_processor/application.ex new file mode 100644 index 00000000..0ea8a3f6 --- /dev/null +++ b/example/lib/load_processor/application.ex @@ -0,0 +1,42 @@ +defmodule LoadProcessor.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + require Logger + + alias Woody.Server.Http, as: Server + + @impl true + def start(_type, _args) do + endpoint = + Server.Endpoint.any(:inet) + |> Map.put(:port, 8022) + + handlers = [ + {"/", LoadProcessor.WebHandler, []}, + LoadProcessor.ProcessorHandler.new("/v1/stateproc", + event_handler: LoadProcessor.StfuWoodyHandler + ) + ] + + children = [ + Server.child_spec(LoadProcessor, endpoint, handlers) + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: LoadProcessor.Supervisor] + + case Supervisor.start_link(children, opts) do + {:ok, pid} -> + Logger.info("Woody server now running on #{Server.endpoint(LoadProcessor)}") + {:ok, pid} + + bad_ret -> + bad_ret + end + end +end diff --git a/example/lib/load_processor/machinery.ex b/example/lib/load_processor/machinery.ex new file mode 100644 index 00000000..53a1f8ae --- /dev/null +++ b/example/lib/load_processor/machinery.ex @@ -0,0 +1,53 @@ +defmodule LoadProcessor.Machinery do + @moduledoc false + alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client + alias LoadProcessor.Utils + alias MachinegunProto.StateProcessing.{MachineDescriptor, Reference, HistoryRange, Direction} + + @enforce_keys [:client] + defstruct client: nil + + def new(url, opts \\ nil) do + new(Woody.Context.new(), url, opts) + end + + def new(ctx, url, opts) do + %__MODULE__{client: Client.new(ctx, url, List.wrap(opts))} + end + + def 
start(%__MODULE__{client: client}, ns, id, args) do + Client.start(client, ns, id, Utils.pack(args)) + end + + def get(%__MODULE__{client: client}, ns, id) do + require Direction + Client.get_machine(client, make_descr(ns, id)) + end + + def call(%__MODULE__{client: client}, ns, id, args) do + case Client.call(client, make_descr(ns, id), Utils.pack(args)) do + {:ok, result} -> {:ok, Utils.unpack(result)} + other_response -> other_response + end + end + + def simple_repair(%__MODULE__{client: client}, ns, id) do + Client.simple_repair(client, ns, make_ref(id)) + end + + ### + + defp make_descr(ns, id) do + require Direction + + %MachineDescriptor{ + ns: ns, + ref: make_ref(id), + range: %HistoryRange{limit: 5, direction: Direction.backward()} + } + end + + defp make_ref(id) do + %Reference{id: id} + end +end diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex new file mode 100644 index 00000000..ec7fd20a --- /dev/null +++ b/example/lib/load_processor/processor_handler.ex @@ -0,0 +1,149 @@ +defmodule LoadProcessor.ProcessorHandler do + @moduledoc false + alias Woody.Generated.MachinegunProto.StateProcessing.Processor + @behaviour Processor.Handler + + require Logger + + alias LoadProcessor.Utils + alias MachinegunProto.StateProcessing.{SignalArgs, CallArgs, RepairArgs} + alias MachinegunProto.StateProcessing.{Signal, InitSignal, TimeoutSignal, NotificationSignal} + alias MachinegunProto.StateProcessing.{SignalResult, CallResult, RepairResult, RepairFailed} + alias MachinegunProto.StateProcessing.{Content, HistoryRange, Direction} + alias MachinegunProto.StateProcessing.{Machine, MachineStatus, MachineStateChange} + alias MachinegunProto.StateProcessing.{ComplexAction, TimerAction, SetTimerAction} + alias MachinegunProto.Base.Timer + + def new(http_path, options \\ []) do + Processor.Handler.new(__MODULE__, http_path, options) + end + + @impl true + def process_signal(%SignalArgs{signal: signal, machine: 
machine}, _ctx, _hdlops) do + case signal do + %Signal{init: %InitSignal{arg: args}} -> + process_init(machine, args) + + %Signal{timeout: %TimeoutSignal{}} -> + process_timeout(machine) + + %Signal{notification: %NotificationSignal{arg: args}} -> + process_notification(machine, args) + + _uknown_signal -> + throw(:not_implemented) + end + end + + @impl true + def process_call(%CallArgs{arg: args, machine: machine}, _ctx, _hdlops) do + args = Utils.unpack(args) + Logger.debug("Calling machine #{machine.id} of #{machine.ns} with #{inspect(args)}") + + change = + %MachineStateChange{} + |> put_events([{:call_processed, args}]) + |> put_aux_state(get_aux_state(machine)) + + action = + %ComplexAction{} + |> set_timer(0) + + {:ok, %CallResult{response: Utils.pack("result"), change: change, action: action}} + end + + @impl true + def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlops) do + throw(:not_implemented) + end + + defp process_init(%Machine{id: id, ns: ns} = _machine, args) do + Logger.debug("Starting '#{id}' of '#{ns}' with arguments: #{inspect(Utils.unpack(args))}") + + change = + %MachineStateChange{} + |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", "counter" => 0}) + |> put_events([:counter_created]) + + action = + %ComplexAction{} + |> set_timer(1) + + {:ok, %SignalResult{change: change, action: action}} + end + + defp process_timeout(%Machine{id: id, ns: ns} = machine) do + Logger.debug("Timeouting machine #{id} of #{ns}") + + aux_state = + machine + |> get_aux_state() + |> Map.update!("counter", &(&1 + 1)) + + Logger.debug("New aux state #{inspect(aux_state)}") + + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_incremented]) + + action = + %ComplexAction{} + |> set_timer(1) + + {:ok, %SignalResult{change: change, action: action}} + end + + defp process_notification(_machine, _args) do + throw(:not_implemented) + end + + defp get_aux_state(%Machine{aux_state: 
%Content{format_version: 1, data: data}}) do + Utils.unpack(data) + end + + defp get_aux_state(_machine) do + nil + end + + defp put_aux_state(change, data, format_version \\ 1) do + # Optional 'aux_state' technically can be 'nil' but this will + # break machine, because it is not interpreted as msg_pack's 'nil' + # but actually erlang's 'undefined'. + # In another words, default 'nil' value of 'aux_state' does not + # leaves previous value unchanged but always expects it to be + # explicitly set. + %MachineStateChange{change | aux_state: to_content(data, format_version)} + end + + defp put_events(change, events, format_version \\ 1) do + wrapped_events = + events + |> Enum.map(&to_content(&1, format_version)) + + %MachineStateChange{change | events: wrapped_events} + end + + defp to_content(data, format_version) do + %Content{format_version: format_version, data: Utils.pack(data)} + end + + defp set_timer(action, timeout, deadline \\ nil, range \\ nil) do + timer = %SetTimerAction{ + timer: %Timer{timeout: timeout, deadline: deadline}, + range: maybe_last_n_range(range, 5), + timeout: nil + } + + %ComplexAction{action | timer: %TimerAction{set_timer: timer}} + end + + defp maybe_last_n_range(nil, limit) do + require Direction + %HistoryRange{after: nil, limit: limit, direction: Direction.backward()} + end + + defp maybe_last_n_range(range, _limit) do + range + end +end diff --git a/example/lib/load_processor/stfu_woody_handler.ex b/example/lib/load_processor/stfu_woody_handler.ex new file mode 100644 index 00000000..84a0a689 --- /dev/null +++ b/example/lib/load_processor/stfu_woody_handler.ex @@ -0,0 +1,34 @@ +defmodule LoadProcessor.StfuWoodyHandler do + @moduledoc false + alias :woody_event_handler, as: WoodyEventHandler + alias Woody.EventHandler.Formatter + @behaviour WoodyEventHandler + require Logger + + @exposed_meta [ + :event, + :service, + :function, + :type, + :metadata, + :url, + :deadline, + :execution_duration_ms + ] + + @allowed_severity [:warning, 
:error] + + def handle_event(event, rpc_id, meta, _opts) do + case WoodyEventHandler.get_event_severity(event, meta) do + level when level in @allowed_severity -> + Logger.log( + level, + Formatter.format(rpc_id, event, meta), + WoodyEventHandler.format_meta(event, meta, @exposed_meta) + ) + + _ -> + :ok + end + end +end diff --git a/example/lib/load_processor/utils.ex b/example/lib/load_processor/utils.ex new file mode 100644 index 00000000..cb64e46a --- /dev/null +++ b/example/lib/load_processor/utils.ex @@ -0,0 +1,20 @@ +defmodule LoadProcessor.Utils do + @moduledoc false + alias MachinegunProto.MsgPack + + def pack(nil) do + %MsgPack.Value{nl: %MsgPack.Nil{}} + end + + def pack(data) do + %MsgPack.Value{bin: :erlang.term_to_binary(data)} + end + + def unpack(%MsgPack.Value{nl: %MsgPack.Nil{}}) do + nil + end + + def unpack(%MsgPack.Value{bin: bin}) do + :erlang.binary_to_term(bin) + end +end diff --git a/example/lib/load_processor/web_handler.ex b/example/lib/load_processor/web_handler.ex new file mode 100644 index 00000000..c24b28c8 --- /dev/null +++ b/example/lib/load_processor/web_handler.ex @@ -0,0 +1,29 @@ +defmodule LoadProcessor.WebHandler do + @moduledoc false + alias LoadProcessor.Machinery + + def init(req, state) do + {:ok, machine} = + Machinery.new("http://machinegun:8022/v1/automaton") + |> Machinery.start("load-test", random_id(), "start please") + + req = + :cowboy_req.reply( + 200, + %{"content-type" => "text/plain"}, + "Starting machine now\n#{inspect(machine)}\n", + req + ) + + {:ok, req, state} + end + + def terminate(_, _, _) do + :ok + end + + defp random_id() do + <> = :snowflake.new() + :genlib_format.format_int_base(id, 62) + end +end diff --git a/example/load_processor.exs b/example/load_processor.exs deleted file mode 100644 index 184cf695..00000000 --- a/example/load_processor.exs +++ /dev/null @@ -1,239 +0,0 @@ -Mix.install([ - {:thrift, - git: "https://github.com/valitydev/elixir-thrift.git", - branch: "ft/subst-reserved-vars", - 
override: true}, - {:mg_proto, git: "https://github.com/valitydev/machinegun-proto", branch: "ft/elixir-support"}, - {:snowflake, git: "https://github.com/valitydev/snowflake.git", branch: "master"}, - {:genlib, git: "https://github.com/valitydev/genlib.git", branch: "master"} -]) - -defmodule LoadProcessor do - defmodule Utils do - @moduledoc false - alias MachinegunProto.MsgPack - - def pack(data) do - %MsgPack.Value{bin: :erlang.term_to_binary(data)} - end - - def unpack(%MsgPack.Value{bin: bin}) do - :erlang.binary_to_term(bin) - end - end - - defmodule ProcessorHandler do - @moduledoc false - alias Woody.Generated.MachinegunProto.StateProcessing.Processor - @behaviour Processor.Handler - - require Logger - - alias LoadProcessor.Utils - alias MachinegunProto.StateProcessing.{SignalArgs, CallArgs, RepairArgs} - alias MachinegunProto.StateProcessing.{Signal, InitSignal, TimeoutSignal, NotificationSignal} - alias MachinegunProto.StateProcessing.{SignalResult, CallResult, RepairResult, RepairFailed} - alias MachinegunProto.StateProcessing.{Content, HistoryRange, Direction} - alias MachinegunProto.StateProcessing.{Machine, MachineStatus, MachineStateChange} - alias MachinegunProto.StateProcessing.{ComplexAction, TimerAction, SetTimerAction} - alias MachinegunProto.Base.Timer - - def new(http_path, options \\ []) do - Processor.Handler.new(__MODULE__, http_path, options) - end - - @impl true - def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, _hdlops) do - result = - case signal do - %Signal{init: %InitSignal{arg: args}} -> - process_init(machine, args) - - %Signal{timeout: %TimeoutSignal{}} -> - process_timeout(machine) - - %Signal{notification: %NotificationSignal{arg: args}} -> - process_notification(machine, args) - - _uknown_signal -> - throw(:not_implemented) - end - - {:ok, result} - end - - @impl true - def process_call(%CallArgs{arg: args, machine: machine}, _ctx, _hdlops) do - args = Utils.unpack(args) - Logger.info("Calling machine 
#{machine.id} of #{machine.ns} with #{inspect(args)}") - - change = - %MachineStateChange{} - |> put_events([{:call_processed, args}]) - - action = - %ComplexAction{} - |> set_timer(0) - - %CallResult{response: Utils.pack("result"), change: change, action: action} - end - - @impl true - def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlops) do - throw(:not_implemented) - end - - defp process_init(%Machine{id: id, ns: ns} = _machine, args) do - Logger.info("Starting '#{id}' of '#{ns}' with arguments: #{inspect(Utils.unpack(args))}") - - change = - %MachineStateChange{} - |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", "counter" => 0}) - |> put_events([:counter_created]) - - action = - %ComplexAction{} - |> set_timer(1) - - %SignalResult{change: change, action: action} - end - - defp process_timeout(%Machine{id: id, ns: ns} = machine) do - Logger.info("Timeouting machine #{id} of #{ns}") - - aux_state = - machine - |> get_aux_state() - |> Map.update!("counter", &(&1 + 1)) - - Logger.info("New aux state #{inspect(aux_state)}") - - change = - %MachineStateChange{} - |> put_aux_state(aux_state) - |> put_events([:counter_incremented]) - - action = - %ComplexAction{} - |> set_timer(1) - - %SignalResult{change: change, action: action} - end - - defp process_notification(_machine, _args) do - throw(:not_implemented) - end - - defp get_aux_state(%Machine{aux_state: %Content{format_version: 1, data: data}}) do - Utils.unpack(data) - end - - defp get_aux_state(_machine) do - nil - end - - defp put_aux_state(change, data, format_version \\ 1) do - %MachineStateChange{change | aux_state: to_content(data, format_version)} - end - - defp put_events(change, events, format_version \\ 1) do - wrapped_events = - events - |> Enum.map(&to_content(&1, format_version)) - - %MachineStateChange{change | events: wrapped_events} - end - - defp to_content(data, format_version) do - %Content{format_version: format_version, data: Utils.pack(data)} - end - - 
defp set_timer(action, timeout, deadline \\ nil, range \\ nil) do - timer = %SetTimerAction{ - timer: %Timer{timeout: timeout, deadline: deadline}, - range: maybe_full_range(range), - timeout: nil - } - - %ComplexAction{action | timer: %TimerAction{set_timer: timer}} - end - - defp maybe_full_range(nil) do - require Direction - %HistoryRange{after: nil, limit: nil, direction: Direction.forward()} - end - - defp maybe_full_range(range) do - range - end - end - - defmodule Machinery do - @moduledoc false - alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client - alias LoadProcessor.Utils - - defstruct url: nil, opts: nil - - def new(url, opts \\ nil) do - %__MODULE__{url: url, opts: opts} - end - - def start(%__MODULE__{url: url, opts: opts}, ns, id, args) do - Woody.Context.new() - |> Client.new(url, List.wrap(opts)) - |> Client.start(ns, id, Utils.pack(args)) - end - end - - defmodule WebHandler do - @moduledoc false - alias LoadProcessor.Machinery - - def init(req, state) do - {:ok, machine} = - Machinery.new("http://machinegun:8022/v1/automaton") - |> Machinery.start("load-test", random_id(), "start please") - - req = - :cowboy_req.reply( - 200, - %{"content-type" => "text/plain"}, - "Starting machine now\n#{inspect(machine)}\n", - req - ) - - {:ok, req, state} - end - - def terminate(_, _, _) do - :ok - end - - defp random_id() do - <> = :snowflake.new() - :genlib_format.format_int_base(id, 62) - end - end -end - -require Logger - -alias Woody.Server.Http, as: Server - -endpoint = - Server.Endpoint.any(:inet) - |> Map.put(:port, 8022) - -handlers = [ - {"/", LoadProcessor.WebHandler, []}, - LoadProcessor.ProcessorHandler.new("/v1/stateproc", event_handler: Woody.EventHandler.Default) -] - -{:ok, _pid} = - Server.child_spec(LoadProcessor, endpoint, handlers) - |> List.wrap() - |> Supervisor.start_link(strategy: :one_for_one) - -Logger.info("Woody server now running on #{Server.endpoint(LoadProcessor)}") -Process.sleep(:infinity) diff --git 
a/example/machinegun/simple_counter_flow.yaml b/example/machinegun/simple_counter_flow.yaml new file mode 100644 index 00000000..3ed0aa78 --- /dev/null +++ b/example/machinegun/simple_counter_flow.yaml @@ -0,0 +1,10 @@ + +version: 1 + +storage: ~ +cluster: ~ +process_registry: ~ +limits: ~ +logging: ~ +erlang: ~ +kafka: ~ diff --git a/example/mix.exs b/example/mix.exs new file mode 100644 index 00000000..d6cc8509 --- /dev/null +++ b/example/mix.exs @@ -0,0 +1,55 @@ +defmodule LoadProcessor.MixProject do + use Mix.Project + + def project do + [ + app: :load_processor, + version: "0.1.0", + elixir: "~> 1.16", + start_permanent: Mix.env() == :prod, + deps: deps(), + releases: releases() + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + extra_applications: [:logger], + mod: {LoadProcessor.Application, []} + ] + end + + defp releases do + [ + api_key_mgmt: [ + version: "0.1.0", + applications: [ + api_key_mgmt: :permanent, + logstash_logger_formatter: :load + ], + include_executables_for: [:unix], + include_erts: false + ] + ] + end + + # Run "mix help deps" to learn about dependencies. 
+ defp deps do + [ + {:thrift, + git: "https://github.com/valitydev/elixir-thrift.git", + branch: "ft/subst-reserved-vars", + override: true}, + {:mg_proto, + git: "https://github.com/valitydev/machinegun-proto", branch: "ft/elixir-support"}, + {:snowflake, git: "https://github.com/valitydev/snowflake.git", branch: "master"}, + {:genlib, git: "https://github.com/valitydev/genlib.git", branch: "master"}, + {:logstash_logger_formatter, + git: "https://github.com/valitydev/logstash_logger_formatter.git", + branch: "master", + only: [:prod], + runtime: false} + ] + end +end diff --git a/example/mix.lock b/example/mix.lock new file mode 100644 index 00000000..97ca680a --- /dev/null +++ b/example/mix.lock @@ -0,0 +1,27 @@ +%{ + "cache": {:hex, :cache, "2.3.3", "b23a5fe7095445a88412a6e614c933377e0137b44ffed77c9b3fef1a731a20b2", [:rebar3], [], "hexpm", "44516ce6fa03594d3a2af025dd3a87bfe711000eb730219e1ddefc816e0aa2f4"}, + "certifi": {:hex, :certifi, "2.8.0", "d4fb0a6bb20b7c9c3643e22507e42f356ac090a1dcea9ab99e27e0376d695eba", [:rebar3], [], "hexpm", "6ac7efc1c6f8600b08d625292d4bbf584e14847ce1b6b5c44d983d273e1097ea"}, + "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, + "cowboy": {:hex, :cowboy, "2.9.0", "865dd8b6607e14cf03282e10e934023a1bd8be6f6bacf921a7e2a96d800cd452", [:make, :rebar3], [{:cowlib, "2.11.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "2c729f934b4e1aa149aff882f57c6372c15399a20d54f65c8d67bef583021bde"}, + "cowlib": {:hex, :cowlib, "2.11.0", "0b9ff9c346629256c42ebe1eeb769a83c6cb771a6ee5960bd110ab0b9b872063", [:make, :rebar3], [], "hexpm", "2b3e9da0b21c4565751a6d4901c20d1b4cc25cbb7fd50d91d2ab6dd287bc86a9"}, + "genlib": {:git, "https://github.com/valitydev/genlib.git", "f6074551d6586998e91a97ea20acb47241254ff3", [branch: "master"]}, 
+ "gproc": {:hex, :gproc, "0.9.0", "853ccb7805e9ada25d227a157ba966f7b34508f386a3e7e21992b1b484230699", [:rebar3], [], "hexpm", "587e8af698ccd3504cf4ba8d90f893ede2b0f58cabb8a916e2bf9321de3cf10b"}, + "hackney": {:hex, :hackney, "1.18.0", "c4443d960bb9fba6d01161d01cd81173089686717d9490e5d3606644c48d121f", [:rebar3], [{:certifi, "~> 2.8.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "9afcda620704d720db8c6a3123e9848d09c87586dc1c10479c42627b905b5c5e"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, + "mg_proto": {:git, "https://github.com/valitydev/machinegun-proto", "5c2bcfdde8d91d7bd31011af8d6be1c558e9f2d3", [branch: "ft/elixir-support"]}, + "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "opentelemetry_api": {:hex, :opentelemetry_api, "1.2.1", "7b69ed4f40025c005de0b74fce8c0549625d59cb4df12d15c32fe6dc5076ff42", [:mix, :rebar3], [{:opentelemetry_semantic_conventions, "~> 0.2", [hex: 
:opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "6d7a27b7cad2ad69a09cabf6670514cafcec717c8441beb5c96322bac3d05350"}, + "opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "0.2.0", "b67fe459c2938fcab341cb0951c44860c62347c005ace1b50f8402576f241435", [:mix, :rebar3], [], "hexpm", "d61fa1f5639ee8668d74b527e6806e0503efc55a42db7b5f39939d84c07d6895"}, + "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, + "prometheus": {:hex, :prometheus, "4.8.1", "fa76b152555273739c14b06f09f485cf6d5d301fe4e9d31b7ff803d26025d7a0", [:mix, :rebar3], [{:quantile_estimator, "~> 0.2.1", [hex: :quantile_estimator, repo: "hexpm", optional: false]}], "hexpm", "6edfbe928d271c7f657a6f2c46258738086584bd6cae4a000b8b9a6009ba23a5"}, + "quantile_estimator": {:hex, :quantile_estimator, "0.2.1", "ef50a361f11b5f26b5f16d0696e46a9e4661756492c981f7b2229ef42ff1cd15", [:rebar3], [], "hexpm", "282a8a323ca2a845c9e6f787d166348f776c1d4a41ede63046d72d422e3da946"}, + "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"}, + "snowflake": {:git, "https://github.com/valitydev/snowflake.git", "de159486ef40cec67074afe71882bdc7f7deab72", [branch: "master"]}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, + "thrift": {:git, "https://github.com/valitydev/elixir-thrift.git", 
"f561f7746c3f5634021258c86ea02b94579ef6eb", [branch: "ft/subst-reserved-vars"]}, + "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, + "woody": {:git, "https://github.com/valitydev/woody_erlang.git", "c4f6b9c62c77699d7489c6913f1a157cd9c7ca1e", [branch: "ft/bump-compat-woody-ex"]}, + "woody_ex": {:git, "https://github.com/valitydev/woody_ex.git", "e8d89c1dc8be889a0371227730b45b84a94bd67d", [branch: "ft/subst-reserved-vars"]}, +} diff --git a/example/test/load_processor_test.exs b/example/test/load_processor_test.exs new file mode 100644 index 00000000..c94358c7 --- /dev/null +++ b/example/test/load_processor_test.exs @@ -0,0 +1,8 @@ +defmodule LoadProcessorTest do + use ExUnit.Case + doctest LoadProcessor + + test "greets the world" do + assert LoadProcessor.hello() == :world + end +end diff --git a/example/test/test_helper.exs b/example/test/test_helper.exs new file mode 100644 index 00000000..869559e7 --- /dev/null +++ b/example/test/test_helper.exs @@ -0,0 +1 @@ +ExUnit.start() From 9e9c05d6a5c10c394110e75b3c0f53d67243442e Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 2 Jul 2024 18:32:06 +0300 Subject: [PATCH 19/31] Adds notes --- example/README.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/example/README.md b/example/README.md index 935947fb..dd1e8bf2 100644 --- a/example/README.md +++ b/example/README.md @@ -1,3 +1,29 @@ # LoadProcessor **TODO: Add description** + + +## Receipts + +``` elixir +# Alias machinery helper for convinence +alias LoadProcessor.Machinery, as: M + +# Create automaton client +automaton = M.new("http://machinegun:8022/v1/automaton") + +# Start machine +automaton |> M.start("load-test", "my-machine", "start payload") + +# Call machine +automaton |> M.call("load-test", "my-machine", "call payload") + +# Repair machine 
+automaton |> M.simple_repair("load-test", "my-machine") + +# Get machine +automaton |> M.get("load-test", "my-machine") + +# Get machine status +automaton |> M.get("load-test", "my-machine") |> elem(1) |> Map.get(:status) +``` From e62b7fabca1b044b64331ddef452d28a38053180 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 4 Jul 2024 10:05:32 +0300 Subject: [PATCH 20/31] Refactors machine client api --- example/README.md | 18 ++--- example/lib/load_processor/application.ex | 31 +++++---- example/lib/load_processor/machine.ex | 69 +++++++++++++++++++ example/lib/load_processor/machine/event.ex | 23 +++++++ example/lib/load_processor/machine/history.ex | 15 ++++ example/lib/load_processor/machine/utils.ex | 37 ++++++++++ example/lib/load_processor/machinery.ex | 53 -------------- .../lib/load_processor/processor_handler.ex | 57 +++++++-------- example/lib/load_processor/utils.ex | 20 ------ example/lib/load_processor/web_handler.ex | 7 +- example/machinegun/simple_counter_flow.yaml | 10 --- 11 files changed, 196 insertions(+), 144 deletions(-) create mode 100644 example/lib/load_processor/machine.ex create mode 100644 example/lib/load_processor/machine/event.ex create mode 100644 example/lib/load_processor/machine/history.ex create mode 100644 example/lib/load_processor/machine/utils.ex delete mode 100644 example/lib/load_processor/machinery.ex delete mode 100644 example/lib/load_processor/utils.ex delete mode 100644 example/machinegun/simple_counter_flow.yaml diff --git a/example/README.md b/example/README.md index dd1e8bf2..7084a88f 100644 --- a/example/README.md +++ b/example/README.md @@ -7,23 +7,23 @@ ``` elixir # Alias machinery helper for convinence -alias LoadProcessor.Machinery, as: M +alias LoadProcessor.Machine -# Create automaton client -automaton = M.new("http://machinegun:8022/v1/automaton") +# Create machine client +machine = Machine.new("http://machinegun:8022/v1/automaton", "load-test", "my-machine") # Start machine -automaton |> 
M.start("load-test", "my-machine", "start payload") +machine |> Machine.start("start payload") -# Call machine -automaton |> M.call("load-test", "my-machine", "call payload") +# Call machine (returns call result, not machine client) +machine |> Machine.call("call payload") # Repair machine -automaton |> M.simple_repair("load-test", "my-machine") +machine |> Machine.simple_repair() # Get machine -automaton |> M.get("load-test", "my-machine") +machine |> Machine.get() # Get machine status -automaton |> M.get("load-test", "my-machine") |> elem(1) |> Map.get(:status) +machine |> Machine.get() |> Map.get(:status) ``` diff --git a/example/lib/load_processor/application.ex b/example/lib/load_processor/application.ex index 0ea8a3f6..99252236 100644 --- a/example/lib/load_processor/application.ex +++ b/example/lib/load_processor/application.ex @@ -8,22 +8,13 @@ defmodule LoadProcessor.Application do require Logger alias Woody.Server.Http, as: Server + alias LoadProcessor.StfuWoodyHandler, as: WoodyHandler + alias LoadProcessor.ProcessorHandler @impl true def start(_type, _args) do - endpoint = - Server.Endpoint.any(:inet) - |> Map.put(:port, 8022) - - handlers = [ - {"/", LoadProcessor.WebHandler, []}, - LoadProcessor.ProcessorHandler.new("/v1/stateproc", - event_handler: LoadProcessor.StfuWoodyHandler - ) - ] - children = [ - Server.child_spec(LoadProcessor, endpoint, handlers) + server_spec([{"/", LoadProcessor.WebHandler, []}]) ] # See https://hexdocs.pm/elixir/Supervisor.html @@ -32,11 +23,25 @@ defmodule LoadProcessor.Application do case Supervisor.start_link(children, opts) do {:ok, pid} -> - Logger.info("Woody server now running on #{Server.endpoint(LoadProcessor)}") + Logger.info("Woody server pnow running on #{server_endpoint()}") {:ok, pid} bad_ret -> bad_ret end end + + defp server_spec(additional_handlers) do + endpoint = + Server.Endpoint.any(:inet) + |> Map.put(:port, 8022) + + Server.child_spec(LoadProcessor, endpoint, [ + 
ProcessorHandler.new("/v1/stateproc", event_handler: WoodyHandler) | additional_handlers + ]) + end + + defp server_endpoint() do + Server.endpoint(LoadProcessor) + end end diff --git a/example/lib/load_processor/machine.ex b/example/lib/load_processor/machine.ex new file mode 100644 index 00000000..759c5155 --- /dev/null +++ b/example/lib/load_processor/machine.ex @@ -0,0 +1,69 @@ +defmodule LoadProcessor.Machine do + @moduledoc false + + alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client + alias MachinegunProto.StateProcessing.{MachineDescriptor, Reference, HistoryRange, Direction} + alias LoadProcessor.Machine.{History, Utils} + + require Direction + + @default_range %HistoryRange{limit: 10, direction: Direction.backward()} + + @enforce_keys [:client, :ns, :id] + defstruct client: nil, + ns: nil, + id: nil, + status: nil, + history: nil, + aux_state: nil + + def new(automaton_url, ns, id) do + new(Woody.Context.new(), automaton_url, nil, ns, id) + end + + def new(woody_ctx, automaton_url, automaton_opts, ns, id) do + %__MODULE__{ + client: Client.new(woody_ctx, automaton_url, List.wrap(automaton_opts)), + ns: ns, + id: id + } + end + + def loaded?(%__MODULE__{status: nil}), do: false + def loaded?(%__MODULE__{status: _}), do: true + + def start(%__MODULE__{client: client, ns: ns, id: id} = machine, args) do + _ = Client.start!(client, ns, id, Utils.pack(args)) + get(machine) + end + + def get(%__MODULE__{client: client, ns: ns, id: id} = machine) do + machine_state = Client.get_machine!(client, make_descr(ns, id, nil)) + + %{ + machine + | history: History.from_machine_state(machine_state), + status: machine_state.status, + aux_state: Utils.marshal(:aux_state, machine_state.aux_state) + } + end + + def call(%__MODULE__{client: client, ns: ns, id: id}, args) do + client + |> Client.call!(make_descr(ns, id, nil), Utils.pack(args)) + |> Utils.unpack() + end + + def simple_repair(%__MODULE__{client: client, ns: ns, id: id} = machine) do + _ = 
Client.simple_repair(client, ns, make_ref(id)) + machine + end + + defp make_descr(ns, id, range) do + %MachineDescriptor{ns: ns, ref: make_ref(id), range: range || @default_range} + end + + defp make_ref(id) do + %Reference{id: id} + end +end diff --git a/example/lib/load_processor/machine/event.ex b/example/lib/load_processor/machine/event.ex new file mode 100644 index 00000000..ee2bde04 --- /dev/null +++ b/example/lib/load_processor/machine/event.ex @@ -0,0 +1,23 @@ +defmodule LoadProcessor.Machine.Event do + @moduledoc false + + alias MachinegunProto.StateProcessing.Event, as: MachineEvent + alias LoadProcessor.Machine.Utils + + @enforce_keys [:id, :occurred_at, :body] + defstruct id: nil, occurred_at: nil, body: nil + + def from_machine_history(history) do + Enum.map(history, &construct_event/1) + end + + defp construct_event(%MachineEvent{} = machine_event) do + {:ok, occurred_at, _rest} = DateTime.from_iso8601(machine_event.created_at) + + %__MODULE__{ + id: machine_event.id, + occurred_at: occurred_at, + body: Utils.marshal(:event, machine_event) + } + end +end diff --git a/example/lib/load_processor/machine/history.ex b/example/lib/load_processor/machine/history.ex new file mode 100644 index 00000000..6908a948 --- /dev/null +++ b/example/lib/load_processor/machine/history.ex @@ -0,0 +1,15 @@ +defmodule LoadProcessor.Machine.History do + alias MachinegunProto.StateProcessing.Machine, as: MachineState + alias MachinegunProto.StateProcessing.{HistoryRange, Direction} + alias LoadProcessor.Machine.Event + require Direction + + @enforce_keys [:events, :range] + defstruct events: nil, range: nil + + def from_machine_state(%MachineState{history: history, history_range: history_range}) do + events = Event.from_machine_history(history) + + %__MODULE__{events: events, range: history_range} + end +end diff --git a/example/lib/load_processor/machine/utils.ex b/example/lib/load_processor/machine/utils.ex new file mode 100644 index 00000000..a79b3d1e --- /dev/null +++ 
b/example/lib/load_processor/machine/utils.ex @@ -0,0 +1,37 @@ +defmodule LoadProcessor.Machine.Utils do + @moduledoc false + alias MachinegunProto.MsgPack + alias MachinegunProto.StateProcessing.Content + alias MachinegunProto.StateProcessing.Event + + def pack(nil) do + %MsgPack.Value{nl: %MsgPack.Nil{}} + end + + def pack(data) do + %MsgPack.Value{bin: :erlang.term_to_binary(data)} + end + + def unpack(%MsgPack.Value{nl: %MsgPack.Nil{}}) do + nil + end + + def unpack(%MsgPack.Value{bin: bin}) do + :erlang.binary_to_term(bin) + end + + @format_version 1 + + def marshal(:content, %Content{format_version: @format_version, data: data}), do: unpack(data) + def marshal(:content, _), do: nil + def marshal(:aux_state, value), do: marshal(:content, value) + def marshal(:event, %Event{format_version: @format_version, data: data}), do: unpack(data) + def marshal(type, _value), do: raise("Marshalling of type #{inspect(type)} is not supported") + + def unmarshal(:content, nil), do: nil + def unmarshal(:content, value), do: %Content{format_version: @format_version, data: pack(value)} + def unmarshal(:aux_state, value), do: unmarshal(:content, value) + + def unmarshal(type, _value), + do: raise("Unmarshalling of type #{inspect(type)} is not supported") +end diff --git a/example/lib/load_processor/machinery.ex b/example/lib/load_processor/machinery.ex deleted file mode 100644 index 53a1f8ae..00000000 --- a/example/lib/load_processor/machinery.ex +++ /dev/null @@ -1,53 +0,0 @@ -defmodule LoadProcessor.Machinery do - @moduledoc false - alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client - alias LoadProcessor.Utils - alias MachinegunProto.StateProcessing.{MachineDescriptor, Reference, HistoryRange, Direction} - - @enforce_keys [:client] - defstruct client: nil - - def new(url, opts \\ nil) do - new(Woody.Context.new(), url, opts) - end - - def new(ctx, url, opts) do - %__MODULE__{client: Client.new(ctx, url, List.wrap(opts))} - end - - def 
start(%__MODULE__{client: client}, ns, id, args) do - Client.start(client, ns, id, Utils.pack(args)) - end - - def get(%__MODULE__{client: client}, ns, id) do - require Direction - Client.get_machine(client, make_descr(ns, id)) - end - - def call(%__MODULE__{client: client}, ns, id, args) do - case Client.call(client, make_descr(ns, id), Utils.pack(args)) do - {:ok, result} -> {:ok, Utils.unpack(result)} - other_response -> other_response - end - end - - def simple_repair(%__MODULE__{client: client}, ns, id) do - Client.simple_repair(client, ns, make_ref(id)) - end - - ### - - defp make_descr(ns, id) do - require Direction - - %MachineDescriptor{ - ns: ns, - ref: make_ref(id), - range: %HistoryRange{limit: 5, direction: Direction.backward()} - } - end - - defp make_ref(id) do - %Reference{id: id} - end -end diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex index ec7fd20a..7d4883a0 100644 --- a/example/lib/load_processor/processor_handler.ex +++ b/example/lib/load_processor/processor_handler.ex @@ -5,12 +5,12 @@ defmodule LoadProcessor.ProcessorHandler do require Logger - alias LoadProcessor.Utils + alias LoadProcessor.Machine.Utils alias MachinegunProto.StateProcessing.{SignalArgs, CallArgs, RepairArgs} alias MachinegunProto.StateProcessing.{Signal, InitSignal, TimeoutSignal, NotificationSignal} - alias MachinegunProto.StateProcessing.{SignalResult, CallResult, RepairResult, RepairFailed} - alias MachinegunProto.StateProcessing.{Content, HistoryRange, Direction} - alias MachinegunProto.StateProcessing.{Machine, MachineStatus, MachineStateChange} + alias MachinegunProto.StateProcessing.{SignalResult, CallResult} + alias MachinegunProto.StateProcessing.{HistoryRange, Direction} + alias MachinegunProto.StateProcessing.{Machine, MachineStateChange} alias MachinegunProto.StateProcessing.{ComplexAction, TimerAction, SetTimerAction} alias MachinegunProto.Base.Timer @@ -20,19 +20,19 @@ defmodule 
LoadProcessor.ProcessorHandler do @impl true def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, _hdlops) do - case signal do - %Signal{init: %InitSignal{arg: args}} -> - process_init(machine, args) + case signal do + %Signal{init: %InitSignal{arg: args}} -> + process_init(machine, args) - %Signal{timeout: %TimeoutSignal{}} -> - process_timeout(machine) + %Signal{timeout: %TimeoutSignal{}} -> + process_timeout(machine) - %Signal{notification: %NotificationSignal{arg: args}} -> - process_notification(machine, args) + %Signal{notification: %NotificationSignal{arg: args}} -> + process_notification(machine, args) - _uknown_signal -> - throw(:not_implemented) - end + _uknown_signal -> + throw(:not_implemented) + end end @impl true @@ -98,40 +98,31 @@ defmodule LoadProcessor.ProcessorHandler do throw(:not_implemented) end - defp get_aux_state(%Machine{aux_state: %Content{format_version: 1, data: data}}) do - Utils.unpack(data) + defp get_aux_state(%Machine{aux_state: aux_state}) do + Utils.marshal(:aux_state, aux_state) end - defp get_aux_state(_machine) do - nil - end - - defp put_aux_state(change, data, format_version \\ 1) do + defp put_aux_state(change, data) do # Optional 'aux_state' technically can be 'nil' but this will # break machine, because it is not interpreted as msg_pack's 'nil' - # but actually erlang's 'undefined'. - # In another words, default 'nil' value of 'aux_state' does not - # leaves previous value unchanged but always expects it to be - # explicitly set. - %MachineStateChange{change | aux_state: to_content(data, format_version)} + # but actually erlang's 'undefined'. In another words, default + # 'nil' value of 'aux_state' does not leave previous value + # unchanged but always expects it to be explicitly set. 
+ %MachineStateChange{change | aux_state: Utils.unmarshal(:aux_state, data)} end - defp put_events(change, events, format_version \\ 1) do + defp put_events(change, events) do wrapped_events = events - |> Enum.map(&to_content(&1, format_version)) + |> Enum.map(&Utils.unmarshal(:content, &1)) %MachineStateChange{change | events: wrapped_events} end - defp to_content(data, format_version) do - %Content{format_version: format_version, data: Utils.pack(data)} - end - defp set_timer(action, timeout, deadline \\ nil, range \\ nil) do timer = %SetTimerAction{ timer: %Timer{timeout: timeout, deadline: deadline}, - range: maybe_last_n_range(range, 5), + range: maybe_last_n_range(range, 10), timeout: nil } diff --git a/example/lib/load_processor/utils.ex b/example/lib/load_processor/utils.ex deleted file mode 100644 index cb64e46a..00000000 --- a/example/lib/load_processor/utils.ex +++ /dev/null @@ -1,20 +0,0 @@ -defmodule LoadProcessor.Utils do - @moduledoc false - alias MachinegunProto.MsgPack - - def pack(nil) do - %MsgPack.Value{nl: %MsgPack.Nil{}} - end - - def pack(data) do - %MsgPack.Value{bin: :erlang.term_to_binary(data)} - end - - def unpack(%MsgPack.Value{nl: %MsgPack.Nil{}}) do - nil - end - - def unpack(%MsgPack.Value{bin: bin}) do - :erlang.binary_to_term(bin) - end -end diff --git a/example/lib/load_processor/web_handler.ex b/example/lib/load_processor/web_handler.ex index c24b28c8..162f7157 100644 --- a/example/lib/load_processor/web_handler.ex +++ b/example/lib/load_processor/web_handler.ex @@ -1,17 +1,12 @@ defmodule LoadProcessor.WebHandler do @moduledoc false - alias LoadProcessor.Machinery def init(req, state) do - {:ok, machine} = - Machinery.new("http://machinegun:8022/v1/automaton") - |> Machinery.start("load-test", random_id(), "start please") - req = :cowboy_req.reply( 200, %{"content-type" => "text/plain"}, - "Starting machine now\n#{inspect(machine)}\n", + "New random id\n#{inspect(random_id())}\n", req ) diff --git 
a/example/machinegun/simple_counter_flow.yaml b/example/machinegun/simple_counter_flow.yaml deleted file mode 100644 index 3ed0aa78..00000000 --- a/example/machinegun/simple_counter_flow.yaml +++ /dev/null @@ -1,10 +0,0 @@ - -version: 1 - -storage: ~ -cluster: ~ -process_registry: ~ -limits: ~ -logging: ~ -erlang: ~ -kafka: ~ From 2305b0b5a3d7282162a07d4b4d3efd1195cd0dd7 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 4 Jul 2024 10:48:52 +0300 Subject: [PATCH 21/31] Fixes typos --- example/lib/load_processor.ex | 17 +---------------- example/lib/load_processor/application.ex | 2 +- example/test/load_processor_test.exs | 4 ++-- 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/example/lib/load_processor.ex b/example/lib/load_processor.ex index 8081cb03..8f9282d8 100644 --- a/example/lib/load_processor.ex +++ b/example/lib/load_processor.ex @@ -1,18 +1,3 @@ defmodule LoadProcessor do - @moduledoc """ - Documentation for `LoadProcessor`. - """ - - @doc """ - Hello world. 
- - ## Examples - - iex> LoadProcessor.hello() - :world - - """ - def hello do - :world - end + @moduledoc false end diff --git a/example/lib/load_processor/application.ex b/example/lib/load_processor/application.ex index 99252236..34e67245 100644 --- a/example/lib/load_processor/application.ex +++ b/example/lib/load_processor/application.ex @@ -23,7 +23,7 @@ defmodule LoadProcessor.Application do case Supervisor.start_link(children, opts) do {:ok, pid} -> - Logger.info("Woody server pnow running on #{server_endpoint()}") + Logger.info("Woody server now running on #{server_endpoint()}") {:ok, pid} bad_ret -> diff --git a/example/test/load_processor_test.exs b/example/test/load_processor_test.exs index c94358c7..6f5cdf21 100644 --- a/example/test/load_processor_test.exs +++ b/example/test/load_processor_test.exs @@ -2,7 +2,7 @@ defmodule LoadProcessorTest do use ExUnit.Case doctest LoadProcessor - test "greets the world" do - assert LoadProcessor.hello() == :world + test "truthness" do + assert true end end From cc9f6e9229bbd8d413115f230c9ae7d21da80317 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 4 Jul 2024 14:36:57 +0300 Subject: [PATCH 22/31] Adds machine start load script --- example/README.md | 2 +- example/compose.yaml | 2 +- example/config/config.exs | 4 ++ example/lib/load_processor.ex | 26 ++++++++++++ example/lib/load_processor/machine.ex | 20 +++++++-- example/lib/load_processor/machine/history.ex | 2 - .../lib/load_processor/processor_handler.ex | 41 ++++++++++++------- example/machinegun/config.yaml | 2 +- 8 files changed, 76 insertions(+), 23 deletions(-) diff --git a/example/README.md b/example/README.md index 7084a88f..f68143fd 100644 --- a/example/README.md +++ b/example/README.md @@ -10,7 +10,7 @@ alias LoadProcessor.Machine # Create machine client -machine = Machine.new("http://machinegun:8022/v1/automaton", "load-test", "my-machine") +machine = Machine.new("load-test") # Start machine machine |> Machine.start("start payload") diff 
--git a/example/compose.yaml b/example/compose.yaml index 2038e797..577e0d90 100644 --- a/example/compose.yaml +++ b/example/compose.yaml @@ -76,7 +76,7 @@ services: condition: on-failure riakdb: &member-node - image: riak-valitydev:latest + image: docker.io/basho/riak-kv:ubuntu-2.2.3 environment: - CLUSTER_NAME=riakkv - COORDINATOR_NODE=riakdb diff --git a/example/config/config.exs b/example/config/config.exs index 477c9079..a056d653 100644 --- a/example/config/config.exs +++ b/example/config/config.exs @@ -1,3 +1,7 @@ import Config config :logger, level: :info + +config :load_processor, :automaton, + url: "http://machinegun:8022/v1/automaton", + options: nil diff --git a/example/lib/load_processor.ex b/example/lib/load_processor.ex index 8f9282d8..f6def87c 100644 --- a/example/lib/load_processor.ex +++ b/example/lib/load_processor.ex @@ -1,3 +1,29 @@ defmodule LoadProcessor do @moduledoc false + alias LoadProcessor.Machine + + def run(count) do + :timer.tc(fn -> do_run(count) end) + end + + defp do_run(count) do + 1..count + |> Task.async_stream(&try_start/1, max_concurrency: count, timeout: :infinity) + |> Enum.filter(fn + {:ok, {_i, nil}} -> false + {:ok, _} -> true + end) + |> Enum.map(&elem(&1, 1)) + end + + defp try_start(i) do + "load-test" + |> Machine.new() + |> Machine.start(%{"payload" => nil}) + + {i, nil} + rescue + exception in Woody.BadResultError -> + {i, exception} + end end diff --git a/example/lib/load_processor/machine.ex b/example/lib/load_processor/machine.ex index 759c5155..d1b8dd71 100644 --- a/example/lib/load_processor/machine.ex +++ b/example/lib/load_processor/machine.ex @@ -17,13 +17,20 @@ defmodule LoadProcessor.Machine do history: nil, aux_state: nil - def new(automaton_url, ns, id) do - new(Woody.Context.new(), automaton_url, nil, ns, id) + @automaton_url Application.compile_env!(:load_processor, [:automaton, :url]) + @automaton_opts List.wrap(Application.compile_env!(:load_processor, [:automaton, :options])) + + def new(ns, id) do 
+ new(Woody.Context.new(), ns, id) + end + + def new(ns) do + new(ns, random_id()) end - def new(woody_ctx, automaton_url, automaton_opts, ns, id) do + def new(woody_ctx, ns, id) do %__MODULE__{ - client: Client.new(woody_ctx, automaton_url, List.wrap(automaton_opts)), + client: Client.new(woody_ctx, @automaton_url, @automaton_opts), ns: ns, id: id } @@ -66,4 +73,9 @@ defmodule LoadProcessor.Machine do defp make_ref(id) do %Reference{id: id} end + + defp random_id() do + <> = :snowflake.new() + :genlib_format.format_int_base(id, 62) + end end diff --git a/example/lib/load_processor/machine/history.ex b/example/lib/load_processor/machine/history.ex index 6908a948..e2d29cd3 100644 --- a/example/lib/load_processor/machine/history.ex +++ b/example/lib/load_processor/machine/history.ex @@ -1,8 +1,6 @@ defmodule LoadProcessor.Machine.History do alias MachinegunProto.StateProcessing.Machine, as: MachineState - alias MachinegunProto.StateProcessing.{HistoryRange, Direction} alias LoadProcessor.Machine.Event - require Direction @enforce_keys [:events, :range] defstruct events: nil, range: nil diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex index 7d4883a0..8aa34bd3 100644 --- a/example/lib/load_processor/processor_handler.ex +++ b/example/lib/load_processor/processor_handler.ex @@ -67,31 +67,44 @@ defmodule LoadProcessor.ProcessorHandler do action = %ComplexAction{} - |> set_timer(1) + |> set_timer(get_rand_sleep_time([3, 2, 1])) {:ok, %SignalResult{change: change, action: action}} end + defp get_rand_sleep_time(seed) do + # 0s 1s 2s etc occurrences in seed + seed |> Enum.with_index() |> Enum.map(fn {occ, i} -> List.duplicate(i, occ) end) |> List.flatten() |> Enum.random() + end + defp process_timeout(%Machine{id: id, ns: ns} = machine) do Logger.debug("Timeouting machine #{id} of #{ns}") + aux_state = get_aux_state(machine) - aux_state = - machine - |> get_aux_state() - |> Map.update!("counter", &(&1 + 1)) + 
case aux_state do + %{"counter" => counter} when counter < 100 -> + aux_state = Map.update!(aux_state, "counter", &(&1 + 1)) + Logger.debug("New aux state #{inspect(aux_state)}") - Logger.debug("New aux state #{inspect(aux_state)}") + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_incremented]) - change = - %MachineStateChange{} - |> put_aux_state(aux_state) - |> put_events([:counter_incremented]) + action = + %ComplexAction{} + |> set_timer(get_rand_sleep_time([3, 2, 1])) - action = - %ComplexAction{} - |> set_timer(1) + {:ok, %SignalResult{change: change, action: action}} - {:ok, %SignalResult{change: change, action: action}} + _ -> + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_stopped]) + + {:ok, %SignalResult{change: change, action: %ComplexAction{}}} + end end defp process_notification(_machine, _args) do diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml index 82a95463..0022986d 100644 --- a/example/machinegun/config.yaml +++ b/example/machinegun/config.yaml @@ -42,7 +42,7 @@ cluster: reconnect_timeout: 5000 process_registry: - module: mg_core_procreg_global + module: mg_procreg_global limits: process_heap: 2M From ed987d62da406003bbc9beaf3555760bc8e9f049 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Thu, 4 Jul 2024 18:44:57 +0300 Subject: [PATCH 23/31] Adds notification callback and OTEL tracing --- example/compose.yaml | 6 +- example/lib/load_processor/machine.ex | 45 +++++++---- .../lib/load_processor/processor_handler.ex | 77 ++++++++++++++----- example/mix.exs | 9 ++- example/mix.lock | 11 ++- 5 files changed, 107 insertions(+), 41 deletions(-) diff --git a/example/compose.yaml b/example/compose.yaml index 577e0d90..3eccc802 100644 --- a/example/compose.yaml +++ b/example/compose.yaml @@ -67,11 +67,11 @@ services: endpoint_mode: dnsrr resources: limits: + cpus: '0.5' + memory: 1G + reservations: cpus: '0.2' memory: 512M - reservations: 
- cpus: '0.1' - memory: 256M restart_policy: condition: on-failure diff --git a/example/lib/load_processor/machine.ex b/example/lib/load_processor/machine.ex index d1b8dd71..db706267 100644 --- a/example/lib/load_processor/machine.ex +++ b/example/lib/load_processor/machine.ex @@ -5,6 +5,7 @@ defmodule LoadProcessor.Machine do alias MachinegunProto.StateProcessing.{MachineDescriptor, Reference, HistoryRange, Direction} alias LoadProcessor.Machine.{History, Utils} + require OpenTelemetry.Tracer, as: Tracer require Direction @default_range %HistoryRange{limit: 10, direction: Direction.backward()} @@ -40,30 +41,46 @@ defmodule LoadProcessor.Machine do def loaded?(%__MODULE__{status: _}), do: true def start(%__MODULE__{client: client, ns: ns, id: id} = machine, args) do - _ = Client.start!(client, ns, id, Utils.pack(args)) + Tracer.with_span "starting machine" do + _ = Client.start!(client, ns, id, Utils.pack(args)) + end + get(machine) end def get(%__MODULE__{client: client, ns: ns, id: id} = machine) do - machine_state = Client.get_machine!(client, make_descr(ns, id, nil)) - - %{ - machine - | history: History.from_machine_state(machine_state), - status: machine_state.status, - aux_state: Utils.marshal(:aux_state, machine_state.aux_state) - } + Tracer.with_span "getting machine" do + machine_state = Client.get_machine!(client, make_descr(ns, id, nil)) + + %{ + machine + | history: History.from_machine_state(machine_state), + status: machine_state.status, + aux_state: Utils.marshal(:aux_state, machine_state.aux_state) + } + end end def call(%__MODULE__{client: client, ns: ns, id: id}, args) do - client - |> Client.call!(make_descr(ns, id, nil), Utils.pack(args)) - |> Utils.unpack() + Tracer.with_span "calling machine" do + client + |> Client.call!(make_descr(ns, id, nil), Utils.pack(args)) + |> Utils.unpack() + end + end + + def notify(%__MODULE__{client: client, ns: ns, id: id}, args) do + Tracer.with_span "sending notification to machine" do + client + |> 
Client.notify!(make_descr(ns, id, nil), Utils.pack(args)) + end end def simple_repair(%__MODULE__{client: client, ns: ns, id: id} = machine) do - _ = Client.simple_repair(client, ns, make_ref(id)) - machine + Tracer.with_span "simply repairaing machine" do + _ = Client.simple_repair(client, ns, make_ref(id)) + machine + end end defp make_descr(ns, id, range) do diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex index 8aa34bd3..04468a5b 100644 --- a/example/lib/load_processor/processor_handler.ex +++ b/example/lib/load_processor/processor_handler.ex @@ -3,6 +3,7 @@ defmodule LoadProcessor.ProcessorHandler do alias Woody.Generated.MachinegunProto.StateProcessing.Processor @behaviour Processor.Handler + require OpenTelemetry.Tracer, as: Tracer require Logger alias LoadProcessor.Machine.Utils @@ -22,13 +23,19 @@ defmodule LoadProcessor.ProcessorHandler do def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, _hdlops) do case signal do %Signal{init: %InitSignal{arg: args}} -> - process_init(machine, args) + Tracer.with_span "initializing" do + process_init(machine, Utils.unpack(args)) + end %Signal{timeout: %TimeoutSignal{}} -> - process_timeout(machine) + Tracer.with_span "timeouting" do + process_timeout(machine) + end %Signal{notification: %NotificationSignal{arg: args}} -> - process_notification(machine, args) + Tracer.with_span "notifying" do + process_notification(machine, Utils.unpack(args)) + end _uknown_signal -> throw(:not_implemented) @@ -37,19 +44,20 @@ defmodule LoadProcessor.ProcessorHandler do @impl true def process_call(%CallArgs{arg: args, machine: machine}, _ctx, _hdlops) do - args = Utils.unpack(args) - Logger.debug("Calling machine #{machine.id} of #{machine.ns} with #{inspect(args)}") + Tracer.with_span "processing call" do + Logger.debug("Calling machine #{machine.id} of #{machine.ns} with #{inspect(args)}") - change = - %MachineStateChange{} - |> 
put_events([{:call_processed, args}]) - |> put_aux_state(get_aux_state(machine)) + change = + %MachineStateChange{} + |> preserve_aux_state(machine) + |> put_events([{:call_processed, args}]) - action = - %ComplexAction{} - |> set_timer(0) + action = + %ComplexAction{} + |> set_timer(0) - {:ok, %CallResult{response: Utils.pack("result"), change: change, action: action}} + {:ok, %CallResult{response: Utils.pack("result"), change: change, action: action}} + end end @impl true @@ -58,11 +66,11 @@ defmodule LoadProcessor.ProcessorHandler do end defp process_init(%Machine{id: id, ns: ns} = _machine, args) do - Logger.debug("Starting '#{id}' of '#{ns}' with arguments: #{inspect(Utils.unpack(args))}") + Logger.debug("Starting '#{id}' of '#{ns}' with arguments: #{inspect(args)}") change = %MachineStateChange{} - |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", "counter" => 0}) + |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", :counter => 0}) |> put_events([:counter_created]) action = @@ -74,7 +82,11 @@ defmodule LoadProcessor.ProcessorHandler do defp get_rand_sleep_time(seed) do # 0s 1s 2s etc occurrences in seed - seed |> Enum.with_index() |> Enum.map(fn {occ, i} -> List.duplicate(i, occ) end) |> List.flatten() |> Enum.random() + seed + |> Enum.with_index() + |> Enum.map(fn {occ, i} -> List.duplicate(i, occ) end) + |> List.flatten() + |> Enum.random() end defp process_timeout(%Machine{id: id, ns: ns} = machine) do @@ -82,14 +94,22 @@ defmodule LoadProcessor.ProcessorHandler do aux_state = get_aux_state(machine) case aux_state do - %{"counter" => counter} when counter < 100 -> - aux_state = Map.update!(aux_state, "counter", &(&1 + 1)) + %{notified: true} -> + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_stopped]) + + {:ok, %SignalResult{change: change, action: %ComplexAction{}}} + + %{counter: counter} when counter < 100 -> + aux_state = Map.update!(aux_state, :counter, &(&1 + 1)) Logger.debug("New aux 
state #{inspect(aux_state)}") change = %MachineStateChange{} |> put_aux_state(aux_state) - |> put_events([:counter_incremented]) + |> put_events([{:counter_incremented, 1}]) action = %ComplexAction{} @@ -107,8 +127,23 @@ defmodule LoadProcessor.ProcessorHandler do end end - defp process_notification(_machine, _args) do - throw(:not_implemented) + defp process_notification(%Machine{id: id, ns: ns} = machine, args) do + Logger.debug("Notifying machine #{id} of #{ns}") + + change = + %MachineStateChange{} + |> put_aux_state(Map.put(get_aux_state(machine), :notified, true)) + |> put_events([{:counter_notified, args}]) + + action = + %ComplexAction{} + |> set_timer(get_rand_sleep_time([3, 2, 1])) + + {:ok, %SignalResult{change: change, action: action}} + end + + defp preserve_aux_state(change, %Machine{aux_state: aux_state}) do + %MachineStateChange{change | aux_state: aux_state} end defp get_aux_state(%Machine{aux_state: aux_state}) do diff --git a/example/mix.exs b/example/mix.exs index d6cc8509..ebb6fcc0 100644 --- a/example/mix.exs +++ b/example/mix.exs @@ -26,7 +26,8 @@ defmodule LoadProcessor.MixProject do version: "0.1.0", applications: [ api_key_mgmt: :permanent, - logstash_logger_formatter: :load + logstash_logger_formatter: :load, + opentelemetry: :temporary ], include_executables_for: [:unix], include_erts: false @@ -49,7 +50,11 @@ defmodule LoadProcessor.MixProject do git: "https://github.com/valitydev/logstash_logger_formatter.git", branch: "master", only: [:prod], - runtime: false} + runtime: false}, + {:gproc, "~> 0.9.1", override: true}, + {:opentelemetry, "~> 1.3"}, + {:opentelemetry_api, "~> 1.2"}, + {:opentelemetry_exporter, "~> 1.6"} ] end end diff --git a/example/mix.lock b/example/mix.lock index 97ca680a..21695b87 100644 --- a/example/mix.lock +++ b/example/mix.lock @@ -1,17 +1,25 @@ %{ + "acceptor_pool": {:hex, :acceptor_pool, "1.0.0", "43c20d2acae35f0c2bcd64f9d2bde267e459f0f3fd23dab26485bf518c281b21", [:rebar3], [], "hexpm", 
"0cbcd83fdc8b9ad2eee2067ef8b91a14858a5883cb7cd800e6fcd5803e158788"}, "cache": {:hex, :cache, "2.3.3", "b23a5fe7095445a88412a6e614c933377e0137b44ffed77c9b3fef1a731a20b2", [:rebar3], [], "hexpm", "44516ce6fa03594d3a2af025dd3a87bfe711000eb730219e1ddefc816e0aa2f4"}, "certifi": {:hex, :certifi, "2.8.0", "d4fb0a6bb20b7c9c3643e22507e42f356ac090a1dcea9ab99e27e0376d695eba", [:rebar3], [], "hexpm", "6ac7efc1c6f8600b08d625292d4bbf584e14847ce1b6b5c44d983d273e1097ea"}, + "chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"}, "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, "cowboy": {:hex, :cowboy, "2.9.0", "865dd8b6607e14cf03282e10e934023a1bd8be6f6bacf921a7e2a96d800cd452", [:make, :rebar3], [{:cowlib, "2.11.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "2c729f934b4e1aa149aff882f57c6372c15399a20d54f65c8d67bef583021bde"}, "cowlib": {:hex, :cowlib, "2.11.0", "0b9ff9c346629256c42ebe1eeb769a83c6cb771a6ee5960bd110ab0b9b872063", [:make, :rebar3], [], "hexpm", "2b3e9da0b21c4565751a6d4901c20d1b4cc25cbb7fd50d91d2ab6dd287bc86a9"}, + "ctx": {:hex, :ctx, "0.6.0", "8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"}, "genlib": {:git, "https://github.com/valitydev/genlib.git", "f6074551d6586998e91a97ea20acb47241254ff3", [branch: "master"]}, - "gproc": {:hex, :gproc, "0.9.0", "853ccb7805e9ada25d227a157ba966f7b34508f386a3e7e21992b1b484230699", [:rebar3], [], "hexpm", "587e8af698ccd3504cf4ba8d90f893ede2b0f58cabb8a916e2bf9321de3cf10b"}, + 
"gproc": {:hex, :gproc, "0.9.1", "f1df0364423539cf0b80e8201c8b1839e229e5f9b3ccb944c5834626998f5b8c", [:rebar3], [], "hexpm", "905088e32e72127ed9466f0bac0d8e65704ca5e73ee5a62cb073c3117916d507"}, + "grpcbox": {:hex, :grpcbox, "0.17.1", "6e040ab3ef16fe699ffb513b0ef8e2e896da7b18931a1ef817143037c454bcce", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.15.1", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.9.1", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "4a3b5d7111daabc569dc9cbd9b202a3237d81c80bf97212fbc676832cb0ceb17"}, "hackney": {:hex, :hackney, "1.18.0", "c4443d960bb9fba6d01161d01cd81173089686717d9490e5d3606644c48d121f", [:rebar3], [{:certifi, "~> 2.8.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "9afcda620704d720db8c6a3123e9848d09c87586dc1c10479c42627b905b5c5e"}, + "hpack": {:hex, :hpack_erl, "0.3.0", "2461899cc4ab6a0ef8e970c1661c5fc6a52d3c25580bc6dd204f84ce94669926", [:rebar3], [], "hexpm", "d6137d7079169d8c485c6962dfe261af5b9ef60fbc557344511c1e65e3d95fb0"}, "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "logstash_logger_formatter": {:git, 
"https://github.com/valitydev/logstash_logger_formatter.git", "e18d378ce155b4e3c06db9f5b696eb5ad1cf4f58", [branch: "master"]}, "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, "mg_proto": {:git, "https://github.com/valitydev/machinegun-proto", "5c2bcfdde8d91d7bd31011af8d6be1c558e9f2d3", [branch: "ft/elixir-support"]}, "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "opentelemetry": {:hex, :opentelemetry, "1.3.0", "988ac3c26acac9720a1d4fb8d9dc52e95b45ecfec2d5b5583276a09e8936bc5e", [:rebar3], [{:opentelemetry_api, "~> 1.2.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "8e09edc26aad11161509d7ecad854a3285d88580f93b63b0b1cf0bac332bfcc0"}, "opentelemetry_api": {:hex, :opentelemetry_api, "1.2.1", "7b69ed4f40025c005de0b74fce8c0549625d59cb4df12d15c32fe6dc5076ff42", [:mix, :rebar3], [{:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "6d7a27b7cad2ad69a09cabf6670514cafcec717c8441beb5c96322bac3d05350"}, + "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.6.0", "f4fbf69aa9f1541b253813221b82b48a9863bc1570d8ecc517bc510c0d1d3d8c", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.3", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.2", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", 
"1802d1dca297e46f21e5832ecf843c451121e875f73f04db87355a6cb2ba1710"}, "opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "0.2.0", "b67fe459c2938fcab341cb0951c44860c62347c005ace1b50f8402576f241435", [:mix, :rebar3], [], "hexpm", "d61fa1f5639ee8668d74b527e6806e0503efc55a42db7b5f39939d84c07d6895"}, "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, "prometheus": {:hex, :prometheus, "4.8.1", "fa76b152555273739c14b06f09f485cf6d5d301fe4e9d31b7ff803d26025d7a0", [:mix, :rebar3], [{:quantile_estimator, "~> 0.2.1", [hex: :quantile_estimator, repo: "hexpm", optional: false]}], "hexpm", "6edfbe928d271c7f657a6f2c46258738086584bd6cae4a000b8b9a6009ba23a5"}, @@ -21,6 +29,7 @@ "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, "thrift": {:git, "https://github.com/valitydev/elixir-thrift.git", "f561f7746c3f5634021258c86ea02b94579ef6eb", [branch: "ft/subst-reserved-vars"]}, + "tls_certificate_check": {:hex, :tls_certificate_check, "1.22.1", "0f450cc1568a67a65ce5e15df53c53f9a098c3da081c5f126199a72505858dc1", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3092be0babdc0e14c2e900542351e066c0fa5a9cf4b3597559ad1e67f07938c0"}, "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, "woody": {:git, 
"https://github.com/valitydev/woody_erlang.git", "c4f6b9c62c77699d7489c6913f1a157cd9c7ca1e", [branch: "ft/bump-compat-woody-ex"]}, "woody_ex": {:git, "https://github.com/valitydev/woody_ex.git", "e8d89c1dc8be889a0371227730b45b84a94bd67d", [branch: "ft/subst-reserved-vars"]}, From ea0d8aa65f0e6e1760d26391ed67ebe5695cead9 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 5 Jul 2024 12:08:00 +0300 Subject: [PATCH 24/31] Adds event sink span and event stash events --- apps/mg_core/src/mg_core_event_sink.erl | 15 +++++- apps/mg_core/src/mg_core_events_machine.erl | 47 ++++++++++++------- apps/mg_core/src/mg_core_otel.erl | 20 +++++++- apps/mg_woody/src/mg_woody_pulse_otel.erl | 15 +----- example/README.md | 12 ++++- example/lib/load_processor.ex | 12 ++--- example/lib/load_processor/application.ex | 7 ++- .../lib/load_processor/processor_handler.ex | 44 ++++++++++------- example/machinegun/config.yaml | 27 +++++++++-- 9 files changed, 135 insertions(+), 64 deletions(-) diff --git a/apps/mg_core/src/mg_core_event_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl index 18a80d05..52d63015 100644 --- a/apps/mg_core/src/mg_core_event_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -16,6 +16,9 @@ -module(mg_core_event_sink). +-include_lib("opentelemetry_api/include/otel_tracer.hrl"). +-include_lib("opentelemetry_api/include/opentelemetry.hrl"). + -export([add_events/6]). -callback add_events( @@ -48,4 +51,14 @@ add_events(_Handler, _NS, _ID, [], _ReqCtx, _Deadline) -> ok; add_events(Handler, NS, ID, Events, ReqCtx, Deadline) -> - ok = mg_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]). 
+ {Mod, _} = mg_utils:separate_mod_opts(Handler), + SpanOpts = #{ + kind => ?SPAN_KIND_PRODUCER, + attributes => mg_core_otel:machine_tags(NS, ID, #{ + <<"mg.event_sink.handler">> => Mod, + <<"mg.event_sink.count">> => erlang:length(Events) + }) + }, + ?with_span(<<"sinking events">>, SpanOpts, fun(_) -> + ok = mg_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]) + end). diff --git a/apps/mg_core/src/mg_core_events_machine.erl b/apps/mg_core/src/mg_core_events_machine.erl index 5f6da667..2b44d1d1 100644 --- a/apps/mg_core/src/mg_core_events_machine.erl +++ b/apps/mg_core/src/mg_core_events_machine.erl @@ -399,13 +399,24 @@ process_machine_std(Options, ReqCtx, Deadline, Subject, Args, Machine, State) -> maybe_stash_events(#{event_stash_size := Max}, State = #{events := EventStash}, NewEvents) -> Events = EventStash ++ NewEvents, NumEvents = erlang:length(Events), - case NumEvents > Max of - true -> - {External, Internal} = lists:split(NumEvents - Max, Events), - {State#{events => Internal}, External}; - false -> - {State#{events => Events}, []} - end. + {State1, Events1, StashCount, AddedCount, UnstashedCount} = + case NumEvents > Max of + true -> + Offset = NumEvents - Max, + {External, Internal} = lists:split(Offset, Events), + {State#{events => Internal}, External, erlang:length(Internal), Offset, Offset}; + false -> + {State#{events => Events}, [], NumEvents, erlang:length(NewEvents), 0} + end, + ok = mg_core_otel:add_event( + <<"events stash updated">>, + #{ + <<"mg.machine.event_stash.size">> => StashCount, + <<"mg.machine.event_stash.added">> => AddedCount, + <<"mg.machine.event_stash.unstashed">> => UnstashedCount + } + ), + {State1, Events1}. -spec retry_store_events(options(), id(), deadline(), [event()]) -> ok. 
retry_store_events(Options, ID, Deadline, Events) -> @@ -437,14 +448,7 @@ push_events_to_event_sinks(Options, ID, ReqCtx, Deadline, Events) -> EventSinks = maps:get(event_sinks, Options, []), lists:foreach( fun(EventSinkHandler) -> - ok = mg_core_event_sink:add_events( - EventSinkHandler, - Namespace, - ID, - Events, - ReqCtx, - Deadline - ) + ok = mg_core_event_sink:add_events(EventSinkHandler, Namespace, ID, Events, ReqCtx, Deadline) end, EventSinks ). @@ -578,10 +582,11 @@ handle_state_change( StateWas = #{events_range := EventsRangeWas} ) -> {Events, EventsRange} = mg_core_events:generate_events_with_range(EventsBodies, EventsRangeWas), + NewEventsRange = diff_event_ranges(EventsRange, EventsRangeWas), DelayedActions = #{ % NOTE % This is a range of events which are not yet pushed to event sinks - new_events_range => diff_event_ranges(EventsRange, EventsRangeWas) + new_events_range => NewEventsRange }, State = add_delayed_actions( DelayedActions, @@ -590,6 +595,16 @@ handle_state_change( aux_state := AuxState } ), + ok = + case NewEventsRange of + undefined -> + ok; + _ -> + mg_core_otel:add_event( + <<"new delayed event range">>, + mg_core_otel:event_range_to_attributes(NewEventsRange) + ) + end, maybe_stash_events(Options, State, Events). -spec diff_event_ranges(events_range(), events_range()) -> events_range(). diff --git a/apps/mg_core/src/mg_core_otel.erl b/apps/mg_core/src/mg_core_otel.erl index 5e1526bd..07e4a24c 100644 --- a/apps/mg_core/src/mg_core_otel.erl +++ b/apps/mg_core/src/mg_core_otel.erl @@ -16,6 +16,8 @@ -export([impact_to_machine_activity/1]). -export([machine_tags/2]). +-export([machine_tags/3]). +-export([event_range_to_attributes/1]). -type packed_otel_stub() :: [mg_core_storage:opaque()]. 
@@ -125,11 +127,25 @@ machine_tags(Namespace, ID) -> machine_tags(Namespace, ID, OtherTags) -> genlib_map:compact( maps:merge(OtherTags, #{ - <<"machine.ns">> => Namespace, - <<"machine.id">> => ID + <<"mg.machine.ns">> => Namespace, + <<"mg.machine.id">> => ID }) ). +-spec event_range_to_attributes(mg_core_events:events_range()) -> map(). +event_range_to_attributes(undefined) -> + #{}; +event_range_to_attributes({After, Limit, Direction}) -> + #{ + "mg.machine.event_range.after" => After, + "mg.machine.event_range.limit" => Limit, + "mg.machine.event_range.direction" => + case Direction of + +1 -> forward; + -1 -> backward + end + }. + %% -spec span_id(opentelemetry:span_ctx()) -> opentelemetry:span_id() | undefined. diff --git a/apps/mg_woody/src/mg_woody_pulse_otel.erl b/apps/mg_woody/src/mg_woody_pulse_otel.erl index e79915b4..25aa885f 100644 --- a/apps/mg_woody/src/mg_woody_pulse_otel.erl +++ b/apps/mg_woody/src/mg_woody_pulse_otel.erl @@ -33,20 +33,7 @@ handle_beat(Options, #woody_event{event = Event, rpc_id = RpcID, event_meta = Me woody_event_handler_otel:handle_event(Event, RpcID, Meta, Options); %% Woody server's function handling error beat. handle_beat(_Options, #woody_request_handle_error{namespace = NS, machine_id = ID, exception = Exception}) -> - mg_core_otel:record_exception(Exception, machine_tags(NS, ID)); + mg_core_otel:record_exception(Exception, mg_core_otel:machine_tags(NS, ID)); %% Disregard any other handle_beat(_Options, _Beat) -> ok. - -%% Internal - --spec machine_tags(mg_core:ns(), mg_core:id()) -> map(). -machine_tags(Namespace, ID) -> - machine_tags(Namespace, ID, #{}). - --spec machine_tags(mg_core:ns(), mg_core:id(), map()) -> map(). -machine_tags(Namespace, ID, OtherTags) -> - maps:merge(OtherTags, #{ - <<"machine.ns">> => Namespace, - <<"machine.id">> => ID - }). 
diff --git a/example/README.md b/example/README.md index f68143fd..1f7a7145 100644 --- a/example/README.md +++ b/example/README.md @@ -3,14 +3,16 @@ **TODO: Add description** -## Receipts +## Examples + +### Per machine ``` elixir # Alias machinery helper for convinence alias LoadProcessor.Machine # Create machine client -machine = Machine.new("load-test") +machine = Machine.new("eventful-counter") # Start machine machine |> Machine.start("start payload") @@ -27,3 +29,9 @@ machine |> Machine.get() # Get machine status machine |> Machine.get() |> Map.get(:status) ``` + +### Start batch + +``` elixir +LoadProcessor.run("simple-counter", 100) +``` diff --git a/example/lib/load_processor.ex b/example/lib/load_processor.ex index f6def87c..9370355a 100644 --- a/example/lib/load_processor.ex +++ b/example/lib/load_processor.ex @@ -2,13 +2,13 @@ defmodule LoadProcessor do @moduledoc false alias LoadProcessor.Machine - def run(count) do - :timer.tc(fn -> do_run(count) end) + def run(ns, count) do + :timer.tc(fn -> do_run(ns, count) end) end - defp do_run(count) do + defp do_run(ns, count) do 1..count - |> Task.async_stream(&try_start/1, max_concurrency: count, timeout: :infinity) + |> Task.async_stream(fn i -> try_start(ns, i) end, max_concurrency: count, timeout: :infinity) |> Enum.filter(fn {:ok, {_i, nil}} -> false {:ok, _} -> true @@ -16,8 +16,8 @@ defmodule LoadProcessor do |> Enum.map(&elem(&1, 1)) end - defp try_start(i) do - "load-test" + defp try_start(ns, i) do + ns |> Machine.new() |> Machine.start(%{"payload" => nil}) diff --git a/example/lib/load_processor/application.ex b/example/lib/load_processor/application.ex index 34e67245..70719bc2 100644 --- a/example/lib/load_processor/application.ex +++ b/example/lib/load_processor/application.ex @@ -37,7 +37,12 @@ defmodule LoadProcessor.Application do |> Map.put(:port, 8022) Server.child_spec(LoadProcessor, endpoint, [ - ProcessorHandler.new("/v1/stateproc", event_handler: WoodyHandler) | additional_handlers + 
ProcessorHandler.new("/v1/stateproc/simple-counter", + event_handler: WoodyHandler, + less_events: true + ), + ProcessorHandler.new("/v1/stateproc/eventful-counter", event_handler: WoodyHandler) + | additional_handlers ]) end diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex index 04468a5b..33234e35 100644 --- a/example/lib/load_processor/processor_handler.ex +++ b/example/lib/load_processor/processor_handler.ex @@ -16,25 +16,31 @@ defmodule LoadProcessor.ProcessorHandler do alias MachinegunProto.Base.Timer def new(http_path, options \\ []) do - Processor.Handler.new(__MODULE__, http_path, options) + {hdlopts, options} = + case Keyword.pop(options, :less_events, false) do + {false, opts} -> {%{less_events: false}, opts} + {true, opts} -> {%{less_events: true}, opts} + end + + Processor.Handler.new({__MODULE__, hdlopts}, http_path, options) end @impl true - def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, _hdlops) do + def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, hdlopts) do case signal do %Signal{init: %InitSignal{arg: args}} -> Tracer.with_span "initializing" do - process_init(machine, Utils.unpack(args)) + process_init(machine, Utils.unpack(args), hdlopts) end %Signal{timeout: %TimeoutSignal{}} -> Tracer.with_span "timeouting" do - process_timeout(machine) + process_timeout(machine, hdlopts) end %Signal{notification: %NotificationSignal{arg: args}} -> Tracer.with_span "notifying" do - process_notification(machine, Utils.unpack(args)) + process_notification(machine, Utils.unpack(args), hdlopts) end _uknown_signal -> @@ -43,14 +49,14 @@ defmodule LoadProcessor.ProcessorHandler do end @impl true - def process_call(%CallArgs{arg: args, machine: machine}, _ctx, _hdlops) do + def process_call(%CallArgs{arg: args, machine: machine}, _ctx, hdlopts) do Tracer.with_span "processing call" do Logger.debug("Calling machine #{machine.id} of #{machine.ns} with 
#{inspect(args)}") change = %MachineStateChange{} |> preserve_aux_state(machine) - |> put_events([{:call_processed, args}]) + |> put_events([{:call_processed, args}], hdlopts) action = %ComplexAction{} @@ -61,17 +67,17 @@ defmodule LoadProcessor.ProcessorHandler do end @impl true - def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlops) do + def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlopts) do throw(:not_implemented) end - defp process_init(%Machine{id: id, ns: ns} = _machine, args) do + defp process_init(%Machine{id: id, ns: ns} = _machine, args, hdlopts) do Logger.debug("Starting '#{id}' of '#{ns}' with arguments: #{inspect(args)}") change = %MachineStateChange{} |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", :counter => 0}) - |> put_events([:counter_created]) + |> put_events([:counter_created], hdlopts) action = %ComplexAction{} @@ -89,7 +95,7 @@ defmodule LoadProcessor.ProcessorHandler do |> Enum.random() end - defp process_timeout(%Machine{id: id, ns: ns} = machine) do + defp process_timeout(%Machine{id: id, ns: ns} = machine, hdlopts) do Logger.debug("Timeouting machine #{id} of #{ns}") aux_state = get_aux_state(machine) @@ -98,7 +104,7 @@ defmodule LoadProcessor.ProcessorHandler do change = %MachineStateChange{} |> put_aux_state(aux_state) - |> put_events([:counter_stopped]) + |> put_events([:counter_stopped], hdlopts) {:ok, %SignalResult{change: change, action: %ComplexAction{}}} @@ -109,7 +115,7 @@ defmodule LoadProcessor.ProcessorHandler do change = %MachineStateChange{} |> put_aux_state(aux_state) - |> put_events([{:counter_incremented, 1}]) + |> put_events([{:counter_incremented, 1}], hdlopts) action = %ComplexAction{} @@ -121,19 +127,19 @@ defmodule LoadProcessor.ProcessorHandler do change = %MachineStateChange{} |> put_aux_state(aux_state) - |> put_events([:counter_stopped]) + |> put_events([:counter_stopped], hdlopts) {:ok, %SignalResult{change: change, action: %ComplexAction{}}} end end 
- defp process_notification(%Machine{id: id, ns: ns} = machine, args) do + defp process_notification(%Machine{id: id, ns: ns} = machine, args, hdlopts) do Logger.debug("Notifying machine #{id} of #{ns}") change = %MachineStateChange{} |> put_aux_state(Map.put(get_aux_state(machine), :notified, true)) - |> put_events([{:counter_notified, args}]) + |> put_events([{:counter_notified, args}], hdlopts) action = %ComplexAction{} @@ -159,7 +165,11 @@ defmodule LoadProcessor.ProcessorHandler do %MachineStateChange{change | aux_state: Utils.unmarshal(:aux_state, data)} end - defp put_events(change, events) do + defp put_events(change, _events, %{less_events: true}) do + change + end + + defp put_events(change, events, _hdlopts) do wrapped_events = events |> Enum.map(&Utils.unmarshal(:content, &1)) diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml index 0022986d..2e0ae5bf 100644 --- a/example/machinegun/config.yaml +++ b/example/machinegun/config.yaml @@ -66,8 +66,7 @@ logging: 'debug': 'DEBUG' namespaces: - load-test: - # suicide_probability: 0.1 + simple-counter: &default_namespace_params retries: storage: type: exponential @@ -98,7 +97,7 @@ namespaces: kafka: type: kafka client: default_kafka_client - topic: load-test + topic: simple-counter default_processing_timeout: 30s timer_processing_timeout: 60s reschedule_timeout: 60s @@ -106,7 +105,7 @@ namespaces: shutdown_timeout: 5s unload_timeout: 1m processor: - url: http://load-processor:8022/v1/stateproc + url: http://load-processor:8022/v1/stateproc/simple-counter pool_size: 50 http_keep_alive_timeout: 10s overseer: @@ -129,7 +128,25 @@ namespaces: modernizer: current_format_version: 1 handler: - url: http://load-test:8022/v1/modernizer + url: http://load-processor:8022/v1/modernizer/simple-counter + pool_size: 50 + http_keep_alive_timeout: 10s + + eventful-counter: + <<: *default_namespace_params + event_sinks: + kafka: + type: kafka + client: default_kafka_client + topic: eventful-counter + 
processor: + url: http://load-processor:8022/v1/stateproc/eventful-counter + pool_size: 50 + http_keep_alive_timeout: 10s + modernizer: + current_format_version: 1 + handler: + url: http://load-processor:8022/v1/modernizer/eventful-counter pool_size: 50 http_keep_alive_timeout: 10s From 2b8fc63b4581a281a6a7c594a1bf9a0d3578c882 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 5 Jul 2024 12:31:45 +0300 Subject: [PATCH 25/31] Fixes events --- apps/mg_core/src/mg_core_events_machine.erl | 22 +++++++++++++-------- apps/mg_core/src/mg_core_otel.erl | 12 +++++------ example/machinegun/config.yaml | 22 +++++++++++++++------ 3 files changed, 36 insertions(+), 20 deletions(-) diff --git a/apps/mg_core/src/mg_core_events_machine.erl b/apps/mg_core/src/mg_core_events_machine.erl index 2b44d1d1..251177cd 100644 --- a/apps/mg_core/src/mg_core_events_machine.erl +++ b/apps/mg_core/src/mg_core_events_machine.erl @@ -408,14 +408,20 @@ maybe_stash_events(#{event_stash_size := Max}, State = #{events := EventStash}, false -> {State#{events => Events}, [], NumEvents, erlang:length(NewEvents), 0} end, - ok = mg_core_otel:add_event( - <<"events stash updated">>, - #{ - <<"mg.machine.event_stash.size">> => StashCount, - <<"mg.machine.event_stash.added">> => AddedCount, - <<"mg.machine.event_stash.unstashed">> => UnstashedCount - } - ), + ok = + case AddedCount of + 0 -> + ok; + _ -> + mg_core_otel:add_event( + <<"events stash updated">>, + #{ + <<"mg.machine.event_stash.size">> => StashCount, + <<"mg.machine.event_stash.added">> => AddedCount, + <<"mg.machine.event_stash.unstashed">> => UnstashedCount + } + ) + end, {State1, Events1}. -spec retry_store_events(options(), id(), deadline(), [event()]) -> ok. 
diff --git a/apps/mg_core/src/mg_core_otel.erl b/apps/mg_core/src/mg_core_otel.erl index 07e4a24c..0e1f1430 100644 --- a/apps/mg_core/src/mg_core_otel.erl +++ b/apps/mg_core/src/mg_core_otel.erl @@ -135,14 +135,14 @@ machine_tags(Namespace, ID, OtherTags) -> -spec event_range_to_attributes(mg_core_events:events_range()) -> map(). event_range_to_attributes(undefined) -> #{}; -event_range_to_attributes({After, Limit, Direction}) -> +event_range_to_attributes({UpperBoundary, LowerBoundary, Direction}) -> #{ - "mg.machine.event_range.after" => After, - "mg.machine.event_range.limit" => Limit, - "mg.machine.event_range.direction" => + <<"mg.machine.event_range.upper_boundary">> => UpperBoundary, + <<"mg.machine.event_range.lower_boundary">> => LowerBoundary, + <<"mg.machine.event_range.direction">> => case Direction of - +1 -> forward; - -1 -> backward + +1 -> <<"forward">>; + -1 -> <<"backward">> end }. diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml index 2e0ae5bf..1fc736e6 100644 --- a/example/machinegun/config.yaml +++ b/example/machinegun/config.yaml @@ -66,8 +66,8 @@ logging: 'debug': 'DEBUG' namespaces: - simple-counter: &default_namespace_params - retries: + simple-counter: + retries: &retries storage: type: exponential max_retries: infinity @@ -108,16 +108,16 @@ namespaces: url: http://load-processor:8022/v1/stateproc/simple-counter pool_size: 50 http_keep_alive_timeout: 10s - overseer: + overseer: &overseer capacity: 1000 min_scan_delay: 1s scan_interval: 1m - timers: + timers: &timers scan_interval: 1m scan_limit: 1000 capacity: 1000 min_scan_delay: 10s - notification: + notification: ¬ification capacity: 1000 scan_interval: 1m min_scan_delay: 1s @@ -133,16 +133,26 @@ namespaces: http_keep_alive_timeout: 10s eventful-counter: - <<: *default_namespace_params + retries: *retries event_sinks: kafka: type: kafka client: default_kafka_client topic: eventful-counter + default_processing_timeout: 30s + timer_processing_timeout: 60s + 
reschedule_timeout: 60s + hibernate_timeout: 5s + shutdown_timeout: 5s + unload_timeout: 1m processor: url: http://load-processor:8022/v1/stateproc/eventful-counter pool_size: 50 http_keep_alive_timeout: 10s + overseer: *overseer + timers: *timers + notification: *notification + event_stash_size: 5 modernizer: current_format_version: 1 handler: From b88ff6404be548b8cece398314aacfb10697fc13 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 5 Jul 2024 13:27:47 +0300 Subject: [PATCH 26/31] Fixes `example/Dockerfile` --- example/Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/example/Dockerfile b/example/Dockerfile index 72eb77d3..31bab9fc 100644 --- a/example/Dockerfile +++ b/example/Dockerfile @@ -12,7 +12,10 @@ RUN wget -q -O- "https://github.com/valitydev/woorl/releases/download/1.8/woorl- ENV CHARSET=UTF-8 ENV LANG=C.UTF-8 -RUN apt update && apt install -y inotify-tools && mix local.hex --force && mix local.rebar --force +RUN apt-get update && \ + apt-get install -y inotify-tools && \ + mix local.hex --force && \ + mix local.rebar --force # Set runtime CMD ["/bin/bash"] From 7caa1b1ab9c799a5989ea5bb6e363c6343d39af0 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 5 Jul 2024 13:33:16 +0300 Subject: [PATCH 27/31] Fixes dockerfile inotify version --- example/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/Dockerfile b/example/Dockerfile index 31bab9fc..a4c1ba5f 100644 --- a/example/Dockerfile +++ b/example/Dockerfile @@ -13,7 +13,7 @@ ENV CHARSET=UTF-8 ENV LANG=C.UTF-8 RUN apt-get update && \ - apt-get install -y inotify-tools && \ + apt-get install -y inotify-tools=3.22.6.0-4 && \ mix local.hex --force && \ mix local.rebar --force From 5274e7f629d80ed6aee73def29cd4d485f360849 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Fri, 5 Jul 2024 13:37:19 +0300 Subject: [PATCH 28/31] Patches dockerfile according hadoling results --- example/Dockerfile | 6 ++++-- 1 file changed, 4 
insertions(+), 2 deletions(-) diff --git a/example/Dockerfile b/example/Dockerfile index a4c1ba5f..a58a7166 100644 --- a/example/Dockerfile +++ b/example/Dockerfile @@ -13,9 +13,11 @@ ENV CHARSET=UTF-8 ENV LANG=C.UTF-8 RUN apt-get update && \ - apt-get install -y inotify-tools=3.22.6.0-4 && \ + apt-get install -y inotify-tools=3.22.6.0-4 --no-install-recommends && \ mix local.hex --force && \ - mix local.rebar --force + mix local.rebar --force && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* # Set runtime CMD ["/bin/bash"] From 72464db7778999db144923692f99dea0f9c718d5 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Mon, 15 Jul 2024 12:50:42 +0300 Subject: [PATCH 29/31] Fixes dup headers inclusion --- apps/mg_core/src/mg_core_event_sink.erl | 3 --- 1 file changed, 3 deletions(-) diff --git a/apps/mg_core/src/mg_core_event_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl index 30a4d34c..52d63015 100644 --- a/apps/mg_core/src/mg_core_event_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -19,9 +19,6 @@ -include_lib("opentelemetry_api/include/otel_tracer.hrl"). -include_lib("opentelemetry_api/include/opentelemetry.hrl"). --include_lib("opentelemetry_api/include/otel_tracer.hrl"). --include_lib("opentelemetry_api/include/opentelemetry.hrl"). - -export([add_events/6]). 
-callback add_events( From cf54d8183bdd9f63482b0927018dd5aac8c103bf Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 4 Feb 2025 15:46:36 +0300 Subject: [PATCH 30/31] Bumps CI --- .github/workflows/erlang-checks.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/erlang-checks.yml b/.github/workflows/erlang-checks.yml index ee5df04f..585aa7ef 100644 --- a/.github/workflows/erlang-checks.yml +++ b/.github/workflows/erlang-checks.yml @@ -17,7 +17,7 @@ jobs: thrift-version: ${{ steps.thrift-version.outputs.version }} steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - run: grep -v '^#' .env >> $GITHUB_ENV - id: otp-version run: echo "::set-output name=version::$OTP_VERSION" @@ -29,7 +29,7 @@ jobs: run: name: Run checks needs: setup - uses: valitydev/erlang-workflows/.github/workflows/erlang-parallel-build.yml@v1.0.13 + uses: valitydev/erlang-workflows/.github/workflows/erlang-parallel-build.yml@v1.0.17 with: otp-version: ${{ needs.setup.outputs.otp-version }} rebar-version: ${{ needs.setup.outputs.rebar-version }} From 7bfc65125baeedc41cb1ed8f22cdf5f1c0751d33 Mon Sep 17 00:00:00 2001 From: Aleksey Kashapov Date: Tue, 4 Feb 2025 15:47:25 +0300 Subject: [PATCH 31/31] Fixes `mg_utils` missing rename --- apps/mg_core/src/mg_core_event_sink.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/mg_core/src/mg_core_event_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl index 4b1c6e1a..52d63015 100644 --- a/apps/mg_core/src/mg_core_event_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -51,7 +51,7 @@ add_events(_Handler, _NS, _ID, [], _ReqCtx, _Deadline) -> ok; add_events(Handler, NS, ID, Events, ReqCtx, Deadline) -> - {Mod, _} = mg_core_utils:separate_mod_opts(Handler), + {Mod, _} = mg_utils:separate_mod_opts(Handler), SpanOpts = #{ kind => ?SPAN_KIND_PRODUCER, attributes => mg_core_otel:machine_tags(NS, ID, #{