diff --git a/.dockerignore b/.dockerignore index 17c5a18e..c5e20fec 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,3 +5,5 @@ /.idea/ erl_crash.dump rebar3.crashdump + +/example/ \ No newline at end of file diff --git a/.github/workflows/erlang-checks.yml b/.github/workflows/erlang-checks.yml index ee5df04f..585aa7ef 100644 --- a/.github/workflows/erlang-checks.yml +++ b/.github/workflows/erlang-checks.yml @@ -17,7 +17,7 @@ jobs: thrift-version: ${{ steps.thrift-version.outputs.version }} steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - run: grep -v '^#' .env >> $GITHUB_ENV - id: otp-version run: echo "::set-output name=version::$OTP_VERSION" @@ -29,7 +29,7 @@ jobs: run: name: Run checks needs: setup - uses: valitydev/erlang-workflows/.github/workflows/erlang-parallel-build.yml@v1.0.13 + uses: valitydev/erlang-workflows/.github/workflows/erlang-parallel-build.yml@v1.0.17 with: otp-version: ${{ needs.setup.outputs.otp-version }} rebar-version: ${{ needs.setup.outputs.rebar-version }} diff --git a/Makefile b/Makefile index 622758d7..9a150cd1 100644 --- a/Makefile +++ b/Makefile @@ -107,9 +107,4 @@ cover-report: $(REBAR) cover test-configurator: - $(MAKE) $(FILE_PERMISSIONS) ERL_LIBS=_build/default/lib ./rel_scripts/configurator.escript config/config.yaml config - -FILE_PERMISSIONS = $(patsubst %,%.target,$(wildcard config/*._perms)) -$(FILE_PERMISSIONS): config/%._perms.target: config/%._perms - chmod $$(cat $^) config/$* diff --git a/README.md b/README.md index 5a3f7314..4001eea3 100644 --- a/README.md +++ b/README.md @@ -128,6 +128,13 @@ _НС_ — это любое отклонение от основного биз !!! attention "Todo" +## EventSink + +Основная его задача — сохранение сплошного потока эвенотов, для возможности синхронизации баз. Эвенты должны быть total ordered, и должна быть цепочка хэшей для контроля целостности. +Находится отдельно от машин, и может быть подписан на произвольные namespace'ы. 
Тоже является машиной в отдельном нэймспейсе (чтобы это работало нормально нужно сделать [оптимизации протокола](https://github.com/rbkmoney/damsel/pull/38) и возможно отдельный бэкенд для бд). +Через настройки описываются подписки event_sink'ов на namespace'ы (точнее на машины). +У машины появляется промежуточный стейт для слива в синк. + ## OpenTelemetry В МГ добавлена поддержка трассировки сигналов автомата и передача соответствующего контекста в http заголовках при работе woody rpc. diff --git a/apps/gen_squad/include/gen_squad_cth.hrl b/apps/gen_squad/include/gen_squad_cth.hrl new file mode 100644 index 00000000..1bd9a33f --- /dev/null +++ b/apps/gen_squad/include/gen_squad_cth.hrl @@ -0,0 +1,56 @@ +-ifndef(__gen_squad_cth__). +-define(__gen_squad_cth__, 42). + +-define(flushMailbox(__Acc0), + (fun __Flush(__Acc) -> + receive + __M -> __Flush([__M | __Acc]) + after 0 -> __Acc + end + end)( + __Acc0 + ) +). + +-define(assertReceive(__Expr), + ?assertReceive(__Expr, 1000) +). + +-define(assertReceive(__Expr, __Timeout), + (fun() -> + receive + (__Expr) = __V -> __V + after __Timeout -> + erlang:error( + {assertReceive, [ + {module, ?MODULE}, + {line, ?LINE}, + {expression, (??__Expr)}, + {mailbox, ?flushMailbox([])} + ]} + ) + end + end)() +). + +-define(assertNoReceive(), + ?assertNoReceive(1000) +). + +-define(assertNoReceive(__Timeout), + (fun() -> + receive + __Message -> + erlang:error( + {assertNoReceive, [ + {module, ?MODULE}, + {line, ?LINE}, + {mailbox, ?flushMailbox([__Message])} + ]} + ) + after __Timeout -> ok + end + end)() +). + +-endif. 
diff --git a/apps/gen_squad/rebar.config b/apps/gen_squad/rebar.config new file mode 100644 index 00000000..e69de29b diff --git a/apps/gen_squad/src/gen_squad.app.src b/apps/gen_squad/src/gen_squad.app.src new file mode 100644 index 00000000..18937c0c --- /dev/null +++ b/apps/gen_squad/src/gen_squad.app.src @@ -0,0 +1,15 @@ +{application, gen_squad, [ + {description, "Generic squad behaviour"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_gen_squad.erl b/apps/gen_squad/src/gen_squad.erl similarity index 96% rename from apps/mg_core/src/mg_core_gen_squad.erl rename to apps/gen_squad/src/gen_squad.erl index ece672ac..e1782ed4 100644 --- a/apps/mg_core/src/mg_core_gen_squad.erl +++ b/apps/gen_squad/src/gen_squad.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ %%% - Do we even need monitors? %%% - More tests %%% --module(mg_core_gen_squad). +-module(gen_squad). %% @@ -134,19 +134,24 @@ discovery => discovery_opts(), heartbeat => heartbeat_opts(), promotion => promotion_opts(), - pulse => mg_core_gen_squad_pulse:handler() + pulse => gen_squad_pulse:handler() }. -export_type([opts/0]). -export_type([heartbeat_opts/0]). +-type gen_reg_name() :: + {local, atom()} + | {global, term()} + | {via, module(), term()}. + %% -spec start_link(module(), _Args, opts()) -> {ok, pid()} | ignore | {error, _}. start_link(Module, Args, Opts) -> gen_server:start_link(?MODULE, mk_state(Module, Args, set_defaults(Opts)), []). --spec start_link(mg_core_procreg:reg_name(), module(), _Args, opts()) -> +-spec start_link(gen_reg_name(), module(), _Args, opts()) -> {ok, pid()} | ignore | {error, _}. 
start_link(RegName, Module, Args, Opts) -> gen_server:start_link(RegName, ?MODULE, mk_state(Module, Args, set_defaults(Opts)), []). @@ -236,7 +241,7 @@ init(St0) -> {ok, St = #st{squad = Squad0, opts = Opts}} -> Squad = add_member(self(), Squad0, Opts), HeartOpts = maps:with([heartbeat, pulse], Opts), - {ok, HeartPid} = mg_core_gen_squad_heart:start_link(heartbeat, HeartOpts), + {ok, HeartPid} = gen_squad_heart:start_link(heartbeat, HeartOpts), {ok, defer_discovery(St#st{heart = HeartPid, squad = Squad})}; Ret -> Ret @@ -247,7 +252,7 @@ handle_call(Call, From, St = #st{squad = Squad}) -> invoke_callback(handle_call, [Call, From, get_rank(St), Squad], try_cancel_st_timer(user, St)). -type cast() :: - mg_core_gen_squad_heart:envelope() + gen_squad_heart:envelope() | heartbeat. -spec handle_cast(cast(), st()) -> noreply(st()). @@ -259,7 +264,7 @@ handle_cast(heartbeat, St) -> handle_cast(Cast, St = #st{squad = Squad}) -> invoke_callback(handle_cast, [Cast, get_rank(St), Squad], try_cancel_st_timer(user, St)). --spec handle_broadcast(mg_core_gen_squad_heart:payload(), st()) -> noreply(st()). +-spec handle_broadcast(gen_squad_heart:payload(), st()) -> noreply(st()). handle_broadcast( #{msg := howdy, from := Pid, members := Pids}, St = #st{squad = Squad0, opts = Opts} @@ -360,7 +365,7 @@ try_update_squad(Squad, St0 = #st{heart = HeartPid, opts = Opts}) -> ok = case has_squad_changed(Squad, St0) of {true, Members} -> - mg_core_gen_squad_heart:update_members(Members, HeartPid); + gen_squad_heart:update_members(Members, HeartPid); false -> ok end, @@ -486,13 +491,13 @@ account_heartbeat(Member) -> -type recepient_filter() :: fun((pid()) -> boolean()). --spec broadcast(mg_core_gen_squad_heart:message(), recepient_filter(), squad(), _Ctx, opts()) -> ok. +-spec broadcast(gen_squad_heart:message(), recepient_filter(), squad(), _Ctx, opts()) -> ok. 
broadcast(Message, RecepientFilter, Squad, Ctx, Opts) -> Self = self(), Members = members(maps:remove(Self, Squad)), Recepients = lists:filter(RecepientFilter, Members), Pulse = maps:get(pulse, Opts, undefined), - mg_core_gen_squad_heart:broadcast(Message, Self, Members, Recepients, Ctx, Pulse). + gen_squad_heart:broadcast(Message, Self, Members, Recepients, Ctx, Pulse). -spec newbies(squad()) -> recepient_filter(). newbies(Squad) -> @@ -581,10 +586,10 @@ cancel_monitor(MRef, Opts) -> %% --spec beat(mg_core_gen_squad_pulse:beat(), st() | opts()) -> _. +-spec beat(gen_squad_pulse:beat(), st() | opts()) -> _. beat(Beat, #st{opts = Opts}) -> beat(Beat, Opts); beat(Beat, #{pulse := Handler}) -> - mg_core_gen_squad_pulse:handle_beat(Handler, Beat); + gen_squad_pulse:handle_beat(Handler, Beat); beat(_Beat, _St) -> ok. diff --git a/apps/mg_core/src/mg_core_gen_squad_heart.erl b/apps/gen_squad/src/gen_squad_heart.erl similarity index 93% rename from apps/mg_core/src/mg_core_gen_squad_heart.erl rename to apps/gen_squad/src/gen_squad_heart.erl index bc3f5f85..43005bc2 100644 --- a/apps/mg_core/src/mg_core_gen_squad_heart.erl +++ b/apps/gen_squad/src/gen_squad_heart.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_heart). +-module(gen_squad_heart). -export([start_link/2]). -export([update_members/2]). @@ -43,9 +43,9 @@ -type envelope() :: {'$squad', payload()}. --type pulse() :: mg_core_gen_squad_pulse:handler(). +-type pulse() :: gen_squad_pulse:handler(). -type opts() :: #{ - heartbeat => mg_core_gen_squad:heartbeat_opts(), + heartbeat => gen_squad:heartbeat_opts(), pulse => pulse() }. 
@@ -165,12 +165,12 @@ monitor_self(St = #st{self = Self}) -> %% --spec beat(mg_core_gen_squad_pulse:beat(), st() | opts() | pulse() | undefined) -> _. +-spec beat(gen_squad_pulse:beat(), st() | opts() | pulse() | undefined) -> _. beat(Beat, #st{opts = Opts}) -> beat(Beat, Opts); beat(Beat, Opts = #{}) -> beat(Beat, maps:get(pulse, Opts, undefined)); beat(Beat, Handler) when Handler /= undefined -> - mg_core_gen_squad_pulse:handle_beat(Handler, Beat); + gen_squad_pulse:handle_beat(Handler, Beat); beat(_Beat, undefined) -> ok. diff --git a/apps/mg_core/src/mg_core_gen_squad_pulse.erl b/apps/gen_squad/src/gen_squad_pulse.erl similarity index 63% rename from apps/mg_core/src/mg_core_gen_squad_pulse.erl rename to apps/gen_squad/src/gen_squad_pulse.erl index 00566d87..736fe9d0 100644 --- a/apps/mg_core/src/mg_core_gen_squad_pulse.erl +++ b/apps/gen_squad/src/gen_squad_pulse.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,21 +14,21 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_pulse). +-module(gen_squad_pulse). -callback handle_beat(_Options, beat()) -> _. %% TODO remove weak circular deps -type beat() :: - {rank, {changed, mg_core_gen_squad:rank()}} + {rank, {changed, gen_squad:rank()}} | { {member, pid()}, added - | {refreshed, mg_core_gen_squad:member()} - | {removed, mg_core_gen_squad:member(), _Reason :: lost | {down, _}} + | {refreshed, gen_squad:member()} + | {removed, gen_squad:member(), _Reason :: lost | {down, _}} } | { - {broadcast, mg_core_gen_squad_heart:payload()}, + {broadcast, gen_squad_heart:payload()}, {sent, [pid()], _Ctx} | received } @@ -46,7 +46,10 @@ } | {unexpected, {{call, _From} | cast | info, _Payload}}. --type handler() :: mg_core_utils:mod_opts(). +-type mod_opts() :: mod_opts(term()). 
+-type mod_opts(Options) :: {module(), Options} | module(). + +-type handler() :: mod_opts(). -export_type([beat/0]). -export_type([handler/0]). @@ -57,5 +60,17 @@ -spec handle_beat(handler(), any()) -> _. handle_beat(Handler, Beat) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(Handler), + {Mod, Options} = separate_mod_opts(Handler), Mod:handle_beat(Options, Beat). + +%% + +-spec separate_mod_opts(mod_opts()) -> {module(), _Arg}. +separate_mod_opts(ModOpts) -> + separate_mod_opts(ModOpts, undefined). + +-spec separate_mod_opts(mod_opts(Defaults), Defaults) -> {module(), Defaults}. +separate_mod_opts(ModOpts = {_, _}, _) -> + ModOpts; +separate_mod_opts(Mod, Default) -> + {Mod, Default}. diff --git a/apps/mg_core/test/mg_core_gen_squad_SUITE.erl b/apps/gen_squad/test/gen_squad_SUITE.erl similarity index 87% rename from apps/mg_core/test/mg_core_gen_squad_SUITE.erl rename to apps/gen_squad/test/gen_squad_SUITE.erl index 07e05712..f0b90150 100644 --- a/apps/mg_core/test/mg_core_gen_squad_SUITE.erl +++ b/apps/gen_squad/test/gen_squad_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ %%% limitations under the License. %%% --module(mg_core_gen_squad_SUITE). --include_lib("common_test/include/ct.hrl"). +-module(gen_squad_SUITE). + -include_lib("stdlib/include/assert.hrl"). --include_lib("mg_cth/include/mg_cth.hrl"). +-include("gen_squad_cth.hrl"). %% tests descriptions -export([all/0]). @@ -29,7 +29,7 @@ -export([squad_shrinks_consistently/1]). %% squad behaviour --behaviour(mg_core_gen_squad). +-behaviour(gen_squad). -export([init/1]). -export([discover/1]). -export([handle_rank_change/3]). @@ -37,7 +37,7 @@ -export([handle_cast/4]). -export([handle_info/4]). --behaviour(mg_core_gen_squad_pulse). +-behaviour(gen_squad_pulse). -export([handle_beat/2]). 
%% tests descriptions @@ -56,12 +56,11 @@ all() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> - Apps = mg_cth:start_applications([mg_core]), - [{apps, Apps} | C]. + C. -spec end_per_suite(config()) -> ok. -end_per_suite(C) -> - mg_cth:stop_applications(?config(apps, C)). +end_per_suite(_C) -> + ok. %% @@ -122,9 +121,9 @@ squad_shrinks_consistently(_) -> _ = ?assertEqual([LeaderLast], lists:filter(fun erlang:is_process_alive/1, Members)), ok. --spec start_member(mg_core_gen_squad:opts()) -> pid(). +-spec start_member(gen_squad:opts()) -> pid(). start_member(Opts) -> - {ok, Pid} = mg_core_gen_squad:start_link(?MODULE, #{runner => self(), known => []}, Opts), + {ok, Pid} = gen_squad:start_link(?MODULE, #{runner => self(), known => []}, Opts), Pid. -spec neighbours([T]) -> [{T, T}]. @@ -134,8 +133,8 @@ neighbours([]) -> []. %% --type rank() :: mg_core_gen_squad:rank(). --type squad() :: mg_core_gen_squad:squad(). +-type rank() :: gen_squad:rank(). +-type squad() :: gen_squad:squad(). -type st() :: #{ runner := pid(), @@ -152,7 +151,7 @@ discover(St = #{known := Known}) -> -spec handle_rank_change(rank(), squad(), st()) -> {noreply, st()}. handle_rank_change(Rank, Squad, St = #{runner := Runner}) -> - _ = Runner ! {self(), Rank, mg_core_gen_squad:members(Squad)}, + _ = Runner ! {self(), Rank, gen_squad:members(Squad)}, case Rank of leader -> {noreply, St, 200}; follower -> {noreply, St} @@ -164,7 +163,7 @@ handle_rank_change(_Rank, _Squad, St) -> -spec handle_call(call(), _From, rank(), squad(), st()) -> {noreply, st()} | {reply, _, st()}. handle_call(report, _From, Rank, Squad, St) -> - {reply, {self(), Rank, mg_core_gen_squad:members(Squad)}, St}; + {reply, {self(), Rank, gen_squad:members(Squad)}, St}; handle_call(Call, From, _Rank, _Squad, _St) -> erlang:error({unexpected, {call, Call, From}}). @@ -184,7 +183,7 @@ handle_info(timeout, leader, _Squad, St) -> handle_info(Info, _Rank, _Squad, _St) -> erlang:error({unexpected, {info, Info}}). 
--spec handle_beat(_, mg_core_gen_squad_pulse:beat()) -> _. +-spec handle_beat(_, gen_squad_pulse:beat()) -> _. handle_beat(_Start, {{timer, _}, _}) -> ok; handle_beat(_Start, {{monitor, _}, _}) -> diff --git a/apps/machinegun/src/machinegun.app.src b/apps/machinegun/src/machinegun.app.src index 19f7a2d8..8f236ffb 100644 --- a/apps/machinegun/src/machinegun.app.src +++ b/apps/machinegun/src/machinegun.app.src @@ -26,11 +26,15 @@ erl_health, prometheus, prometheus_cowboy, + mg_utils, mg_core, + mg_riak, + mg_es_kafka, mg_woody, opentelemetry_api, opentelemetry_exporter, - opentelemetry + opentelemetry, + mg_conf ]}, {env, []}, {modules, []}, diff --git a/apps/machinegun/src/machinegun.erl b/apps/machinegun/src/machinegun.erl index 0d2308b1..74689d5a 100644 --- a/apps/machinegun/src/machinegun.erl +++ b/apps/machinegun/src/machinegun.erl @@ -22,12 +22,8 @@ stop() -> start(_StartType, _StartArgs) -> Config = maps:from_list(genlib_app:env(?MODULE)), ok = setup_metrics(), - ChildSpecs = mg_configurator:construct_child_specs(Config), - genlib_adhoc_supervisor:start_link( - {local, ?MODULE}, - #{strategy => rest_for_one}, - ChildSpecs - ). + ChildSpecs = mg_conf:construct_child_specs(Config, additional_routes(Config)), + genlib_adhoc_supervisor:start_link({local, ?MODULE}, #{strategy => rest_for_one}, ChildSpecs). -spec stop(any()) -> ok. stop(_State) -> @@ -40,4 +36,38 @@ setup_metrics() -> ok = woody_ranch_prometheus_collector:setup(), ok = woody_hackney_prometheus_collector:setup(), ok = mg_pulse_prometheus:setup(), - ok = mg_riak_prometheus:setup(). + ok = mg_riak_pulse_prometheus:setup(), + ok = mg_riak_prometheus:setup(), + ok = mg_event_sink_kafka_prometheus_pulse:setup(). + +%% TODO Maybe move those to `mg_conf'. + +-spec additional_routes(mg_conf:config()) -> [woody_server_thrift_http_handler:route(any())]. 
+additional_routes(Config) -> + HealthChecks = maps:get(health_check, Config, #{}), + [ + get_startup_route(), + get_health_route(HealthChecks), + get_prometheus_route() + ]. + +-spec get_startup_route() -> {iodata(), module(), _Opts :: any()}. +get_startup_route() -> + EvHandler = {erl_health_event_handler, []}, + Check = #{ + startup => #{ + runner => {mg_health_check, startup, []}, + event_handler => EvHandler + } + }, + erl_health_handle:get_startup_route(Check). + +-spec get_health_route(erl_health:check()) -> {iodata(), module(), _Opts :: any()}. +get_health_route(Check0) -> + EvHandler = {erl_health_event_handler, []}, + Check = maps:map(fun(_, V = {_, _, _}) -> #{runner => V, event_handler => EvHandler} end, Check0), + erl_health_handle:get_route(Check). + +-spec get_prometheus_route() -> {iodata(), module(), _Opts :: any()}. +get_prometheus_route() -> + {"/metrics/[:registry]", prometheus_cowboy2_handler, []}. diff --git a/apps/machinegun/src/mg_configuration_utils.erl b/apps/machinegun/src/mg_configuration_utils.erl index 91bc4889..b6d8323b 100644 --- a/apps/machinegun/src/mg_configuration_utils.erl +++ b/apps/machinegun/src/mg_configuration_utils.erl @@ -327,7 +327,7 @@ proplist(Config) -> -spec ip(yaml_string()) -> inet:ip_address(). ip(Host) -> - mg_core_utils:throw_if_error(inet:parse_address(string(Host))). + mg_utils:throw_if_error(inet:parse_address(string(Host))). -spec atom(yaml_string()) -> atom(). atom(AtomStr) -> diff --git a/apps/machinegun/src/mg_pulse.erl b/apps/machinegun/src/mg_pulse.erl index 4ad13f62..1120b506 100644 --- a/apps/machinegun/src/mg_pulse.erl +++ b/apps/machinegun/src/mg_pulse.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -17,18 +17,21 @@ -module(mg_pulse). -include_lib("mg_woody/include/pulse.hrl"). 
+-include_lib("mg_es_kafka/include/pulse.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). %% pulse types -type beat() :: - mg_core_pulse:beat() - | mg_core_queue_scanner:beat() + mpulse:beat() + | mg_riak_pulse:beat() + | mg_skd_scanner:beat() | #woody_event{} - | #woody_request_handle_error{}. + | #woody_request_handle_error{} + | #mg_event_sink_kafka_sent{}. -type options() :: #{ woody_event_handler_options => woody_event_handler:options(), @@ -46,8 +49,11 @@ handle_beat(Options, Beat) -> ok = mg_woody_pulse_otel:handle_beat(Options, Beat), ok = mg_core_pulse_otel:handle_beat(Options, Beat), + ok = mg_riak_pulse:handle_beat(Options, Beat), ok = mg_pulse_log:handle_beat(maps:get(woody_event_handler_options, Options, #{}), Beat), ok = mg_pulse_prometheus:handle_beat(#{}, Beat), + ok = mg_riak_pulse_prometheus:handle_beat(#{}, Beat), + ok = mg_event_sink_kafka_prometheus_pulse:handle_beat(#{}, Beat), ok = maybe_handle_lifecycle_kafka(Options, Beat). %% diff --git a/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl b/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl index a35f5812..c0be99db 100644 --- a/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl +++ b/apps/machinegun/src/mg_pulse_lifecycle_kafka.erl @@ -18,8 +18,8 @@ -include_lib("mg_core/include/pulse.hrl"). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -type options() :: #{ topic := brod:topic(), diff --git a/apps/machinegun/src/mg_pulse_log.erl b/apps/machinegun/src/mg_pulse_log.erl index c4118eb6..d32a7508 100644 --- a/apps/machinegun/src/mg_pulse_log.erl +++ b/apps/machinegun/src/mg_pulse_log.erl @@ -16,11 +16,12 @@ -module(mg_pulse_log). +-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). -include_lib("mg_woody/include/pulse.hrl"). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -export([handle_beat/2]). 
@@ -70,14 +71,14 @@ format_beat(#woody_event{event = Event, rpc_id = RPCID, event_meta = EventMeta}, WoodyMeta = woody_event_handler:format_meta(Event, EventMeta, WoodyMetaFields), Meta = lists:flatten([extract_woody_meta(WoodyMeta), extract_meta(rpc_id, RPCID)]), {Level, Msg, Meta}; -format_beat(#mg_core_scheduler_task_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_task_error, Beat), +format_beat(#mg_skd_task_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_task_error, Beat), {warning, {"scheduler task ~p failed ~p", [Name, Reason]}, Context}; -format_beat(#mg_core_scheduler_task_add_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_task_add_error, Beat), +format_beat(#mg_skd_task_add_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_task_add_error, Beat), {warning, {"scheduler task ~p add failed ~p", [Name, Reason]}, Context}; -format_beat(#mg_core_scheduler_search_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> - Context = ?BEAT_TO_META(mg_core_scheduler_search_error, Beat), +format_beat(#mg_skd_search_error{scheduler_name = Name, exception = {_, Reason, _}} = Beat, _Options) -> + Context = ?BEAT_TO_META(mg_skd_search_error, Beat), {warning, {"scheduler search ~p failed ~p", [Name, Reason]}, Context}; format_beat(#mg_core_machine_process_transient_error{exception = {_, Reason, _}} = Beat, _Options) -> Context = ?BEAT_TO_META(mg_core_machine_process_transient_error, Beat), @@ -122,7 +123,7 @@ format_beat(_Beat, _Options) -> undefined. %% squad --spec format_squad_beat(mg_core_gen_squad_pulse:beat()) -> log_msg() | undefined. +-spec format_squad_beat(gen_squad_pulse:beat()) -> log_msg() | undefined. 
format_squad_beat({rank, {changed, Rank}}) -> {info, {"rank changed to: ~p", [Rank]}, [ {mg_pulse_event_id, squad_rank_changed}, diff --git a/apps/machinegun/src/mg_pulse_prometheus.erl b/apps/machinegun/src/mg_pulse_prometheus.erl index 87abb4e0..b090a996 100644 --- a/apps/machinegun/src/mg_pulse_prometheus.erl +++ b/apps/machinegun/src/mg_pulse_prometheus.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ -module(mg_pulse_prometheus). +-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). --include_lib("mg_woody/include/pulse.hrl"). -export([setup/0]). -export([handle_beat/2]). @@ -34,7 +34,8 @@ -spec handle_beat(options(), beat()) -> ok. handle_beat(_Options, Beat) -> - ok = dispatch_metrics(Beat). + ok = dispatch_metrics(Beat), + ok. %% %% management API @@ -194,67 +195,6 @@ setup() -> {duration_unit, seconds}, {help, "Machinegun storage operation duration."} ]), - % Riak client operations - true = prometheus_counter:declare([ - {name, mg_riak_client_operation_changes_total}, - {registry, registry()}, - {labels, [namespace, name, operation, change]}, - {help, "Total number of Machinegun riak client operations."} - ]), - true = prometheus_histogram:declare([ - {name, mg_riak_client_operation_duration_seconds}, - {registry, registry()}, - {labels, [namespace, name, operation]}, - {buckets, duration_buckets()}, - {duration_unit, seconds}, - {help, "Machinegun riak client operation duration."} - ]), - %% Riak pool events - true = prometheus_counter:declare([ - {name, mg_riak_pool_no_free_connection_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of no free connection errors in Machinegun riak pool."} - ]), - true = prometheus_counter:declare([ - {name, 
mg_riak_pool_queue_limit_reached_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of queue limit reached errors in Machinegun riak pool."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_connect_timeout_errors_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of connect timeout errors in Machinegun riak pool."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_killed_free_connections_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of killed free Machinegun riak pool connections."} - ]), - true = prometheus_counter:declare([ - {name, mg_riak_pool_killed_in_use_connections_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of killed used Machinegun riak pool connections."} - ]), - %% Event sink / kafka - true = prometheus_counter:declare([ - {name, mg_events_sink_produced_total}, - {registry, registry()}, - {labels, [namespace, name]}, - {help, "Total number of Machinegun event sink events."} - ]), - true = prometheus_histogram:declare([ - {name, mg_events_sink_kafka_produced_duration_seconds}, - {registry, registry()}, - {labels, [namespace, name, action]}, - {buckets, duration_buckets()}, - {duration_unit, seconds}, - {help, "Machinegun event sink addition duration."} - ]), ok. 
%% Internals @@ -311,7 +251,7 @@ dispatch_metrics(#mg_core_timer_process_finished{namespace = NS, queue = Queue, ok = inc(mg_timer_processing_changes_total, [NS, Queue, finished]), ok = observe(mg_timer_processing_duration_seconds, [NS, Queue], Duration); % Scheduler -dispatch_metrics(#mg_core_scheduler_search_success{ +dispatch_metrics(#mg_skd_search_success{ scheduler_name = Name, namespace = NS, delay = DelayMS, @@ -320,25 +260,25 @@ dispatch_metrics(#mg_core_scheduler_search_success{ ok = inc(mg_scheduler_scan_changes_total, [NS, Name, success]), ok = observe(mg_scheduler_scan_delay_seconds, [NS, Name], decode_delay(DelayMS)), ok = observe(mg_scheduler_scan_duration_seconds, [NS, Name], Duration); -dispatch_metrics(#mg_core_scheduler_search_error{scheduler_name = Name, namespace = NS}) -> +dispatch_metrics(#mg_skd_search_error{scheduler_name = Name, namespace = NS}) -> ok = inc(mg_scheduler_scan_changes_total, [NS, Name, error]); -dispatch_metrics(#mg_core_scheduler_task_error{scheduler_name = Name, namespace = NS}) -> +dispatch_metrics(#mg_skd_task_error{scheduler_name = Name, namespace = NS}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, error]); -dispatch_metrics(#mg_core_scheduler_new_tasks{scheduler_name = Name, namespace = NS, new_tasks_count = Count}) -> +dispatch_metrics(#mg_skd_new_tasks{scheduler_name = Name, namespace = NS, new_tasks_count = Count}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, created], Count); -dispatch_metrics(#mg_core_scheduler_task_started{scheduler_name = Name, namespace = NS, task_delay = DelayMS}) -> +dispatch_metrics(#mg_skd_task_started{scheduler_name = Name, namespace = NS, task_delay = DelayMS}) -> ok = inc(mg_scheduler_task_changes_total, [NS, Name, started]), ok = observe(mg_scheduler_task_processing_delay_seconds, [NS, Name], decode_delay(DelayMS)); -dispatch_metrics(#mg_core_scheduler_task_finished{} = Beat) -> - #mg_core_scheduler_task_finished{ +dispatch_metrics(#mg_skd_task_finished{} = Beat) -> 
+ #mg_skd_task_finished{ scheduler_name = Name, namespace = NS, process_duration = Duration } = Beat, ok = inc(mg_scheduler_task_changes_total, [NS, Name, finished]), ok = observe(mg_scheduler_task_processing_duration_seconds, [NS, Name], Duration); -dispatch_metrics(#mg_core_scheduler_quota_reserved{} = Beat) -> - #mg_core_scheduler_quota_reserved{ +dispatch_metrics(#mg_skd_quota_reserved{} = Beat) -> + #mg_skd_quota_reserved{ scheduler_name = Name, namespace = NS, active_tasks = Active, @@ -378,54 +318,6 @@ dispatch_metrics(#mg_core_storage_delete_start{name = {NS, _Caller, Type}}) -> dispatch_metrics(#mg_core_storage_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> ok = inc(mg_storage_operation_changes_total, [NS, Type, delete, finish]), ok = observe(mg_storage_operation_duration_seconds, [NS, Type, delete], Duration); -% Riak client operations -dispatch_metrics(#mg_core_riak_client_get_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, start]); -dispatch_metrics(#mg_core_riak_client_get_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, get], Duration); -dispatch_metrics(#mg_core_riak_client_put_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, start]); -dispatch_metrics(#mg_core_riak_client_put_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, put], Duration); -dispatch_metrics(#mg_core_riak_client_search_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, start]); -dispatch_metrics(#mg_core_riak_client_search_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = 
inc(mg_riak_client_operation_changes_total, [NS, Type, search, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, search], Duration); -dispatch_metrics(#mg_core_riak_client_delete_start{name = {NS, _Caller, Type}}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, start]); -dispatch_metrics(#mg_core_riak_client_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> - ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, finish]), - ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, delete], Duration); -% Riak pool events -dispatch_metrics(#mg_core_riak_connection_pool_state_reached{ - name = {NS, _Caller, Type}, - state = no_free_connections -}) -> - ok = inc(mg_riak_pool_no_free_connection_errors_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_state_reached{ - name = {NS, _Caller, Type}, - state = queue_limit_reached -}) -> - ok = inc(mg_riak_pool_queue_limit_reached_errors_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = free}) -> - ok = inc(mg_riak_pool_killed_free_connections_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = in_use}) -> - ok = inc(mg_riak_pool_killed_in_use_connections_total, [NS, Type]); -dispatch_metrics(#mg_core_riak_connection_pool_error{name = {NS, _Caller, Type}, reason = connect_timeout}) -> - ok = inc(mg_riak_pool_connect_timeout_errors_total, [NS, Type]); -% Event sink operations -dispatch_metrics(#mg_core_events_sink_kafka_sent{ - name = Name, - namespace = NS, - encode_duration = EncodeDuration, - send_duration = SendDuration -}) -> - ok = inc(mg_events_sink_produced_total, [NS, Name]), - ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), - ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); % 
Unknown dispatch_metrics(_Beat) -> ok. diff --git a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl index 6cb0587c..ad4354c3 100644 --- a/apps/machinegun/test/mg_prometheus_metric_SUITE.erl +++ b/apps/machinegun/test/mg_prometheus_metric_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -18,8 +18,9 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). -include_lib("mg_core/include/pulse.hrl"). --include_lib("prometheus/include/prometheus_model.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). %% tests descriptions -export([all/0]). @@ -62,22 +63,6 @@ -export([storage_search_finish_test/1]). -export([storage_delete_start_test/1]). -export([storage_delete_finish_test/1]). --export([riak_client_get_start_test/1]). --export([riak_client_get_finish_test/1]). --export([riak_client_put_start_test/1]). --export([riak_client_put_finish_test/1]). --export([riak_client_search_start_test/1]). --export([riak_client_search_finish_test/1]). --export([riak_client_delete_start_test/1]). --export([riak_client_delete_finish_test/1]). --export([riak_pool_no_free_connection_errors_test/1]). --export([riak_pool_queue_limit_reached_errors_test/1]). --export([riak_pool_killed_free_connections_test/1]). --export([riak_pool_killed_in_use_connections_test/1]). --export([riak_pool_connect_timeout_errors_test/1]). --export([events_sink_kafka_sent_test/1]). - --export([riak_pool_collector_test/1]). -define(NS, <<"NS">>). @@ -91,8 +76,7 @@ -spec all() -> [test_name() | {group, group_name()}]. all() -> [ - {group, beats}, - {group, collectors} + {group, beats} ]. -spec groups() -> [{group_name(), list(_), [test_name()]}]. 
@@ -131,24 +115,7 @@ groups() -> storage_search_start_test, storage_search_finish_test, storage_delete_start_test, - storage_delete_finish_test, - riak_client_get_start_test, - riak_client_get_finish_test, - riak_client_put_start_test, - riak_client_put_finish_test, - riak_client_search_start_test, - riak_client_search_finish_test, - riak_client_delete_start_test, - riak_client_delete_finish_test, - riak_pool_no_free_connection_errors_test, - riak_pool_queue_limit_reached_errors_test, - riak_pool_killed_free_connections_test, - riak_pool_killed_in_use_connections_test, - riak_pool_connect_timeout_errors_test, - events_sink_kafka_sent_test - ]}, - {collectors, [], [ - riak_pool_collector_test + storage_delete_finish_test ]} ]. @@ -360,7 +327,7 @@ scheduler_search_success_test(_C) -> Buckets = test_millisecond_buckets(), _ = maps:fold( fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_scheduler_search_success{ + ok = test_beat(#mg_skd_search_success{ namespace = ?NS, scheduler_name = name, delay = 0, @@ -381,7 +348,7 @@ scheduler_search_success_test(_C) -> -spec scheduler_search_error_test(config()) -> _. scheduler_search_error_test(_C) -> - ok = test_beat(#mg_core_scheduler_search_error{ + ok = test_beat(#mg_skd_search_error{ namespace = ?NS, scheduler_name = name, exception = {throw, thrown, []} @@ -389,7 +356,7 @@ scheduler_search_error_test(_C) -> -spec scheduler_task_error_test(config()) -> _. scheduler_task_error_test(_C) -> - ok = test_beat(#mg_core_scheduler_task_error{ + ok = test_beat(#mg_skd_task_error{ namespace = ?NS, machine_id = <<"ID">>, scheduler_name = name, @@ -398,7 +365,7 @@ scheduler_task_error_test(_C) -> -spec scheduler_new_tasks_test(config()) -> _. scheduler_new_tasks_test(_C) -> - ok = test_beat(#mg_core_scheduler_new_tasks{ + ok = test_beat(#mg_skd_new_tasks{ namespace = ?NS, scheduler_name = name, new_tasks_count = 0 @@ -406,7 +373,7 @@ scheduler_new_tasks_test(_C) -> -spec scheduler_task_started_test(config()) -> _. 
scheduler_task_started_test(_C) -> - ok = test_beat(#mg_core_scheduler_task_started{ + ok = test_beat(#mg_skd_task_started{ namespace = ?NS, scheduler_name = name, machine_id = <<"ID">>, @@ -418,7 +385,7 @@ scheduler_task_finished_test(_C) -> Buckets = test_millisecond_buckets(), _ = maps:fold( fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_scheduler_task_finished{ + ok = test_beat(#mg_skd_task_finished{ namespace = ?NS, scheduler_name = name, machine_id = <<"ID">>, @@ -438,7 +405,7 @@ scheduler_task_finished_test(_C) -> -spec scheduler_quota_reserved_test(config()) -> _. scheduler_quota_reserved_test(_C) -> - ok = test_beat(#mg_core_scheduler_quota_reserved{ + ok = test_beat(#mg_skd_quota_reserved{ namespace = ?NS, scheduler_name = name, active_tasks = 0, @@ -570,264 +537,6 @@ storage_delete_finish_test(_C) -> Buckets ). --spec riak_client_get_start_test(config()) -> _. -riak_client_get_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_get_start{ - name = {?NS, caller, type} - }). - --spec riak_client_get_finish_test(config()) -> _. -riak_client_get_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_get_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, get]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_put_start_test(config()) -> _. -riak_client_put_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_put_start{ - name = {?NS, caller, type} - }). - --spec riak_client_put_finish_test(config()) -> _. 
-riak_client_put_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_put_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, put]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_search_start_test(config()) -> _. -riak_client_search_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_search_start{ - name = {?NS, caller, type} - }). - --spec riak_client_search_finish_test(config()) -> _. -riak_client_search_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_search_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, search]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_client_delete_start_test(config()) -> _. -riak_client_delete_start_test(_C) -> - ok = test_beat(#mg_core_riak_client_delete_start{ - name = {?NS, caller, type} - }). - --spec riak_client_delete_finish_test(config()) -> _. 
-riak_client_delete_finish_test(_C) -> - Buckets = test_millisecond_buckets(), - _ = maps:fold( - fun(DurationMs, BucketIdx, Acc) -> - ok = test_beat(#mg_core_riak_client_delete_finish{ - name = {?NS, caller, type}, - duration = erlang:convert_time_unit(DurationMs, millisecond, native) - }), - {BucketsHits, _} = - prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, delete]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), - Acc#{BucketIdx => BucketHit} - end, - #{}, - Buckets - ). - --spec riak_pool_no_free_connection_errors_test(config()) -> _. -riak_pool_no_free_connection_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_state_reached{ - name = {?NS, caller, type}, - state = no_free_connections - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_no_free_connection_errors_total, [?NS, type]) - ). - --spec riak_pool_queue_limit_reached_errors_test(config()) -> _. -riak_pool_queue_limit_reached_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_state_reached{ - name = {?NS, caller, type}, - state = queue_limit_reached - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_queue_limit_reached_errors_total, [?NS, type]) - ). - --spec riak_pool_killed_free_connections_test(config()) -> _. -riak_pool_killed_free_connections_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_connection_killed{ - name = {?NS, caller, type}, - state = free - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_killed_free_connections_total, [?NS, type]) - ). - --spec riak_pool_killed_in_use_connections_test(config()) -> _. 
-riak_pool_killed_in_use_connections_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_connection_killed{ - name = {?NS, caller, type}, - state = in_use - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_killed_in_use_connections_total, [?NS, type]) - ). - --spec riak_pool_connect_timeout_errors_test(config()) -> _. -riak_pool_connect_timeout_errors_test(_C) -> - ok = test_beat(#mg_core_riak_connection_pool_error{ - name = {?NS, caller, type}, - reason = connect_timeout - }), - ?assertEqual( - 1, - prometheus_counter:value(mg_riak_pool_connect_timeout_errors_total, [?NS, type]) - ). - --spec events_sink_kafka_sent_test(config()) -> _. -events_sink_kafka_sent_test(_C) -> - Buckets = test_millisecond_buckets(), - Name = kafka, - _ = maps:fold( - fun(DurationMs, BucketIdx, {Counter, BucketAcc}) -> - ok = test_beat(#mg_core_events_sink_kafka_sent{ - name = Name, - namespace = ?NS, - machine_id = <<"ID">>, - request_context = null, - deadline = undefined, - encode_duration = erlang:convert_time_unit(DurationMs, millisecond, native), - send_duration = erlang:convert_time_unit(DurationMs, millisecond, native), - data_size = 0, - partition = 0, - offset = 0 - }), - ?assertEqual(prometheus_counter:value(mg_events_sink_produced_total, [?NS, Name]), Counter), - {BucketsHits, _} = - prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), - {BucketsHits, _} = - prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, send]), - BucketHit = lists:nth(BucketIdx, BucketsHits), - %% Check that bucket under index BucketIdx received one hit - ?assertEqual(maps:get(BucketIdx, BucketAcc, 0) + 1, BucketHit), - {Counter + 1, BucketAcc#{BucketIdx => BucketHit}} - end, - {1, #{}}, - Buckets - ). - -%% - --spec riak_pool_collector_test(config()) -> _. 
-riak_pool_collector_test(_C) -> - ok = mg_cth:await_ready(fun mg_cth:riak_ready/0), - Storage = - {mg_core_storage_riak, #{ - name => {?NS, caller, type}, - host => "riakdb", - port => 8087, - bucket => ?NS, - pool_options => #{ - init_count => 0, - max_count => 10, - queue_max => 100 - }, - pulse => undefined, - sidecar => {mg_riak_prometheus, #{}} - }}, - - {ok, Pid} = genlib_adhoc_supervisor:start_link( - #{strategy => one_for_all}, - [mg_core_storage:child_spec(Storage, storage)] - ), - - Collectors = prometheus_registry:collectors(default), - ?assert(lists:member(mg_riak_prometheus_collector, Collectors)), - - Self = self(), - ok = prometheus_collector:collect_mf( - default, - mg_riak_prometheus_collector, - fun(MF) -> Self ! MF end - ), - MFs = mg_cth:flush(), - MLabels = [ - #'LabelPair'{name = <<"namespace">>, value = <<"NS">>}, - #'LabelPair'{name = <<"name">>, value = <<"type">>} - ], - ?assertMatch( - [ - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_free">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_in_use">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_connections_limit">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 10}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_queued_requests">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] - }, - #'MetricFamily'{ - name = <<"mg_riak_pool_queued_requests_limit">>, - metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 100}}] - } - ], - lists:sort(MFs) - ), - - ok = proc_lib:stop(Pid, normal, 5000). - %% Metrics utils -spec test_beat(term()) -> ok. 
diff --git a/apps/machinegun/test/mg_tests_SUITE.erl b/apps/machinegun/test/mg_tests_SUITE.erl index 35b1d66b..a487529b 100644 --- a/apps/machinegun/test/mg_tests_SUITE.erl +++ b/apps/machinegun/test/mg_tests_SUITE.erl @@ -247,7 +247,7 @@ mg_config(#{endpoint := {IP, Port}}, C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, client => mg_cth:config(kafka_client_name) diff --git a/apps/mg_conf/rebar.config b/apps/mg_conf/rebar.config new file mode 100644 index 00000000..02763852 --- /dev/null +++ b/apps/mg_conf/rebar.config @@ -0,0 +1,3 @@ +{deps, [ + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. diff --git a/apps/mg_conf/src/mg_conf.app.src b/apps/mg_conf/src/mg_conf.app.src new file mode 100644 index 00000000..5bd86d93 --- /dev/null +++ b/apps/mg_conf/src/mg_conf.app.src @@ -0,0 +1,19 @@ +{application, mg_conf, [ + {description, "Machinegun configuration"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + mg_utils, + mg_core, + mg_riak, + mg_es_kafka, + mg_woody + ]}, + {env, []}, + {modules, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/machinegun/src/mg_configurator.erl b/apps/mg_conf/src/mg_conf.erl similarity index 72% rename from apps/machinegun/src/mg_configurator.erl rename to apps/mg_conf/src/mg_conf.erl index 94e8a79f..e6828692 100644 --- a/apps/machinegun/src/mg_configurator.erl +++ b/apps/mg_conf/src/mg_conf.erl @@ -1,6 +1,6 @@ --module(mg_configurator). +-module(mg_conf). --export([construct_child_specs/1]). +-export([construct_child_specs/2]). 
-type modernizer() :: #{ current_format_version := mg_core_events:format_version(), @@ -13,7 +13,7 @@ % all but `worker_options.worker` option worker => mg_core_workers_manager:options(), storage := mg_core_machine:storage_options(), - event_sinks => [mg_core_events_sink:handler()], + event_sinks => [mg_core_event_sink:handler()], retries := mg_core_machine:retry_opt(), schedulers := mg_core_machine:schedulers_opt(), default_processing_timeout := timeout(), @@ -26,25 +26,29 @@ -type config() :: #{ woody_server := mg_woody:woody_server(), namespaces := namespaces(), + quotas => [mg_skd_quota_worker:options()], pulse := pulse(), - quotas => [mg_core_quota_worker:options()], health_check => erl_health:check() }. +-export_type([namespaces/0]). +-export_type([config/0]). + -type processor() :: mg_woody_processor:options(). --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). --spec construct_child_specs(config()) -> [supervisor:child_spec()]. +-spec construct_child_specs(config(), [woody_server_thrift_http_handler:route(any())]) -> [supervisor:child_spec()]. construct_child_specs( #{ woody_server := WoodyServer, namespaces := Namespaces, pulse := Pulse - } = Config + } = Config, + AdditionalRoutes ) -> Quotas = maps:get(quotas, Config, []), - HealthChecks = maps:get(health_check, Config, #{}), + ClusterOpts = maps:get(cluster, Config, #{}), QuotasChildSpec = quotas_child_specs(Quotas, quota), @@ -55,11 +59,7 @@ construct_child_specs( pulse => Pulse, automaton => api_automaton_options(Namespaces, Pulse), woody_server => WoodyServer, - additional_routes => [ - get_startup_route(), - get_health_route(HealthChecks), - get_prometheus_route() - ] + additional_routes => AdditionalRoutes } ), ClusterSpec = mg_core_union:child_spec(ClusterOpts), @@ -73,38 +73,17 @@ construct_child_specs( %% --spec get_startup_route() -> {iodata(), module(), _Opts :: any()}. 
-get_startup_route() -> - EvHandler = {erl_health_event_handler, []}, - Check = #{ - startup => #{ - runner => {mg_health_check, startup, []}, - event_handler => EvHandler - } - }, - erl_health_handle:get_startup_route(Check). - --spec get_health_route(erl_health:check()) -> {iodata(), module(), _Opts :: any()}. -get_health_route(Check0) -> - EvHandler = {erl_health_event_handler, []}, - Check = maps:map(fun(_, V = {_, _, _}) -> #{runner => V, event_handler => EvHandler} end, Check0), - erl_health_handle:get_route(Check). - --spec get_prometheus_route() -> {iodata(), module(), _Opts :: any()}. -get_prometheus_route() -> - {"/metrics/[:registry]", prometheus_cowboy2_handler, []}. - --spec quotas_child_specs([mg_core_quota_worker:options()], atom()) -> [supervisor:child_spec()]. +-spec quotas_child_specs([mg_skd_quota_worker:options()], atom()) -> [supervisor:child_spec()]. quotas_child_specs(Quotas, ChildID) -> [ - mg_core_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) + mg_skd_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) || Options <- Quotas ]. -spec events_machines_child_specs(namespaces(), pulse()) -> supervisor:child_spec(). events_machines_child_specs(NSs, Pulse) -> NsOptions = [events_machine_options(NS, NSs, Pulse) || NS <- maps:keys(NSs)], - mg_namespace_sup:child_spec(NsOptions, namespaces_sup). + mg_conf_namespace_sup:child_spec(NsOptions, namespaces_sup). -spec events_machine_options(mg_core:ns(), namespaces(), pulse()) -> mg_core_events_machine:options(). events_machine_options(NS, NSs, Pulse) -> @@ -167,9 +146,9 @@ api_automaton_options(NSs, Pulse) -> NSs ). --spec event_sink_options(mg_core_events_sink:handler(), pulse()) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, Pulse) -> - {mg_core_events_sink_kafka, EventSinkConfig#{ +-spec event_sink_options(mg_core_event_sink:handler(), pulse()) -> mg_core_event_sink:handler(). 
+event_sink_options({mg_event_sink_kafka, EventSinkConfig}, Pulse) -> + {mg_event_sink_kafka, EventSinkConfig#{ pulse => Pulse, encoder => fun mg_woody_event_sink:serialize/3 }}. @@ -178,26 +157,26 @@ event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}, Pulse) -> worker_manager_options(Config) -> maps:merge( #{ - registry => mg_core_procreg_gproc + registry => mg_procreg_gproc }, maps:get(worker, Config, #{}) ). --spec processor(processor(), pulse()) -> mg_core_utils:mod_opts(). +-spec processor(processor(), pulse()) -> mg_utils:mod_opts(). processor(Processor, Pulse) -> {mg_woody_processor, Processor#{event_handler => {mg_woody_event_handler, Pulse}}}. -spec sub_storage_options(mg_core:ns(), mg_core_machine:storage_options()) -> mg_core_machine:storage_options(). sub_storage_options(SubNS, Storage0) -> - Storage1 = mg_core_utils:separate_mod_opts(Storage0, #{}), + Storage1 = mg_utils:separate_mod_opts(Storage0, #{}), Storage2 = add_bucket_postfix(SubNS, Storage1), Storage2. -spec add_bucket_postfix(mg_core:ns(), mg_core_storage:options()) -> mg_core_storage:options(). add_bucket_postfix(_, {mg_core_storage_memory, _} = Storage) -> Storage; -add_bucket_postfix(SubNS, {mg_core_storage_riak, #{bucket := Bucket} = Options}) -> - {mg_core_storage_riak, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. +add_bucket_postfix(SubNS, {mg_riak_storage, #{bucket := Bucket} = Options}) -> + {mg_riak_storage, Options#{bucket := mg_utils:concatenate_namespaces(Bucket, SubNS)}}. -spec modernizer_options(modernizer() | undefined, pulse()) -> #{modernizer => mg_core_events_modernizer:options()}. 
modernizer_options(#{current_format_version := CurrentFormatVersion, handler := WoodyClient}, Pulse) -> diff --git a/apps/machinegun/src/mg_namespace_sup.erl b/apps/mg_conf/src/mg_conf_namespace_sup.erl similarity index 88% rename from apps/machinegun/src/mg_namespace_sup.erl rename to apps/mg_conf/src/mg_conf_namespace_sup.erl index 97faf405..b76246cc 100644 --- a/apps/machinegun/src/mg_namespace_sup.erl +++ b/apps/mg_conf/src/mg_conf_namespace_sup.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2022 Valitydev +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_namespace_sup). +-module(mg_conf_namespace_sup). -type namespaces() :: [mg_core_events_machine:options()]. @@ -34,7 +34,7 @@ child_spec(Namespaces, ChildID) -> type => supervisor }. --spec start_link(namespaces(), _ChildID) -> mg_core_utils:gen_start_ret(). +-spec start_link(namespaces(), _ChildID) -> mg_utils:gen_start_ret(). start_link(Namespaces, ChildID) -> {ok, SupPid} = genlib_adhoc_supervisor:start_link( #{strategy => simple_one_for_one}, @@ -49,7 +49,7 @@ start_link(Namespaces, ChildID) -> ), start_namespace_children(SupPid, Namespaces). --spec start_namespace_children(pid(), namespaces()) -> mg_core_utils:gen_start_ret(). +-spec start_namespace_children(pid(), namespaces()) -> mg_utils:gen_start_ret(). start_namespace_children(SupPid, []) -> {ok, SupPid}; start_namespace_children(SupPid, [Namespace | Rest]) -> diff --git a/apps/mg_core/include/pulse.hrl b/apps/mg_core/include/pulse.hrl index c604ebe6..50fd14b0 100644 --- a/apps/mg_core/include/pulse.hrl +++ b/apps/mg_core/include/pulse.hrl @@ -27,7 +27,7 @@ machine_id :: mg_core:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). 
%% Timer processing @@ -52,76 +52,12 @@ duration :: non_neg_integer() }). -%% Scheduler - --record(mg_core_scheduler_search_success, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - delay :: mg_core_queue_scanner:scan_delay(), - tasks :: [mg_core_queue_task:task()], - limit :: mg_core_queue_scanner:scan_limit(), - % in native units - duration :: non_neg_integer() -}). - --record(mg_core_scheduler_search_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception() -}). - --record(mg_core_scheduler_task_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception(), - machine_id :: mg_core:id() | undefined -}). - --record(mg_core_scheduler_task_add_error, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - exception :: mg_core_utils:exception(), - machine_id :: mg_core:id(), - request_context :: mg_core:request_context() -}). - --record(mg_core_scheduler_new_tasks, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - new_tasks_count :: non_neg_integer() -}). - --record(mg_core_scheduler_task_started, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - machine_id :: mg_core:id() | undefined, - task_delay :: timeout() -}). - --record(mg_core_scheduler_task_finished, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - machine_id :: mg_core:id() | undefined, - task_delay :: timeout(), - % in native units - process_duration :: non_neg_integer() -}). - --record(mg_core_scheduler_quota_reserved, { - namespace :: mg_core:ns(), - scheduler_name :: mg_core_scheduler:name(), - active_tasks :: non_neg_integer(), - waiting_tasks :: non_neg_integer(), - quota_name :: mg_core_quota_worker:name(), - quota_reserved :: mg_core_quota:resource() -}). 
- %% Machine -record(mg_core_machine_process_transient_error, { namespace :: mg_core:ns(), machine_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), request_context :: mg_core:request_context() }). @@ -180,7 +116,7 @@ machine_id :: mg_core:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(mg_core_machine_lifecycle_repaired, { @@ -194,14 +130,14 @@ namespace :: mg_core:ns(), machine_id :: mg_core:id(), request_context :: mg_core:request_context(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(mg_core_machine_lifecycle_transient_error, { context :: atom(), namespace :: mg_core:ns(), machine_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), request_context :: mg_core:request_context(), retry_strategy :: genlib_retry:strategy(), retry_action :: {wait, timeout(), genlib_retry:strategy()} | finish @@ -226,7 +162,7 @@ namespace :: mg_core:ns(), machine_id :: mg_core:id(), notification_id :: mg_core:id(), - exception :: mg_core_utils:exception(), + exception :: mg_utils:exception(), action :: delete | {reschedule, genlib_time:ts()} | ignore }). @@ -269,62 +205,6 @@ duration :: non_neg_integer() }). -%% Riak client operations -%% Duration is in native units - --record(mg_core_riak_client_get_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_get_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_put_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_put_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_search_start, { - name :: mg_core_storage:name() -}). 
- --record(mg_core_riak_client_search_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - --record(mg_core_riak_client_delete_start, { - name :: mg_core_storage:name() -}). - --record(mg_core_riak_client_delete_finish, { - name :: mg_core_storage:name(), - duration :: non_neg_integer() -}). - -%% Riak connection pool events - --record(mg_core_riak_connection_pool_state_reached, { - name :: mg_core_storage:name(), - state :: no_free_connections | queue_limit_reached -}). - --record(mg_core_riak_connection_pool_connection_killed, { - name :: mg_core_storage:name(), - state :: free | in_use -}). - --record(mg_core_riak_connection_pool_error, { - name :: mg_core_storage:name(), - reason :: connect_timeout -}). - %% Workers management -record(mg_core_worker_call_attempt, { @@ -341,21 +221,3 @@ msg_queue_len :: non_neg_integer(), msg_queue_limit :: mg_core_workers_manager:queue_limit() }). - -%% Events sink operations - --record(mg_core_events_sink_kafka_sent, { - name :: atom(), - namespace :: mg_core:ns(), - machine_id :: mg_core:id(), - request_context :: mg_core:request_context(), - deadline :: mg_core_deadline:deadline(), - % in native units - encode_duration :: non_neg_integer(), - % in native units - send_duration :: non_neg_integer(), - % in bytes - data_size :: non_neg_integer(), - partition :: brod:partition(), - offset :: brod:offset() -}). 
diff --git a/apps/mg_core/rebar.config b/apps/mg_core/rebar.config index 01d72ffd..aad165f4 100644 --- a/apps/mg_core/rebar.config +++ b/apps/mg_core/rebar.config @@ -3,32 +3,7 @@ {brod, "3.16.1"}, {snappyer, "1.2.8"}, {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}}, - {riakc, {git, "https://github.com/valitydev/riak-erlang-client", {branch, develop}}}, - {pooler, {git, "https://github.com/seth/pooler", {branch, master}}}, {msgpack, {git, "https://github.com/msgpack/msgpack-erlang", {branch, master}}}, {snowflake, {git, "https://github.com/valitydev/snowflake", {branch, master}}}, {opentelemetry_api, "1.2.1"} ]}. - -{overrides, [ - {override, rebar3_protobuffs_plugin, [ - {deps, [{protobuffs, {git, "https://github.com/basho/erlang_protobuffs", {tag, "0.8.2"}}}]} - ]}, - - {override, protobuffs, [{deps, []}]}, - - {override, riakc, [ - {erl_opts, [ - {d, namespaced_types}, - {d, deprecated_19} - ]} - ]}, - - {override, riak_pb, [ - {plugins, [ - {riak_pb_msgcodegen, {git, "https://github.com/tsloughter/riak_pb_msgcodegen", {branch, "master"}}}, - {rebar3_protobuffs_plugin, {git, "https://github.com/cmkarlsson/rebar3_protobuffs_plugin", {tag, "0.1.1"}}} - ]}, - {provider_hooks, [{pre, [{compile, {protobuffs, compile}}, {compile, riak_pb_msgcodegen}]}]} - ]} -]}. diff --git a/apps/mg_core/src/mg_core.app.src b/apps/mg_core/src/mg_core.app.src index e8aa4068..d21f48ab 100644 --- a/apps/mg_core/src/mg_core.app.src +++ b/apps/mg_core/src/mg_core.app.src @@ -14,7 +14,7 @@ %%% limitations under the License. 
%%% -{application, mg_core , [ +{application, mg_core, [ {description, "Machinegun FSM processor"}, {vsn, "1"}, {registered, []}, @@ -23,11 +23,12 @@ stdlib, genlib, gproc, - riakc, - pooler, brod, msgpack, snowflake, + mg_scheduler, + mg_utils, + mg_procreg, opentelemetry_api ]}, {env, []}, diff --git a/apps/mg_core/src/mg_core.erl b/apps/mg_core/src/mg_core.erl index 39de01c3..8332c2e7 100644 --- a/apps/mg_core/src/mg_core.erl +++ b/apps/mg_core/src/mg_core.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2017 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ %%% -module(mg_core). +-include_lib("mg_core/include/pulse.hrl"). + %% API -export_type([ns/0]). -export_type([id/0]). @@ -23,3 +25,44 @@ -type ns() :: binary(). -type id() :: binary(). -type request_context() :: mg_core_storage:opaque(). + +-export_type([beat/0]). +-type beat() :: + % Timer + #mg_core_timer_lifecycle_created{} + | #mg_core_timer_lifecycle_rescheduled{} + | #mg_core_timer_lifecycle_rescheduling_error{} + | #mg_core_timer_lifecycle_removed{} + % Timer handling + | #mg_core_timer_process_started{} + | #mg_core_timer_process_finished{} + % Machine process state + | #mg_core_machine_lifecycle_created{} + | #mg_core_machine_lifecycle_removed{} + | #mg_core_machine_lifecycle_loaded{} + | #mg_core_machine_lifecycle_unloaded{} + | #mg_core_machine_lifecycle_committed_suicide{} + | #mg_core_machine_lifecycle_failed{} + | #mg_core_machine_lifecycle_repaired{} + | #mg_core_machine_lifecycle_loading_error{} + | #mg_core_machine_lifecycle_transient_error{} + % Machine call handling + | #mg_core_machine_process_started{} + | #mg_core_machine_process_finished{} + | #mg_core_machine_process_transient_error{} + % Machine notification + | #mg_core_machine_notification_created{} + | #mg_core_machine_notification_delivered{} + | #mg_core_machine_notification_delivery_error{} 
+ % Machine worker handling + | #mg_core_worker_call_attempt{} + | #mg_core_worker_start_attempt{} + % Storage calls + | #mg_core_storage_get_start{} + | #mg_core_storage_get_finish{} + | #mg_core_storage_put_start{} + | #mg_core_storage_put_finish{} + | #mg_core_storage_search_start{} + | #mg_core_storage_search_finish{} + | #mg_core_storage_delete_start{} + | #mg_core_storage_delete_finish{}. diff --git a/apps/mg_core/src/mg_core_events_sink.erl b/apps/mg_core/src/mg_core_event_sink.erl similarity index 87% rename from apps/mg_core/src/mg_core_events_sink.erl rename to apps/mg_core/src/mg_core_event_sink.erl index 95c4fdd9..52d63015 100644 --- a/apps/mg_core/src/mg_core_events_sink.erl +++ b/apps/mg_core/src/mg_core_event_sink.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink). +-module(mg_core_event_sink). -include_lib("opentelemetry_api/include/otel_tracer.hrl"). -include_lib("opentelemetry_api/include/opentelemetry.hrl"). @@ -32,7 +32,7 @@ %% Types --type handler(Options) :: mg_core_utils:mod_opts(Options). +-type handler(Options) :: mg_utils:mod_opts(Options). -type handler() :: handler(handler_options()). -export_type([handler/1]). @@ -51,7 +51,7 @@ add_events(_Handler, _NS, _ID, [], _ReqCtx, _Deadline) -> ok; add_events(Handler, NS, ID, Events, ReqCtx, Deadline) -> - {Mod, _} = mg_core_utils:separate_mod_opts(Handler), + {Mod, _} = mg_utils:separate_mod_opts(Handler), SpanOpts = #{ kind => ?SPAN_KIND_PRODUCER, attributes => mg_core_otel:machine_tags(NS, ID, #{ @@ -60,5 +60,5 @@ add_events(Handler, NS, ID, Events, ReqCtx, Deadline) -> }) }, ?with_span(<<"sinking events">>, SpanOpts, fun(_) -> - ok = mg_core_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]) + ok = mg_utils:apply_mod_opts(Handler, add_events, [NS, ID, Events, ReqCtx, Deadline]) end). 
diff --git a/apps/mg_core/src/mg_core_events_machine.erl b/apps/mg_core/src/mg_core_events_machine.erl index b6ca5d87..251177cd 100644 --- a/apps/mg_core/src/mg_core_events_machine.erl +++ b/apps/mg_core/src/mg_core_events_machine.erl @@ -121,16 +121,16 @@ -type options() :: #{ namespace => mg_core:ns(), events_storage => storage_options(), - processor => mg_core_utils:mod_opts(), + processor => mg_utils:mod_opts(), machines => mg_core_machine:options(), retries => #{_Subject => genlib_retry:policy()}, - pulse => mg_core_pulse:handler(), - event_sinks => [mg_core_events_sink:handler()], + pulse => mpulse:handler(), + event_sinks => [mg_core_event_sink:handler()], default_processing_timeout => timeout(), event_stash_size => non_neg_integer() }. % like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -spec child_spec(options(), atom()) -> supervisor:child_spec(). child_spec(Options, ChildID) -> @@ -141,11 +141,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_events_storage:child_spec(Options), mg_core_machine:child_spec(machine_options(Options), automaton) ]) @@ -209,7 +209,7 @@ call(Options, ID, Args, HRange, ReqCtx, Deadline) -> get_machine(Options, ID, HRange) -> #{state := State, status := Status} = mg_core_machine:get(machine_options(Options), ID), EffectiveState = maybe_apply_delayed_actions(opaque_to_state(State)), - _ = mg_core_utils:throw_if_undefined(EffectiveState, {logic, machine_not_found}), + _ = mg_utils:throw_if_undefined(EffectiveState, {logic, machine_not_found}), machine(Options, ID, EffectiveState, Status, HRange). 
-spec remove(options(), id(), request_context(), deadline()) -> ok. @@ -246,7 +246,7 @@ notify(Options, MachineID, Args, HRange, ReqCtx) -> -spec processor_child_spec(options()) -> supervisor:child_spec() | undefined. processor_child_spec(Options) -> - mg_core_utils:apply_mod_opts_if_defined( + mg_utils:apply_mod_opts_if_defined( processor_options(Options), processor_child_spec, undefined @@ -454,7 +454,7 @@ push_events_to_event_sinks(Options, ID, ReqCtx, Deadline, Events) -> EventSinks = maps:get(event_sinks, Options, []), lists:foreach( fun(EventSinkHandler) -> - ok = mg_core_events_sink:add_events(EventSinkHandler, Namespace, ID, Events, ReqCtx, Deadline) + ok = mg_core_event_sink:add_events(EventSinkHandler, Namespace, ID, Events, ReqCtx, Deadline) end, EventSinks ). @@ -479,14 +479,14 @@ emit_action_beats(Options, ID, ReqCtx, ComplexAction) -> -spec emit_timer_action_beats(options(), mg_core:id(), request_context(), complex_action()) -> ok. emit_timer_action_beats(Options, ID, ReqCtx, #{timer := unset_timer}) -> #{namespace := NS, pulse := Pulse} = Options, - mg_core_pulse:handle_beat(Pulse, #mg_core_timer_lifecycle_removed{ + mpulse:handle_beat(Pulse, #mg_core_timer_lifecycle_removed{ namespace = NS, machine_id = ID, request_context = ReqCtx }); emit_timer_action_beats(Options, ID, ReqCtx, #{timer := {set_timer, Timer, _, _}}) -> #{namespace := NS, pulse := Pulse} = Options, - mg_core_pulse:handle_beat(Pulse, #mg_core_timer_lifecycle_created{ + mpulse:handle_beat(Pulse, #mg_core_timer_lifecycle_created{ namespace = NS, machine_id = ID, request_context = ReqCtx, @@ -501,7 +501,7 @@ emit_timer_action_beats(_Options, _ID, _ReqCtx, #{}) -> {ok, state()}. 
process_signal(Options = #{processor := Processor}, ReqCtx, Deadline, Signal, Machine, State) -> SignalArgs = [ReqCtx, Deadline, {Signal, Machine}], - {StateChange, ComplexAction} = mg_core_utils:apply_mod_opts( + {StateChange, ComplexAction} = mg_utils:apply_mod_opts( Processor, process_signal, SignalArgs @@ -522,7 +522,7 @@ process_signal(Options = #{processor := Processor}, ReqCtx, Deadline, Signal, Ma {_Resp, state()}. process_call(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machine, State) -> CallArgs = [ReqCtx, Deadline, {Args, Machine}], - {Resp, StateChange, ComplexAction} = mg_core_utils:apply_mod_opts( + {Resp, StateChange, ComplexAction} = mg_utils:apply_mod_opts( Processor, process_call, CallArgs @@ -543,7 +543,7 @@ process_call(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machin {ok, {_Resp, state()}} | {error, repair_error()}. process_repair(Options = #{processor := Processor}, ReqCtx, Deadline, Args, Machine, State) -> RepairArgs = [ReqCtx, Deadline, {Args, Machine}], - case mg_core_utils:apply_mod_opts(Processor, process_repair, RepairArgs) of + case mg_utils:apply_mod_opts(Processor, process_repair, RepairArgs) of {ok, {Resp, StateChange, ComplexAction}} -> #{id := ID} = Machine, NewState = handle_processing_result( @@ -653,7 +653,7 @@ timer_to_timestamp({deadline, Deadline}) -> %% --spec processor_options(options()) -> mg_core_utils:mod_opts(). +-spec processor_options(options()) -> mg_utils:mod_opts(). processor_options(Options) -> maps:get(processor, Options). diff --git a/apps/mg_core/src/mg_core_events_modernizer.erl b/apps/mg_core/src/mg_core_events_modernizer.erl index e86aa226..81e66281 100644 --- a/apps/mg_core/src/mg_core_events_modernizer.erl +++ b/apps/mg_core/src/mg_core_events_modernizer.erl @@ -26,7 +26,7 @@ -type options() :: #{ current_format_version := mg_core_events:format_version(), - handler := mg_core_utils:mod_opts(handler_opts()) + handler := mg_utils:mod_opts(handler_opts()) }. 
% handler specific @@ -123,4 +123,4 @@ event_to_machine_event(NS, ID, Event) -> -spec call_handler(options(), request_context(), machine_event()) -> modernized_event_body(). call_handler(#{handler := Handler}, ReqCtx, MachineEvent) -> % TODO обработка ошибок? - mg_core_utils:apply_mod_opts(Handler, modernize_event, [ReqCtx, MachineEvent]). + mg_utils:apply_mod_opts(Handler, modernize_event, [ReqCtx, MachineEvent]). diff --git a/apps/mg_core/src/mg_core_events_storage.erl b/apps/mg_core/src/mg_core_events_storage.erl index 74fb2114..da17f6bf 100644 --- a/apps/mg_core/src/mg_core_events_storage.erl +++ b/apps/mg_core/src/mg_core_events_storage.erl @@ -71,7 +71,7 @@ get_events(Options, ID, Range) -> -spec events_storage_options(mg_core_events_machine:options()) -> mg_core_storage:options(). events_storage_options(#{namespace := NS, events_storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?STORAGE_NS, events}, pulse => Handler}}. -spec events_to_kvs(mg_core:id(), [mg_core_events:event()]) -> [mg_core_storage:kv()]. 
diff --git a/apps/mg_core/src/mg_core_machine.erl b/apps/mg_core/src/mg_core_machine.erl index 9108ace7..25571e02 100644 --- a/apps/mg_core/src/mg_core_machine.erl +++ b/apps/mg_core/src/mg_core_machine.erl @@ -130,19 +130,19 @@ % how much tasks in total scheduler is ready to enqueue for processing capacity => non_neg_integer(), % wait at least this delay before subsequent scanning of persistent store for queued tasks - min_scan_delay => mg_core_queue_scanner:scan_delay(), + min_scan_delay => mg_skd_scanner:scan_delay(), % wait at most this delay before subsequent scanning attempts when queue appears to be empty - rescan_delay => mg_core_queue_scanner:scan_delay(), + rescan_delay => mg_skd_scanner:scan_delay(), % how many tasks to fetch at most - max_scan_limit => mg_core_queue_scanner:scan_limit(), + max_scan_limit => mg_skd_scanner:scan_limit(), % by how much to adjust limit to account for possibly duplicated tasks - scan_ahead => mg_core_queue_scanner:scan_ahead(), + scan_ahead => mg_skd_scanner:scan_ahead(), % how many seconds in future a task can be for it to be sent to the local scheduler target_cutoff => seconds(), % name of quota limiting number of active tasks - task_quota => mg_core_quota_worker:name(), + task_quota => mg_skd_quota_worker:name(), % share of quota limit - task_share => mg_core_quota:share(), + task_share => mg_skd_quota:share(), % notifications: upper bound for scan ([_; TSNow - scan_handicap]) scan_handicap => seconds(), % notifications: lower bound for scan ([TSNow - scan_handicap - scan_cutoff; _]) @@ -163,10 +163,10 @@ %% fixed for namespace and pulse, worker -type options() :: #{ namespace := mg_core:ns(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), storage => storage_options(), notification => mg_core_notification:options(), - processor => mg_core_utils:mod_opts(), + processor => mg_utils:mod_opts(), worker := mg_core_workers_manager:ns_options(), retries => retry_opt(), schedulers => schedulers_opt(), @@ -176,7 
+176,7 @@ }. % like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -type thrown_error() :: {logic, logic_error()} | {transient, transient_error()} | {timeout, _Reason}. @@ -266,11 +266,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options = #{namespace := NS}) -> start_link(Options, {?MODULE, NS}). --spec start_link(options(), _ChildID) -> mg_core_utils:gen_start_ret(). +-spec start_link(options(), _ChildID) -> mg_utils:gen_start_ret(). start_link(Options, ChildID) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_one}, @@ -287,7 +287,7 @@ machine_sup_child_spec(Options, ChildID) -> start => {genlib_adhoc_supervisor, start_link, [ #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_storage:child_spec(storage_options(Options), storage), notification_child_spec(Options), processor_child_spec(Options), @@ -309,7 +309,7 @@ scheduler_sup_child_spec(Options, ChildID) -> intensity => 10, period => 30 }, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ scheduler_child_spec(timers, Options), scheduler_child_spec(timers_retries, Options), scheduler_child_spec(overseer, Options), @@ -380,14 +380,14 @@ resume_interrupted(Options, ID, Deadline) -> fail(Options, ID, ReqCtx, Deadline) -> fail(Options, ID, {error, explicit_fail, []}, ReqCtx, Deadline). --spec fail(options(), mg_core:id(), mg_core_utils:exception(), request_context(), deadline()) -> ok. +-spec fail(options(), mg_core:id(), mg_utils:exception(), request_context(), deadline()) -> ok. fail(Options, ID, Exception, ReqCtx, Deadline) -> call_(Options, ID, {fail, Exception}, ReqCtx, Deadline). -spec get(options(), mg_core:id()) -> storage_machine() | throws(). 
get(Options, ID) -> {_, StorageMachine} = - mg_core_utils:throw_if_undefined( + mg_utils:throw_if_undefined( get_storage_machine(Options, ID), {logic, machine_not_found} ), @@ -465,7 +465,7 @@ all_statuses() -> -spec call_(options(), mg_core:id(), _, maybe(request_context()), deadline()) -> _ | no_return(). call_(Options, ID, Call, ReqCtx, Deadline) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( mg_core_workers_manager:call(manager_options(Options), ID, Call, ReqCtx, Deadline) ). @@ -483,7 +483,7 @@ call_(Options, ID, Call, ReqCtx, Deadline) -> }. -type scheduler_ref() :: - {mg_core_scheduler:id(), _TargetCutoff :: seconds()}. + {mg_skd:id(), _TargetCutoff :: seconds()}. -spec handle_load(mg_core:id(), options(), request_context()) -> {ok, state()}. handle_load(ID, Options, ReqCtx) -> @@ -917,7 +917,7 @@ opaque_to_notification_args([1, Args, RequestContext]) -> TargetTime :: genlib_time:ts(). send_notification_task(Options, NotificationID, Args, MachineID, Context, TargetTime) -> Task = mg_core_queue_notifications:build_task(NotificationID, MachineID, TargetTime, Context, Args), - mg_core_scheduler:send_task(scheduler_id(notification, Options), Task). + mg_skd:send_task(scheduler_id(notification, Options), Task). -spec process_with_retry(Impact, ProcessingCtx, ReqCtx, Deadline, State, Retry) -> State when Impact :: processor_impact(), @@ -990,7 +990,7 @@ handle_transient_exception(_Reason, State) -> State. -spec handle_exception(Exception, ReqCtx, Deadline, state()) -> state() when - Exception :: mg_core_utils:exception(), + Exception :: mg_utils:exception(), ReqCtx :: request_context(), Deadline :: deadline(). handle_exception(Exception, ReqCtx, Deadline, State) -> @@ -1094,7 +1094,7 @@ handle_notification_processed(NotificationID, State = #{notifications_processed ) -> processor_result(). 
call_processor(Impact, ProcessingCtx, ReqCtx, Deadline, State) -> #{options := Options, id := ID, storage_machine := #{state := MachineState}} = State, - mg_core_utils:apply_mod_opts( + mg_utils:apply_mod_opts( get_options(processor, Options), process_machine, [ID, Impact, ProcessingCtx, ReqCtx, Deadline, MachineState] @@ -1108,7 +1108,7 @@ notification_child_spec(#{}) -> -spec processor_child_spec(options()) -> supervisor:child_spec(). processor_child_spec(Options) -> - mg_core_utils:apply_mod_opts_if_defined( + mg_utils:apply_mod_opts_if_defined( get_options(processor, Options), processor_child_spec, undefined @@ -1264,16 +1264,16 @@ get_scheduler_ref(SchedulerType, Options) -> undefined end. --spec try_send_timer_task(scheduler_type(), mg_core_queue_task:target_time(), state()) -> ok. +-spec try_send_timer_task(scheduler_type(), mg_skd_task:target_time(), state()) -> ok. try_send_timer_task(SchedulerType, TargetTime, #{id := ID, schedulers := Schedulers}) -> case maps:get(SchedulerType, Schedulers, undefined) of {SchedulerID, Cutoff} when is_integer(Cutoff) -> % Ok let's send if it's not too far in the future. - CurrentTime = mg_core_queue_task:current_time(), + CurrentTime = mg_skd_task:current_time(), case TargetTime =< CurrentTime + Cutoff of true -> Task = mg_core_queue_timer:build_task(ID, TargetTime), - mg_core_scheduler:send_task(SchedulerID, Task); + mg_skd:send_task(SchedulerID, Task); false -> ok end; @@ -1444,7 +1444,7 @@ manager_options(Options = #{namespace := NS, worker := ManagerOptions, pulse := -spec storage_options(options()) -> mg_core_storage:options(). storage_options(#{namespace := NS, storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?MODULE, machines}, pulse => Handler}}. -spec notification_options(options()) -> mg_core_notification:options(). 
@@ -1459,15 +1459,15 @@ scheduler_child_spec(SchedulerType, Options) -> Config -> SchedulerID = scheduler_id(SchedulerType, Options), SchedulerOptions = scheduler_options(SchedulerType, Options, Config), - mg_core_scheduler_sup:child_spec(SchedulerID, SchedulerOptions, SchedulerType) + mg_skd_sup:child_spec(SchedulerID, SchedulerOptions, SchedulerType) end. --spec scheduler_id(scheduler_type(), options()) -> mg_core_scheduler:id() | undefined. +-spec scheduler_id(scheduler_type(), options()) -> mg_skd:id() | undefined. scheduler_id(SchedulerType, #{namespace := NS}) -> {SchedulerType, NS}. -spec scheduler_options(scheduler_type(), options(), scheduler_opt()) -> - mg_core_scheduler_sup:options(). + mg_skd_sup:options(). scheduler_options(SchedulerType, Options, Config) when SchedulerType == timers; SchedulerType == timers_retries @@ -1503,8 +1503,7 @@ scheduler_options(notification = SchedulerType, Options, Config) -> }, scheduler_options(mg_core_queue_notifications, Options, HandlerOptions, Config). --spec scheduler_options(module(), options(), map(), scheduler_opt()) -> - mg_core_scheduler_sup:options(). +-spec scheduler_options(module(), options(), map(), scheduler_opt()) -> mg_skd_sup:options(). scheduler_options(HandlerMod, Options, HandlerOptions, Config) -> #{ pulse := Pulse @@ -1613,6 +1612,6 @@ do_with_retry(Options = #{namespace := NS}, ID, Fun, RetryStrategy, ReqCtx, Beat %% logging %% --spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(#{pulse := Handler}, Beat) -> - ok = mg_core_pulse:handle_beat(Handler, Beat). + ok = mpulse:handle_beat(Handler, Beat). diff --git a/apps/mg_core/src/mg_core_notification.erl b/apps/mg_core/src/mg_core_notification.erl index d961bc7f..cf60c4bd 100644 --- a/apps/mg_core/src/mg_core_notification.erl +++ b/apps/mg_core/src/mg_core_notification.erl @@ -17,7 +17,7 @@ -type context() :: mg_core_storage:context(). 
-type options() :: #{ namespace := mg_core:ns(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), storage := storage_options() }. @@ -29,7 +29,7 @@ %% Internal types % FIXME like mg_core_storage:options() except `name` --type storage_options() :: mg_core_utils:mod_opts(map()). +-type storage_options() :: mg_utils:mod_opts(map()). -type ts() :: genlib_time:ts(). %% @@ -43,7 +43,7 @@ child_spec(Options, ChildID) -> start => {genlib_adhoc_supervisor, start_link, [ #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ mg_core_storage:child_spec(storage_options(Options), storage) ]) ]}, @@ -124,5 +124,5 @@ data_to_opaque(#{ -spec storage_options(options()) -> mg_core_storage:options(). storage_options(#{namespace := NS, storage := StorageOptions, pulse := Handler}) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(StorageOptions, #{}), + {Mod, Options} = mg_utils:separate_mod_opts(StorageOptions, #{}), {Mod, Options#{name => {NS, ?MODULE, notifications}, pulse => Handler}}. diff --git a/apps/mg_core/src/mg_core_otel.erl b/apps/mg_core/src/mg_core_otel.erl index aa695418..ec271964 100644 --- a/apps/mg_core/src/mg_core_otel.erl +++ b/apps/mg_core/src/mg_core_otel.erl @@ -117,7 +117,7 @@ add_event(Name, Attributes) -> _ = otel_span:add_event(otel_tracer:current_span_ctx(), Name, Attributes), ok. --spec record_exception(mg_core_utils:exception(), opentelemetry:attributes_map()) -> ok. +-spec record_exception(mg_utils:exception(), opentelemetry:attributes_map()) -> ok. record_exception({Class, Reason, Stacktrace}, Attributes) -> _ = otel_span:record_exception(otel_tracer:current_span_ctx(), Class, Reason, Stacktrace, Attributes), ok. 
diff --git a/apps/mg_core/src/mg_core_procreg.erl b/apps/mg_core/src/mg_core_procreg.erl deleted file mode 100644 index 71231693..00000000 --- a/apps/mg_core/src/mg_core_procreg.erl +++ /dev/null @@ -1,88 +0,0 @@ -%%% -%%% Copyright 2019 RBKmoney -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% - --module(mg_core_procreg). - -% Any term sans ephemeral ones, like `reference()`s / `pid()`s / `fun()`s. --type name() :: term(). --type name_pattern() :: ets:match_pattern(). - --type ref() :: mg_core_utils:gen_ref(). --type reg_name() :: mg_core_utils:gen_reg_name(). - --type procreg_options() :: term(). --type options() :: mg_core_utils:mod_opts(procreg_options()). - --export_type([name/0]). --export_type([name_pattern/0]). --export_type([ref/0]). --export_type([reg_name/0]). --export_type([options/0]). - --export_type([start_link_ret/0]). - --export([ref/2]). --export([reg_name/2]). --export([select/2]). - --export([start_link/5]). --export([call/3]). --export([call/4]). - -%% Names and references - --callback ref(procreg_options(), name()) -> ref(). - --callback reg_name(procreg_options(), name()) -> reg_name(). - --callback select(procreg_options(), name_pattern()) -> [{name(), pid()}]. - --callback call(procreg_options(), ref(), _Call, timeout()) -> _Reply. - --type start_link_ret() :: - {ok, pid()} | {error, term()}. - --callback start_link(procreg_options(), reg_name(), module(), _Args, list()) -> start_link_ret(). 
- -%% - --spec ref(options(), name()) -> ref(). -ref(Options, Name) -> - mg_core_utils:apply_mod_opts(Options, ref, [Name]). - --spec reg_name(options(), name()) -> reg_name(). -reg_name(Options, Name) -> - mg_core_utils:apply_mod_opts(Options, reg_name, [Name]). - --spec select(options(), name_pattern()) -> [{name(), pid()}]. -select(Options, NamePattern) -> - mg_core_utils:apply_mod_opts(Options, select, [NamePattern]). - --spec call(options(), name(), _Call) -> _Reply. -call(Options, Name, Call) -> - call(Options, Name, Call, 5000). - --spec call(options(), name(), _Call, timeout()) -> _Reply. -call(Options, Name, Call, Timeout) -> - mg_core_utils:apply_mod_opts(Options, call, [ref(Options, Name), Call, Timeout]). - --spec start_link(options(), name(), module(), _Args, list()) -> start_link_ret(). -start_link(Options, Name, Module, Args, Opts) -> - mg_core_utils:apply_mod_opts( - Options, - start_link, - [reg_name(Options, Name), Module, Args, Opts] - ). diff --git a/apps/mg_core/src/mg_core_pulse.erl b/apps/mg_core/src/mg_core_pulse.erl deleted file mode 100644 index b25ded4a..00000000 --- a/apps/mg_core/src/mg_core_pulse.erl +++ /dev/null @@ -1,108 +0,0 @@ -%%% -%%% Copyright 2018 RBKmoney -%%% -%%% Licensed under the Apache License, Version 2.0 (the "License"); -%%% you may not use this file except in compliance with the License. -%%% You may obtain a copy of the License at -%%% -%%% http://www.apache.org/licenses/LICENSE-2.0 -%%% -%%% Unless required by applicable law or agreed to in writing, software -%%% distributed under the License is distributed on an "AS IS" BASIS, -%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%%% See the License for the specific language governing permissions and -%%% limitations under the License. -%%% --module(mg_core_pulse). - --include_lib("mg_core/include/pulse.hrl"). - -%% API --export_type([beat/0]). --export_type([handler/0]). --export([handle_beat/2]). 
- --callback handle_beat(Options :: any(), beat()) -> ok. - -%% -%% API -%% --type beat() :: - % Timer - #mg_core_timer_lifecycle_created{} - | #mg_core_timer_lifecycle_rescheduled{} - | #mg_core_timer_lifecycle_rescheduling_error{} - | #mg_core_timer_lifecycle_removed{} - % Scheduler handling - | #mg_core_scheduler_task_add_error{} - | #mg_core_scheduler_search_success{} - | #mg_core_scheduler_search_error{} - | #mg_core_scheduler_task_error{} - | #mg_core_scheduler_new_tasks{} - | #mg_core_scheduler_task_started{} - | #mg_core_scheduler_task_finished{} - | #mg_core_scheduler_quota_reserved{} - % Timer handling - | #mg_core_timer_process_started{} - | #mg_core_timer_process_finished{} - % Machine process state - | #mg_core_machine_lifecycle_created{} - | #mg_core_machine_lifecycle_removed{} - | #mg_core_machine_lifecycle_loaded{} - | #mg_core_machine_lifecycle_unloaded{} - | #mg_core_machine_lifecycle_committed_suicide{} - | #mg_core_machine_lifecycle_failed{} - | #mg_core_machine_lifecycle_repaired{} - | #mg_core_machine_lifecycle_loading_error{} - | #mg_core_machine_lifecycle_transient_error{} - % Machine call handling - | #mg_core_machine_process_started{} - | #mg_core_machine_process_finished{} - | #mg_core_machine_process_transient_error{} - % Machine notification - | #mg_core_machine_notification_created{} - | #mg_core_machine_notification_delivered{} - | #mg_core_machine_notification_delivery_error{} - % Machine worker handling - | #mg_core_worker_call_attempt{} - | #mg_core_worker_start_attempt{} - % Storage calls - | #mg_core_storage_get_start{} - | #mg_core_storage_get_finish{} - | #mg_core_storage_put_start{} - | #mg_core_storage_put_finish{} - | #mg_core_storage_search_start{} - | #mg_core_storage_search_finish{} - | #mg_core_storage_delete_start{} - | #mg_core_storage_delete_finish{} - % Event sink operations - | #mg_core_events_sink_kafka_sent{} - % Riak client call handling - | #mg_core_riak_client_get_start{} - | #mg_core_riak_client_get_finish{} - 
| #mg_core_riak_client_put_start{} - | #mg_core_riak_client_put_finish{} - | #mg_core_riak_client_search_start{} - | #mg_core_riak_client_search_finish{} - | #mg_core_riak_client_delete_start{} - | #mg_core_riak_client_delete_finish{} - % Riak client call handling - | #mg_core_riak_connection_pool_state_reached{} - | #mg_core_riak_connection_pool_connection_killed{} - | #mg_core_riak_connection_pool_error{}. - --type handler() :: mg_core_utils:mod_opts() | undefined. - --spec handle_beat(handler(), any()) -> ok. -handle_beat(undefined, _Beat) -> - ok; -handle_beat(Handler, Beat) -> - {Mod, Options} = mg_core_utils:separate_mod_opts(Handler), - try - ok = Mod:handle_beat(Options, Beat) - catch - Class:Reason:ST -> - Stacktrace = genlib_format:format_stacktrace(ST), - Msg = "Pulse handler ~p failed at beat ~p: ~p:~p ~s", - ok = logger:error(Msg, [{Mod, Options}, Beat, Class, Reason, Stacktrace]) - end. diff --git a/apps/mg_core/src/mg_core_pulse_otel.erl b/apps/mg_core/src/mg_core_pulse_otel.erl index cfcfc472..fb7fc147 100644 --- a/apps/mg_core/src/mg_core_pulse_otel.erl +++ b/apps/mg_core/src/mg_core_pulse_otel.erl @@ -4,7 +4,7 @@ -include_lib("opentelemetry_api/include/opentelemetry.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). @@ -12,8 +12,9 @@ -type options() :: map(). -type beat() :: - mg_core_pulse:beat() - | mg_core_queue_scanner:beat(). + mg_core:beat() + | mg_skd:beat() + | mg_skd_scanner:beat(). -export_type([options/0]). @@ -47,7 +48,7 @@ handle_beat( handle_beat(_Options, #mg_core_timer_lifecycle_removed{machine_id = ID, namespace = NS}) -> mg_core_otel:add_event(<<"timer removed">>, mg_core_otel:machine_tags(NS, ID)); %% Scheduler handling -%% TODO Handle and trace events for 'mg_core_scheduler_*' beats +%% TODO Handle and trace events for 'mg_skd_*' beats %% Timer handling %% Wraps `Module:process_machine/7` when processor impact is 'timeout'. 
handle_beat(_Options, #mg_core_timer_process_started{machine_id = _ID, namespace = _NS, queue = _Queue}) -> diff --git a/apps/mg_core/src/mg_core_queue_interrupted.erl b/apps/mg_core/src/mg_core_queue_interrupted.erl index d02e9e14..23642479 100644 --- a/apps/mg_core/src/mg_core_queue_interrupted.erl +++ b/apps/mg_core/src/mg_core_queue_interrupted.erl @@ -16,18 +16,18 @@ -module(mg_core_queue_interrupted). --behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types -type milliseconds() :: non_neg_integer(). -type options() :: #{ - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), min_scan_delay => milliseconds(), rescan_delay => milliseconds(), @@ -45,8 +45,8 @@ -type task_id() :: mg_core:id(). -type task_payload() :: #{}. --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). +-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). % 1 minute -define(DEFAULT_PROCESSING_TIMEOUT, 60000). @@ -62,7 +62,7 @@ init(_Options) -> -spec search_tasks(options(), _Limit :: non_neg_integer(), state()) -> {{scan_delay(), [task()]}, state()}. 
search_tasks(Options, Limit, #state{continuation = Continuation} = State) -> - CurrentTime = mg_core_queue_task:current_time(), + CurrentTime = mg_skd_task:current_time(), MachineOptions = machine_options(Options), Query = processing, {IDs, NewContinuation} = mg_core_machine:search(MachineOptions, Query, Limit, Continuation), diff --git a/apps/mg_core/src/mg_core_queue_notifications.erl b/apps/mg_core/src/mg_core_queue_notifications.erl index 2359e96d..067cba36 100644 --- a/apps/mg_core/src/mg_core_queue_notifications.erl +++ b/apps/mg_core/src/mg_core_queue_notifications.erl @@ -22,11 +22,11 @@ -export([build_task/5]). --behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types @@ -34,8 +34,8 @@ -type seconds() :: non_neg_integer(). -type milliseconds() :: non_neg_integer(). -type options() :: #{ - scheduler_id := mg_core_scheduler:id(), - pulse := mg_core_pulse:handler(), + scheduler_id := mg_skd:id(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), notification := mg_core_notification:options(), % how many seconds behind real time we are @@ -63,10 +63,10 @@ args := mg_core_storage:opaque(), context := mg_core_notification:context() }. --type target_time() :: mg_core_queue_task:target_time(). --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). --type scan_limit() :: mg_core_queue_scanner:scan_limit(). +-type target_time() :: mg_skd_task:target_time(). +-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). +-type scan_limit() :: mg_skd_scanner:scan_limit(). -type fail_action() :: delete | ignore | {reschedule, target_time()}. @@ -107,7 +107,7 @@ init(_Options) -> -spec search_tasks(options(), scan_limit(), state()) -> {{scan_delay(), [task()]}, state()}. 
search_tasks(Options, Limit, State = #state{}) -> - CurrentTs = mg_core_queue_task:current_time(), + CurrentTs = mg_skd_task:current_time(), ScanCutoff = maps:get(scan_cutoff, Options, ?DEFAULT_SCAN_CUTOFF), ScanHandicap = get_handicap_seconds(State), TFrom = CurrentTs - ScanHandicap - ScanCutoff, @@ -149,7 +149,7 @@ execute_task(Options, #{id := NotificationID, machine_id := MachineID, payload : delete -> ok = mg_core_notification:delete(notification_options(Options), NotificationID, Context); {reschedule, NewTargetTime} -> - ok = mg_core_scheduler:send_task(SchedulerID, Task#{target_time => NewTargetTime}); + ok = mg_skd:send_task(SchedulerID, Task#{target_time => NewTargetTime}); ignore -> erlang:raise(throw, Reason, Stacktrace) end @@ -203,13 +203,13 @@ task_fail_action(_Options, _) -> -spec get_reschedule_time(options()) -> target_time(). get_reschedule_time(Options) -> Reschedule = maps:get(reschedule_time, Options, ?DEFAULT_RESCHEDULE_SECONDS), - mg_core_queue_task:current_time() + Reschedule. + mg_skd_task:current_time() + Reschedule. -spec emit_delivery_error_beat( options(), mg_core:id(), mg_core_notification:id(), - mg_core_utils:exception(), + mg_utils:exception(), fail_action() ) -> ok. emit_delivery_error_beat(Options, MachineID, NotificationID, Exception, Action) -> @@ -233,6 +233,6 @@ emit_delivered_beat(Options, MachineID, NotificationID) -> notification_id = NotificationID }). --spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(Options, Beat) -> - ok = mg_core_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). + ok = mpulse:handle_beat(maps:get(pulse, Options, undefined), Beat). diff --git a/apps/mg_core/src/mg_core_queue_timer.erl b/apps/mg_core/src/mg_core_queue_timer.erl index 666811a5..eeba39ba 100644 --- a/apps/mg_core/src/mg_core_queue_timer.erl +++ b/apps/mg_core/src/mg_core_queue_timer.erl @@ -18,11 +18,11 @@ -export([build_task/2]). 
--behaviour(mg_core_queue_scanner). +-behaviour(mg_skd_scanner). -export([init/1]). -export([search_tasks/3]). --behaviour(mg_core_scheduler_worker). +-behaviour(mg_skd_worker). -export([execute_task/2]). %% Types @@ -30,7 +30,7 @@ -type seconds() :: non_neg_integer(). -type milliseconds() :: non_neg_integer(). -type options() :: #{ - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), machine := mg_core_machine:options(), timer_queue := waiting | retrying, lookahead => seconds(), @@ -49,10 +49,10 @@ -type task_id() :: mg_core:id(). -type task_payload() :: #{}. --type target_time() :: mg_core_queue_task:target_time(). --type task() :: mg_core_queue_task:task(task_id(), task_payload()). --type scan_delay() :: mg_core_queue_scanner:scan_delay(). --type scan_limit() :: mg_core_queue_scanner:scan_limit(). +-type target_time() :: mg_skd_task:target_time(). +-type task() :: mg_skd_task:task(task_id(), task_payload()). +-type scan_delay() :: mg_skd_scanner:scan_delay(). +-type scan_limit() :: mg_skd_scanner:scan_limit(). % 1 minute -define(DEFAULT_PROCESSING_TIMEOUT, 60000). @@ -75,7 +75,7 @@ build_task(ID, Timestamp) -> -spec search_tasks(options(), scan_limit(), state()) -> {{scan_delay(), [task()]}, state()}. search_tasks(Options = #{timer_queue := TimerQueue}, Limit, State = #state{}) -> - CurrentTs = mg_core_queue_task:current_time(), + CurrentTs = mg_skd_task:current_time(), Lookahead = maps:get(lookahead, Options, 0), Query = {TimerQueue, 1, CurrentTs + Lookahead}, {Timers, Continuation} = mg_core_machine:search(machine_options(Options), Query, Limit), diff --git a/apps/mg_core/src/mg_core_storage.erl b/apps/mg_core/src/mg_core_storage.erl index e84459e4..85f0efaf 100644 --- a/apps/mg_core/src/mg_core_storage.erl +++ b/apps/mg_core/src/mg_core_storage.erl @@ -77,7 +77,7 @@ %% -type name() :: term(). --type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. +-type opaque() :: mg_utils:opaque(). 
-type key() :: binary(). -type value() :: opaque(). -type kv() :: {key(), value()}. @@ -104,12 +104,12 @@ -type storage_options() :: #{ name := name(), - pulse := mg_core_pulse:handler(), - sidecar => mg_core_utils:mod_opts(), + pulse := mpulse:handler(), + sidecar => mg_utils:mod_opts(), batching => batching_options(), atom() => any() }. --type options() :: mg_core_utils:mod_opts(storage_options()). +-type options() :: mg_utils:mod_opts(storage_options()). -type batching_options() :: #{ % How many storage requests may be served concurrently at most? @@ -145,12 +145,12 @@ %% --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ - mg_core_utils:apply_mod_opts_if_defined(Options, child_spec, undefined, [storage]), + mg_utils:lists_compact([ + mg_utils:apply_mod_opts_if_defined(Options, child_spec, undefined, [storage]), sidecar_child_spec(Options, sidecar) ]) ). @@ -202,7 +202,7 @@ add_batch_request(Request = {search, _}, Batch) -> -spec run_batch(options(), batch()) -> [{request(), response()}]. run_batch(Options, Batch) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), genlib_pmap:map( fun(Request) -> {Request, do_request(Options, Request)} @@ -224,10 +224,10 @@ construct_pmap_options(Options) -> -spec do_request(options(), request()) -> response(). 
do_request(Options, Request) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), StartTimestamp = erlang:monotonic_time(), ok = emit_beat_start(Request, StorageOptions), - Result = mg_core_utils:apply_mod_opts(Options, do_request, [Request]), + Result = mg_utils:apply_mod_opts(Options, do_request, [Request]), FinishTimestamp = erlang:monotonic_time(), Duration = FinishTimestamp - StartTimestamp, ok = emit_beat_finish(Request, StorageOptions, Duration), @@ -275,10 +275,10 @@ binary_to_opaque(Binary) -> -spec sidecar_child_spec(options(), term()) -> supervisor:child_spec() | undefined. sidecar_child_spec(Options, ChildID) -> - {_Handler, StorageOptions} = mg_core_utils:separate_mod_opts(Options, #{}), + {_Handler, StorageOptions} = mg_utils:separate_mod_opts(Options, #{}), case maps:find(sidecar, StorageOptions) of {ok, Sidecar} -> - mg_core_utils:apply_mod_opts(Sidecar, child_spec, [Options, ChildID]); + mg_utils:apply_mod_opts(Sidecar, child_spec, [Options, ChildID]); error -> undefined end. @@ -289,40 +289,40 @@ sidecar_child_spec(Options, ChildID) -> -spec emit_beat_start(mg_core_storage:request(), storage_options()) -> ok. 
emit_beat_start({get, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_get_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_get_start{ name = Name }); emit_beat_start({put, _, _, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_put_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_put_start{ name = Name }); emit_beat_start({search, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_search_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_search_start{ name = Name }); emit_beat_start({delete, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_delete_start{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_delete_start{ name = Name }). -spec emit_beat_finish(mg_core_storage:request(), storage_options(), duration()) -> ok. emit_beat_finish({get, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_get_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_get_finish{ name = Name, duration = Duration }); emit_beat_finish({put, _, _, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_put_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_put_finish{ name = Name, duration = Duration }); emit_beat_finish({search, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_search_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_search_finish{ name = Name, duration = Duration }); emit_beat_finish({delete, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_storage_delete_finish{ + ok = mpulse:handle_beat(Handler, #mg_core_storage_delete_finish{ name = Name, duration = Duration }). 
diff --git a/apps/mg_core/src/mg_core_storage_memory.erl b/apps/mg_core/src/mg_core_storage_memory.erl index 0c704da5..656634cd 100644 --- a/apps/mg_core/src/mg_core_storage_memory.erl +++ b/apps/mg_core/src/mg_core_storage_memory.erl @@ -36,7 +36,7 @@ %% %% internal API %% --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> gen_server:start_link(reg_name(get_name(Options)), ?MODULE, Options, []). @@ -48,7 +48,7 @@ start_link(Options) -> undefined | #{ name := name(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), existing_storage_name => name(), random_transient_fail => random_fail_policy() }. @@ -92,7 +92,7 @@ get_name(#{name := Name}) -> -type search_result() :: [{mg_core_storage:index_value(), mg_core_storage:key()}] | [mg_core_storage:key()]. --spec init(options()) -> mg_core_utils:gen_server_init_ret(state()). +-spec init(options()) -> mg_utils:gen_server_init_ret(state()). init(Options) -> {ok, #{ options => Options, @@ -100,8 +100,8 @@ init(Options) -> indexes => #{} }}. --spec handle_call(_Call, mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()) | no_return(). +-spec handle_call(_Call, mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()) | no_return(). handle_call({put, Key, Context, Value, IndexesUpdates}, _From, State) -> {Resp, NewState} = do_put(Key, Context, Value, IndexesUpdates, State), {reply, Resp, NewState}; @@ -131,7 +131,7 @@ handle_info(Info, State) -> _ = erlang:exit({'unexpected info received', Info}), {noreply, State}. --spec code_change(_, state(), _) -> mg_core_utils:gen_server_code_change_ret(state()). +-spec code_change(_, state(), _) -> mg_utils:gen_server_code_change_ret(state()). code_change(_, State, _) -> {ok, State}. @@ -380,11 +380,11 @@ start_from_elem(Item, [_ | Tail]) -> %% Registry utils --spec ref(name()) -> mg_core_utils:gen_ref(). 
+-spec ref(name()) -> mg_utils:gen_ref(). ref(Name) -> {via, gproc, gproc_key(Name)}. --spec reg_name(name()) -> mg_core_utils:gen_reg_name(). +-spec reg_name(name()) -> mg_utils:gen_reg_name(). reg_name(Name) -> {via, gproc, gproc_key(Name)}. diff --git a/apps/mg_core/src/mg_core_worker.erl b/apps/mg_core/src/mg_core_worker.erl index 794d8f03..d58feff5 100644 --- a/apps/mg_core/src/mg_core_worker.erl +++ b/apps/mg_core/src/mg_core_worker.erl @@ -48,8 +48,8 @@ {{reply, _Reply} | noreply, _State}. -type options() :: #{ - worker => mg_core_utils:mod_opts(), - registry => mg_core_procreg:options(), + worker => mg_utils:mod_opts(), + registry => mg_procreg:options(), hibernate_timeout => pos_integer(), unload_timeout => pos_integer(), shutdown_timeout => timeout() @@ -64,7 +64,7 @@ -type call_msg() :: {call, mg_core_deadline:deadline(), call_payload(), req_ctx()}. --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). -define(WRAP_ID(NS, ID), {?MODULE, {NS, ID}}). -define(DEFAULT_SHUTDOWN, brutal_kill). @@ -78,9 +78,9 @@ child_spec(ChildID, Options) -> shutdown => shutdown_timeout(Options, ?DEFAULT_SHUTDOWN) }. --spec start_link(options(), mg_core:ns(), mg_core:id(), req_ctx()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options(), mg_core:ns(), mg_core:id(), req_ctx()) -> mg_utils:gen_start_ret(). start_link(Options, NS, ID, ReqCtx) -> - mg_core_procreg:start_link( + mg_procreg:start_link( procreg_options(Options), ?WRAP_ID(NS, ID), ?MODULE, @@ -98,13 +98,13 @@ start_link(Options, NS, ID, ReqCtx) -> pulse() ) -> _Result | {error, _}. 
call(Options, NS, ID, Call, ReqCtx, Deadline, Pulse) -> - ok = mg_core_pulse:handle_beat(Pulse, #mg_core_worker_call_attempt{ + ok = mpulse:handle_beat(Pulse, #mg_core_worker_call_attempt{ namespace = NS, machine_id = ID, request_context = ReqCtx, deadline = Deadline }), - mg_core_procreg:call( + mg_procreg:call( procreg_options(Options), ?WRAP_ID(NS, ID), {call, Deadline, Call, ReqCtx}, @@ -114,7 +114,7 @@ call(Options, NS, ID, Call, ReqCtx, Deadline, Pulse) -> %% for testing -spec brutal_kill(options(), mg_core:ns(), mg_core:id()) -> ok. brutal_kill(Options, NS, ID) -> - case mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)) of + case mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)) of undefined -> ok; Pid -> @@ -130,8 +130,8 @@ reply(CallCtx, Reply) -> -spec get_call_queue(options(), mg_core:ns(), mg_core:id()) -> [_Call]. get_call_queue(Options, NS, ID) -> - Pid = mg_core_utils:exit_if_undefined( - mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), + Pid = mg_utils:exit_if_undefined( + mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), noproc ), [Call || {'$gen_call', _, {call, _Deadline, Call, _ReqCtx}} <- get_call_messages(Pid)]. @@ -143,15 +143,15 @@ get_call_messages(Pid) -> -spec is_alive(options(), mg_core:ns(), mg_core:id()) -> boolean(). is_alive(Options, NS, ID) -> - Pid = mg_core_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), + Pid = mg_utils:gen_reg_name_to_pid(self_ref(Options, NS, ID)), Pid =/= undefined andalso erlang:is_process_alive(Pid). % TODO nonuniform interface --spec list(mg_core_procreg:options(), mg_core:ns()) -> [{mg_core:ns(), mg_core:id(), pid()}]. +-spec list(mg_procreg:options(), mg_core:ns()) -> [{mg_core:ns(), mg_core:id(), pid()}]. list(Procreg, NS) -> [ {NS, ID, Pid} - || {?WRAP_ID(_, ID), Pid} <- mg_core_procreg:select(Procreg, ?WRAP_ID(NS, '$1')) + || {?WRAP_ID(_, ID), Pid} <- mg_procreg:select(Procreg, ?WRAP_ID(NS, '$1')) ]. 
%% @@ -167,12 +167,12 @@ list(Procreg, NS) -> unload_timeout => timeout() }. --spec init(_) -> mg_core_utils:gen_server_init_ret(state()). +-spec init(_) -> mg_utils:gen_server_init_ret(state()). init({ID, Options = #{worker := WorkerModOpts}, ReqCtx}) -> _ = process_flag(trap_exit, true), HibernateTimeout = maps:get(hibernate_timeout, Options, 5 * 1000), UnloadTimeout = maps:get(unload_timeout, Options, 60 * 1000), - {Mod, Args} = mg_core_utils:separate_mod_opts(WorkerModOpts), + {Mod, Args} = mg_utils:separate_mod_opts(WorkerModOpts), State = #{ id => ID, mod => Mod, @@ -183,8 +183,8 @@ init({ID, Options = #{worker := WorkerModOpts}, ReqCtx}) -> }, {ok, schedule_unload_timer(State)}. --spec handle_call(call_msg(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(call_msg(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). % загрузка делается отдельно и лениво, чтобы не блокировать этим супервизор, % т.к. у него легко может начать расти очередь @@ -225,12 +225,12 @@ handle_call(Call, From, State) -> ok = logger:error("unexpected gen_server call received: ~p from ~p", [Call, From]), {noreply, State, hibernate_timeout(State)}. --spec handle_cast(_Cast, state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(_Cast, state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast(Cast, State) -> ok = logger:error("unexpected gen_server cast received: ~p", [Cast]), {noreply, State, hibernate_timeout(State)}. --spec handle_info(_Info, state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(_Info, state()) -> mg_utils:gen_server_handle_info_ret(state()). handle_info(timeout, State) -> {noreply, State, hibernate}; handle_info( @@ -251,7 +251,7 @@ handle_info(Info, State) -> ok = logger:error("unexpected gen_server info ~p", [Info]), {noreply, State, hibernate_timeout(State)}. 
--spec code_change(_, state(), _) -> mg_core_utils:gen_server_code_change_ret(state()). +-spec code_change(_, state(), _) -> mg_utils:gen_server_code_change_ret(state()). code_change(_, State, _) -> {ok, State}. @@ -300,10 +300,10 @@ schedule_unload_timer(State = #{unload_tref := UnloadTRef}) -> start_timer(State) -> erlang:start_timer(unload_timeout(State), erlang:self(), unload). --spec self_ref(options(), mg_core:ns(), mg_core:id()) -> mg_core_procreg:ref(). +-spec self_ref(options(), mg_core:ns(), mg_core:id()) -> mg_procreg:ref(). self_ref(Options, NS, ID) -> - mg_core_procreg:ref(procreg_options(Options), ?WRAP_ID(NS, ID)). + mg_procreg:ref(procreg_options(Options), ?WRAP_ID(NS, ID)). --spec procreg_options(options()) -> mg_core_procreg:options(). +-spec procreg_options(options()) -> mg_procreg:options(). procreg_options(#{registry := ProcregOptions}) -> ProcregOptions. diff --git a/apps/mg_core/src/mg_core_workers_manager.erl b/apps/mg_core/src/mg_core_workers_manager.erl index aeaa1a8f..9953a171 100644 --- a/apps/mg_core/src/mg_core_workers_manager.erl +++ b/apps/mg_core/src/mg_core_workers_manager.erl @@ -46,28 +46,28 @@ %% fixed for name and pulse, registry and worker_options -type options() :: #{ name := name(), - pulse := mg_core_pulse:handler(), - registry := mg_core_procreg:options(), + pulse := mpulse:handler(), + registry := mg_procreg:options(), message_queue_len_limit => queue_limit(), % all but `registry` worker_options := mg_core_worker:options(), - sidecar => mg_core_utils:mod_opts() + sidecar => mg_utils:mod_opts() }. -type queue_limit() :: non_neg_integer(). -type ns_options() :: #{ - registry => mg_core_procreg:options(), + registry => mg_procreg:options(), message_queue_len_limit => queue_limit(), % all but `registry` worker_options => mg_core_worker:options(), - sidecar => mg_core_utils:mod_opts() + sidecar => mg_utils:mod_opts() }. %% Internal types -type id() :: mg_core:id(). -type name() :: mg_core:ns(). 
-type req_ctx() :: mg_core:request_context(). --type gen_ref() :: mg_core_utils:gen_ref(). +-type gen_ref() :: mg_utils:gen_ref(). -type maybe(T) :: T | undefined. -type deadline() :: mg_core_deadline:deadline(). @@ -87,11 +87,11 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => rest_for_one}, - mg_core_utils:lists_compact([ + mg_utils:lists_compact([ manager_child_spec(Options), sidecar_child_spec(Options) ]) @@ -112,7 +112,7 @@ manager_child_spec(Options) -> -spec sidecar_child_spec(options()) -> supervisor:child_spec() | undefined. sidecar_child_spec(#{sidecar := Sidecar} = Options) -> - mg_core_utils:apply_mod_opts(Sidecar, child_spec, [Options, sidecar]); + mg_utils:apply_mod_opts(Sidecar, child_spec, [Options, sidecar]); sidecar_child_spec(#{}) -> undefined. @@ -219,8 +219,8 @@ start_child(Options, ID, ReqCtx) -> SelfRef = self_ref(Options), #{name := Name, pulse := Pulse} = Options, MsgQueueLimit = message_queue_len_limit(Options), - MsgQueueLen = mg_core_utils:msg_queue_len(SelfRef), - ok = mg_core_pulse:handle_beat(Pulse, #mg_core_worker_start_attempt{ + MsgQueueLen = mg_utils:msg_queue_len(SelfRef), + ok = mpulse:handle_beat(Pulse, #mg_core_worker_start_attempt{ namespace = Name, machine_id = ID, request_context = ReqCtx, @@ -251,7 +251,7 @@ message_queue_len_limit(Options) -> self_ref(Options) -> {via, gproc, gproc_key(Options)}. --spec self_reg_name(options()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(options()) -> mg_utils:gen_reg_name(). self_reg_name(Options) -> {via, gproc, gproc_key(Options)}. 
diff --git a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl index 05e7d0b3..76042066 100644 --- a/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_continuation_retry_SUITE.erl @@ -119,7 +119,7 @@ increment_fail_count() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -132,7 +132,7 @@ automaton_options() -> namespace => ?MH_NS, processor => ?MODULE, storage => mg_core_storage_memory, - worker => #{registry => mg_core_procreg_global}, + worker => #{registry => mg_procreg_global}, pulse => ?MODULE, notification => #{ namespace => ?MH_NS, @@ -144,6 +144,6 @@ automaton_options() -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_events_machine_SUITE.erl b/apps/mg_core/test/mg_core_events_machine_SUITE.erl index 1e43fa67..221dc45a 100644 --- a/apps/mg_core/test/mg_core_events_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_machine_SUITE.erl @@ -35,8 +35,8 @@ -export_type([options/0]). -export([process_signal/4, process_call/4, process_repair/4]). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). %% mg_core_storage callbacks @@ -398,7 +398,7 @@ start_automaton(ProcessorOptions, NS) -> -spec start_automaton(mg_core_events_machine:options()) -> {pid(), mg_core_events_machine:options()}. start_automaton(Options) -> - {mg_core_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. + {mg_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. 
-spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -439,7 +439,7 @@ events_machine_options(Base, StorageOptions, ProcessorOptions, NS) -> namespace => NS, storage => mg_cth:build_storage(NS, Storage), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -570,7 +570,7 @@ decode(Value) -> -include("pulse.hrl"). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat = #mg_core_machine_lifecycle_failed{}) -> ct:pal("~p", [Beat]); handle_beat(_, Beat = #mg_core_machine_lifecycle_transient_error{}) -> diff --git a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl index d241b726..55c38a1d 100644 --- a/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_modernizer_SUITE.erl @@ -175,7 +175,7 @@ start_automaton(ProcessorOptions, NS) -> -spec start_automaton(mg_core_events_machine:options()) -> {pid(), mg_core_events_machine:options()}. start_automaton(Options) -> - {mg_core_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. + {mg_utils:throw_if_error(mg_core_events_machine:start_link(Options)), Options}. -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -195,7 +195,7 @@ events_machine_options(ProcessorOptions, NS) -> namespace => NS, storage => mg_cth:build_storage(NS, Storage), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -281,7 +281,7 @@ decode(Value) -> -include("pulse.hrl"). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. 
handle_beat(_, Beat = #mg_core_machine_lifecycle_failed{}) -> ct:pal("~p", [Beat]); handle_beat(_, Beat = #mg_core_machine_lifecycle_transient_error{}) -> diff --git a/apps/mg_core/test/mg_core_events_stash_SUITE.erl b/apps/mg_core/test/mg_core_events_stash_SUITE.erl index bd685e26..5e929270 100644 --- a/apps/mg_core/test/mg_core_events_stash_SUITE.erl +++ b/apps/mg_core/test/mg_core_events_stash_SUITE.erl @@ -16,8 +16,8 @@ -behaviour(mg_core_events_machine). -export([process_signal/4, process_call/4, process_repair/4]). -%% mg_core_events_sink handler --behaviour(mg_core_events_sink). +%% mg_core_event_sink handler +-behaviour(mg_core_event_sink). -export([add_events/6]). %% Pulse @@ -211,7 +211,7 @@ dummy_sink_handler(_Events) -> %% Pulse handler --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). @@ -219,7 +219,7 @@ handle_beat(_, Beat) -> -spec start_automaton(options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( mg_core_events_machine:start_link(events_machine_options(Options)) ). @@ -231,7 +231,7 @@ stop_automaton(Pid) -> -spec events_machine_options(options()) -> mg_core_events_machine:options(). 
events_machine_options(Options) -> NS = maps:get(namespace, Options), - Scheduler = #{registry => mg_core_procreg_global, interval => 100}, + Scheduler = #{registry => mg_procreg_global, interval => 100}, #{ namespace => NS, processor => maps:get(processor, Options), @@ -242,7 +242,7 @@ events_machine_options(Options) -> existing_storage_name => ?MODULE }}, worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, diff --git a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl index 8f46a688..36ae60d5 100644 --- a/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl +++ b/apps/mg_core/test/mg_core_instant_timer_task_SUITE.erl @@ -187,7 +187,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -204,7 +204,7 @@ automaton_options(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -226,7 +226,7 @@ automaton_options_wo_shedulers(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -239,6 +239,6 @@ automaton_options_wo_shedulers(NS) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl index d91407c3..4e4e852a 100644 --- a/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl +++ b/apps/mg_core/test/mg_core_internal_events_logging_SUITE.erl @@ -113,7 +113,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -127,7 +127,7 @@ automaton_options(NS) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -145,7 +145,7 @@ automaton_options(NS) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> no_return(). +-spec handle_beat(_, mpulse:beat()) -> no_return(). handle_beat(_, _Event) -> erlang:error(logging_oops). diff --git a/apps/mg_core/test/mg_core_interrupted_SUITE.erl b/apps/mg_core/test/mg_core_interrupted_SUITE.erl index 4ebbcbbf..1c15c5f8 100644 --- a/apps/mg_core/test/mg_core_interrupted_SUITE.erl +++ b/apps/mg_core/test/mg_core_interrupted_SUITE.erl @@ -147,7 +147,7 @@ process_machine(_, _, {call, answer}, _, ?REQ_CTX, _, State) -> %% -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -169,7 +169,7 @@ automaton_options(NS, StorageName) -> }} ), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -182,7 +182,7 @@ automaton_options(NS, StorageName) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. 
+-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, {squad, _}) -> ok; handle_beat(_, Beat) -> diff --git a/apps/mg_core/test/mg_core_machine_SUITE.erl b/apps/mg_core/test/mg_core_machine_SUITE.erl index e7f0253c..10c76964 100644 --- a/apps/mg_core/test/mg_core_machine_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_SUITE.erl @@ -78,9 +78,9 @@ end_per_suite(C) -> -spec init_per_group(group_name(), config()) -> config(). init_per_group(with_gproc, C) -> - [{registry, mg_core_procreg_gproc} | C]; + [{registry, mg_procreg_gproc} | C]; init_per_group(with_global, C) -> - [{registry, mg_core_procreg_global} | C]; + [{registry, mg_procreg_global} | C]; init_per_group(base, C) -> C. @@ -218,7 +218,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -249,6 +249,6 @@ automaton_options(C) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl index 94859fcc..2ed26d6a 100644 --- a/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_full_test_SUITE.erl @@ -294,7 +294,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. 
stop_automaton(Pid) -> @@ -310,7 +310,7 @@ automaton_options() -> storage => mg_core_storage_memory, worker => #{ %% Use 'global' process registry - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -324,7 +324,7 @@ automaton_options() -> lists_random(List) -> lists:nth(rand:uniform(length(List)), List). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(Options, Beat) -> ok = mg_core_pulse_otel:handle_beat(Options, Beat), %% NOTE для отладки может понадобится diff --git a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl index 3c539813..999d350d 100644 --- a/apps/mg_core/test/mg_core_machine_notification_SUITE.erl +++ b/apps/mg_core/test/mg_core_machine_notification_SUITE.erl @@ -279,7 +279,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -296,7 +296,7 @@ automaton_options(_C) -> processor => ?MODULE, storage => mg_core_storage_memory, worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => notification_options(), notification_processing_timeout => 500, @@ -320,6 +320,6 @@ notification_options() -> storage => mg_core_storage_memory }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_core/test/mg_core_notification_SUITE.erl b/apps/mg_core/test/mg_core_notification_SUITE.erl index 421116ae..007a2b6d 100644 --- a/apps/mg_core/test/mg_core_notification_SUITE.erl +++ b/apps/mg_core/test/mg_core_notification_SUITE.erl @@ -191,6 +191,6 @@ pass_saved_cfg(C) -> get_cfg(Key, C) -> test_server:lookup_config(Key, C). --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_storages_SUITE.erl b/apps/mg_core/test/mg_core_storages_SUITE.erl index b2599761..04e66738 100644 --- a/apps/mg_core/test/mg_core_storages_SUITE.erl +++ b/apps/mg_core/test/mg_core_storages_SUITE.erl @@ -40,10 +40,6 @@ -export([indexes_test_with_limits/1]). -export([stress_test/1]). --export([riak_pool_stable_test/1]). --export([riak_pool_overload_test/1]). --export([riak_pool_misbehaving_connection_test/1]). - -export([handle_beat/2]). %% @@ -56,21 +52,13 @@ -spec all() -> [test_name() | {group, group_name()}]. all() -> [ - {group, memory}, - {group, riak} + {group, memory} ]. -spec groups() -> [{group_name(), list(_), [test_name()]}]. groups() -> [ - {memory, [], tests()}, - {riak, [], - tests() ++ - [ - riak_pool_stable_test, - riak_pool_overload_test, - riak_pool_misbehaving_connection_test - ]} + {memory, [], tests()} ]. -spec tests() -> [test_name()]. @@ -91,9 +79,7 @@ tests() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({riakc_pb_socket, 'get_index_eq', '_'}, x), - % dbg:tpl({riakc_pb_socket, 'get_index_range', '_'}, x), - Apps = mg_cth:start_applications([msgpack, gproc, riakc, pooler]), + Apps = mg_cth:start_applications([msgpack, gproc]), [{apps, Apps} | C]. -spec end_per_suite(config()) -> ok. @@ -365,146 +351,16 @@ stop_wait(Pid, Reason, Timeout) -> %% --spec riak_pool_stable_test(_C) -> ok. 
-riak_pool_stable_test(_C) -> - Namespace = <<"riak_pool_stable_test">>, - InitialCount = 1, - RequestCount = 10, - Options = riak_options(Namespace, #{ - init_count => InitialCount, - max_count => RequestCount div 2, - idle_timeout => 1000, - cull_interval => 1000, - queue_max => RequestCount * 2 - }), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - % Run multiple requests concurrently - _ = genlib_pmap:map( - fun(N) -> - base_test(genlib:to_binary(N), Storage) - end, - lists:seq(1, RequestCount) - ), - - % Give pool 3 seconds to get back to initial state - ok = timer:sleep(3000), - - {ok, Utilization} = mg_core_storage_riak:pool_utilization(Options), - ?assertMatch( - #{ - in_use_count := 0, - free_count := InitialCount - }, - maps:from_list(Utilization) - ), - - ok = stop_storage(Pid). - --spec riak_pool_overload_test(_C) -> ok. -riak_pool_overload_test(_C) -> - Namespace = <<"riak_pool_overload_test">>, - RequestCount = 40, - Options = riak_options( - Namespace, - #{ - init_count => 1, - max_count => 4, - queue_max => RequestCount div 4 - } - ), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - ?assertThrow( - {transient, {storage_unavailable, no_pool_members}}, - genlib_pmap:map( - fun(N) -> - base_test(genlib:to_binary(N), Storage) - end, - lists:seq(1, RequestCount) - ) - ), - - ok = stop_storage(Pid). - --spec riak_pool_misbehaving_connection_test(_C) -> ok. 
-riak_pool_misbehaving_connection_test(_C) -> - Namespace = <<"riak_pool_overload_test">>, - WorkersCount = 4, - RequestCount = 4, - Options = riak_options( - Namespace, - #{ - init_count => 1, - max_count => WorkersCount div 2, - queue_max => WorkersCount * 2 - } - ), - Storage = {mg_core_storage_riak, Options}, - Pid = start_storage(Storage), - - _ = genlib_pmap:map( - fun(RequestID) -> - Key = genlib:to_binary(RequestID), - case RequestID of - N when (N rem WorkersCount) == (N div WorkersCount) -> - % Ensure that request fails occasionally... - ?assertThrow( - {transient, {storage_unavailable, _}}, - mg_core_storage:put(Storage, Key, <<"NOTACONTEXT">>, <<>>, []) - ); - _ -> - % ...And it will not affect any concurrently running requests. - ?assertEqual( - undefined, - mg_core_storage:get(Storage, Key) - ) - end - end, - lists:seq(1, RequestCount * WorkersCount), - #{proc_limit => WorkersCount} - ), - - ok = stop_storage(Pid). - -%% - -spec storage_options(atom(), binary()) -> mg_core_storage:options(). -storage_options(riak, Namespace) -> - {mg_core_storage_riak, - riak_options( - Namespace, - #{ - init_count => 1, - max_count => 10, - idle_timeout => 1000, - cull_interval => 1000, - auto_grow_threshold => 5, - queue_max => 100 - } - )}; storage_options(memory, _) -> {mg_core_storage_memory, #{ pulse => ?MODULE, name => storage }}. --spec riak_options(mg_core:ns(), map()) -> mg_core_storage_riak:options(). -riak_options(Namespace, PoolOptions) -> - #{ - name => storage, - pulse => ?MODULE, - host => "riakdb", - port => 8087, - bucket => Namespace, - pool_options => PoolOptions - }. - -spec start_storage(mg_core_storage:options()) -> pid(). start_storage(Options) -> - mg_core_utils:throw_if_error( + mg_utils:throw_if_error( genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, [mg_core_storage:child_spec(Options, storage)] @@ -516,6 +372,6 @@ stop_storage(Pid) -> ok = proc_lib:stop(Pid, normal, 5000), ok. 
--spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl index 82ffb634..799f9a35 100644 --- a/apps/mg_core/test/mg_core_timer_retry_SUITE.erl +++ b/apps/mg_core/test/mg_core_timer_retry_SUITE.erl @@ -183,7 +183,7 @@ start() -> -spec start_automaton(mg_core_machine:options()) -> pid(). start_automaton(Options) -> - mg_core_utils:throw_if_error(mg_core_machine:start_link(Options)). + mg_utils:throw_if_error(mg_core_machine:start_link(Options)). -spec stop_automaton(pid()) -> ok. stop_automaton(Pid) -> @@ -201,7 +201,7 @@ automaton_options(NS, RetryPolicy) -> processor => ?MODULE, storage => mg_cth:build_storage(NS, mg_core_storage_memory), worker => #{ - registry => mg_core_procreg_global + registry => mg_procreg_global }, notification => #{ namespace => NS, @@ -219,7 +219,7 @@ automaton_options(NS, RetryPolicy) -> } }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). diff --git a/apps/mg_core/test/mg_core_workers_SUITE.erl b/apps/mg_core/test/mg_core_workers_SUITE.erl index 5e739650..44dfa142 100644 --- a/apps/mg_core/test/mg_core_workers_SUITE.erl +++ b/apps/mg_core/test/mg_core_workers_SUITE.erl @@ -110,7 +110,7 @@ end_per_suite(C) -> -spec init_per_group(group_name(), config()) -> config(). 
init_per_group(with_global, C) -> [ - {registry, mg_core_procreg_global}, + {registry, mg_procreg_global}, {load_pressure, 100}, {runner_retry_strategy, #{ noproc => genlib_retry:linear(3, 100), @@ -120,7 +120,7 @@ init_per_group(with_global, C) -> ]; init_per_group(with_gproc, C) -> [ - {registry, mg_core_procreg_gproc}, + {registry, mg_procreg_gproc}, {load_pressure, 100}, {runner_retry_strategy, #{ noproc => genlib_retry:linear(3, 100), @@ -547,7 +547,7 @@ try_unlink(#{}) -> %% -spec start_workers(_Options) -> pid(). start_workers(Options) -> - mg_core_utils:throw_if_error(mg_core_workers_manager:start_link(Options)). + mg_utils:throw_if_error(mg_core_workers_manager:start_link(Options)). -spec stop_workers(pid()) -> ok. stop_workers(Pid) -> diff --git a/apps/mg_cth/src/mg_cth.app.src b/apps/mg_cth/src/mg_cth.app.src index 9d35b704..a73f6609 100644 --- a/apps/mg_cth/src/mg_cth.app.src +++ b/apps/mg_cth/src/mg_cth.app.src @@ -6,6 +6,9 @@ kernel, stdlib, genlib, + mg_utils, + mg_core, + mg_es_kafka, mg_woody ]}, {env, []}, diff --git a/apps/mg_cth/src/mg_cth.erl b/apps/mg_cth/src/mg_cth.erl index b27e8aac..3e36a08c 100644 --- a/apps/mg_cth/src/mg_cth.erl +++ b/apps/mg_cth/src/mg_cth.erl @@ -132,7 +132,7 @@ assert_wait_expected(Expected, Fun, Strategy) when is_function(Fun, 0) -> end end. --spec build_storage(mg_core:ns(), mg_core_utils:mod_opts()) -> mg_core_utils:mod_opts(). +-spec build_storage(mg_core:ns(), mg_utils:mod_opts()) -> mg_utils:mod_opts(). build_storage(NS, Module) when is_atom(Module) -> build_storage(NS, {Module, #{}}); build_storage(NS, {Module, Options}) -> diff --git a/apps/mg_cth/src/mg_cth_conf.erl b/apps/mg_cth/src/mg_cth_conf.erl new file mode 100644 index 00000000..b87e4e86 --- /dev/null +++ b/apps/mg_cth/src/mg_cth_conf.erl @@ -0,0 +1,15 @@ +-module(mg_cth_conf). + +-export([construct_child_specs/1]). 
+ +-type config() :: #{ + woody_server := mg_woody:woody_server(), + namespaces := mg_conf:namespaces(), + quotas => [mg_skd_quota_worker:options()] +}. + +-spec construct_child_specs(config() | undefined) -> _. +construct_child_specs(undefined) -> + []; +construct_child_specs(Config) -> + mg_conf:construct_child_specs(Config#{pulse => mg_cth_pulse}, []). diff --git a/apps/mg_cth/src/mg_cth_configurator.erl b/apps/mg_cth/src/mg_cth_configurator.erl deleted file mode 100644 index 8e8a4ba8..00000000 --- a/apps/mg_cth/src/mg_cth_configurator.erl +++ /dev/null @@ -1,184 +0,0 @@ --module(mg_cth_configurator). - --export([construct_child_specs/1]). - --type modernizer() :: #{ - current_format_version := mg_core_events:format_version(), - handler := mg_woody_modernizer:options() -}. - --type events_machines() :: #{ - processor := processor(), - modernizer => modernizer(), - % all but `worker_options.worker` option - worker => mg_core_workers_manager:ns_options(), - storage := mg_core_machine:storage_options(), - event_sinks => [mg_core_events_sink:handler()], - retries := mg_core_machine:retry_opt(), - schedulers := mg_core_machine:schedulers_opt(), - default_processing_timeout := timeout(), - suicide_probability => mg_core_machine:suicide_probability(), - event_stash_size := non_neg_integer() -}. - --type config() :: #{ - woody_server := mg_woody:woody_server(), - namespaces := #{mg_core:ns() => events_machines()}, - quotas => [mg_core_quota_worker:options()] -}. - --type processor() :: mg_woody_processor:options(). - --spec construct_child_specs(config() | undefined) -> _. 
-construct_child_specs(undefined) -> - []; -construct_child_specs(#{woody_server := WoodyServer, namespaces := Namespaces} = Config) -> - Quotas = maps:get(quotas, Config, []), - - QuotasChSpec = quotas_child_specs(Quotas, quota), - EventMachinesChSpec = events_machines_child_specs(Namespaces), - WoodyServerChSpec = mg_woody:child_spec( - woody_server, - #{ - woody_server => WoodyServer, - automaton => api_automaton_options(Namespaces), - pulse => mg_cth_pulse - } - ), - - lists:flatten([ - WoodyServerChSpec, - QuotasChSpec, - EventMachinesChSpec - ]). - -%% - --spec quotas_child_specs(_, atom()) -> [supervisor:child_spec()]. -quotas_child_specs(Quotas, ChildID) -> - [ - mg_core_quota_worker:child_spec(Options, {ChildID, maps:get(name, Options)}) - || Options <- Quotas - ]. - --spec events_machines_child_specs(_) -> [supervisor:child_spec()]. -events_machines_child_specs(NSs) -> - [ - mg_core_events_machine:child_spec(events_machine_options(NS, NSs), binary_to_atom(NS, utf8)) - || NS <- maps:keys(NSs) - ]. - --spec events_machine_options(mg_core:ns(), _) -> mg_core_events_machine:options(). -events_machine_options(NS, NSs) -> - NSConfigs = maps:get(NS, NSs), - #{processor := ProcessorConfig, storage := Storage} = NSConfigs, - EventSinks = [event_sink_options(SinkConfig) || SinkConfig <- maps:get(event_sinks, NSConfigs, [])], - EventsStorage = sub_storage_options(<<"events">>, Storage), - #{ - namespace => NS, - processor => processor(ProcessorConfig), - machines => machine_options(NS, NSConfigs), - events_storage => EventsStorage, - event_sinks => EventSinks, - pulse => pulse(), - default_processing_timeout => maps:get(default_processing_timeout, NSConfigs), - event_stash_size => maps:get(event_stash_size, NSConfigs, 0) - }. - --spec machine_options(mg_core:ns(), events_machines()) -> mg_core_machine:options(). 
-machine_options(NS, Config) -> - #{storage := Storage} = Config, - Options = maps:with( - [ - retries, - timer_processing_timeout - ], - Config - ), - MachinesStorage = sub_storage_options(<<"machines">>, Storage), - NotificationsStorage = sub_storage_options(<<"notifications">>, Storage), - Options#{ - namespace => NS, - storage => MachinesStorage, - worker => worker_manager_options(Config), - schedulers => maps:get(schedulers, Config, #{}), - pulse => pulse(), - notification => #{ - namespace => NS, - pulse => pulse(), - storage => NotificationsStorage - }, - % TODO сделать аналогично в event_sink'е и тэгах - suicide_probability => maps:get(suicide_probability, Config, undefined) - }. - --spec api_automaton_options(_) -> mg_woody_automaton:options(). -api_automaton_options(NSs) -> - maps:fold( - fun(NS, ConfigNS, Options) -> - Options#{ - NS => maps:merge( - #{ - machine => events_machine_options(NS, NSs) - }, - modernizer_options(maps:get(modernizer, ConfigNS, undefined)) - ) - } - end, - #{}, - NSs - ). - --spec event_sink_options(mg_core_events_sink:handler()) -> mg_core_events_sink:handler(). -event_sink_options({mg_core_events_sink_kafka, EventSinkConfig}) -> - {mg_core_events_sink_kafka, EventSinkConfig#{ - pulse => pulse(), - encoder => fun mg_woody_event_sink:serialize/3 - }}. - --spec worker_manager_options(map()) -> mg_core_workers_manager:ns_options(). -worker_manager_options(Config) -> - maps:merge( - #{ - %% Use 'global' process registry - registry => mg_core_procreg_global, - sidecar => mg_cth_worker - }, - maps:get(worker, Config, #{}) - ). - --spec processor(processor()) -> mg_core_utils:mod_opts(). -processor(Processor) -> - {mg_woody_processor, Processor#{event_handler => {mg_woody_event_handler, pulse()}}}. - --spec sub_storage_options(mg_core:ns(), mg_core_machine:storage_options()) -> - mg_core_machine:storage_options(). 
-sub_storage_options(SubNS, Storage0) -> - Storage1 = mg_core_utils:separate_mod_opts(Storage0, #{}), - Storage2 = add_bucket_postfix(SubNS, Storage1), - Storage2. - --spec add_bucket_postfix(mg_core:ns(), mg_core_storage:options()) -> mg_core_storage:options(). -add_bucket_postfix(_, {mg_core_storage_memory, _} = Storage) -> - Storage; -add_bucket_postfix(SubNS, {mg_core_storage_riak, #{bucket := Bucket} = Options}) -> - {mg_core_storage_riak, Options#{bucket := mg_core_utils:concatenate_namespaces(Bucket, SubNS)}}. - --spec pulse() -> mg_core_pulse:handler(). -pulse() -> - mg_cth_pulse. - --spec modernizer_options(modernizer() | undefined) -> - #{modernizer => mg_core_events_modernizer:options()}. -modernizer_options(#{current_format_version := CurrentFormatVersion, handler := WoodyClient}) -> - #{ - modernizer => #{ - current_format_version => CurrentFormatVersion, - handler => - {mg_woody_modernizer, WoodyClient#{ - event_handler => {mg_woody_event_handler, pulse()} - }} - } - }; -modernizer_options(undefined) -> - #{}. diff --git a/apps/mg_cth/src/mg_cth_processor.erl b/apps/mg_cth/src/mg_cth_processor.erl index 4c58339d..6c7d235b 100644 --- a/apps/mg_cth/src/mg_cth_processor.erl +++ b/apps/mg_cth/src/mg_cth_processor.erl @@ -103,7 +103,7 @@ start_link(ID, {Host, Port}, Options, MgConfig) -> ) } ) - | mg_cth_configurator:construct_child_specs(MgConfig) + | mg_cth_conf:construct_child_specs(MgConfig) ], case genlib_adhoc_supervisor:start_link(Flags, ChildsSpecs) of {ok, SupPid} -> diff --git a/apps/mg_cth/src/mg_cth_pulse.erl b/apps/mg_cth/src/mg_cth_pulse.erl index f0ed2789..36998506 100644 --- a/apps/mg_cth/src/mg_cth_pulse.erl +++ b/apps/mg_cth/src/mg_cth_pulse.erl @@ -16,8 +16,8 @@ -module(mg_cth_pulse). -%% mg_pulse handler --behaviour(mg_core_pulse). +%% mpulse handler +-behaviour(mpulse). -export([handle_beat/2]). 
%% diff --git a/apps/mg_es_kafka/include/pulse.hrl b/apps/mg_es_kafka/include/pulse.hrl new file mode 100644 index 00000000..e13e291c --- /dev/null +++ b/apps/mg_es_kafka/include/pulse.hrl @@ -0,0 +1,17 @@ +%% Events sink operations + +-record(mg_event_sink_kafka_sent, { + name :: atom(), + namespace :: mg_core:ns(), + machine_id :: mg_core:id(), + request_context :: mg_core:request_context(), + deadline :: mg_core_deadline:deadline(), + % in native units + encode_duration :: non_neg_integer(), + % in native units + send_duration :: non_neg_integer(), + % in bytes + data_size :: non_neg_integer(), + partition :: brod:partition(), + offset :: brod:offset() +}). diff --git a/apps/mg_es_kafka/rebar.config b/apps/mg_es_kafka/rebar.config new file mode 100644 index 00000000..4853489b --- /dev/null +++ b/apps/mg_es_kafka/rebar.config @@ -0,0 +1,4 @@ +{deps, [ + {brod, "3.16.1"}, + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. diff --git a/apps/mg_es_kafka/src/mg_es_kafka.app.src b/apps/mg_es_kafka/src/mg_es_kafka.app.src new file mode 100644 index 00000000..9f2e6f46 --- /dev/null +++ b/apps/mg_es_kafka/src/mg_es_kafka.app.src @@ -0,0 +1,18 @@ +{application, mg_es_kafka, [ + {description, "Event sink kafka implementation"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + brod, + mg_core, + prometheus + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_events_sink_kafka.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl similarity index 96% rename from apps/mg_core/src/mg_core_events_sink_kafka.erl rename to apps/mg_es_kafka/src/mg_event_sink_kafka.erl index 3df114b3..42f3fb29 100644 --- a/apps/mg_core/src/mg_core_events_sink_kafka.erl +++ b/apps/mg_es_kafka/src/mg_event_sink_kafka.erl @@ -14,12 +14,12 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka). +-module(mg_event_sink_kafka). 
--include_lib("mg_core/include/pulse.hrl").
+-include_lib("mg_es_kafka/include/pulse.hrl").
 
-%% mg_core_events_sink handler
--behaviour(mg_core_events_sink).
+%% mg_core_event_sink handler
+-behaviour(mg_core_event_sink).
 
 -export([add_events/6]).
 
 %% Types
@@ -28,7 +28,7 @@
     name := atom(),
     topic := brod:topic(),
     client := brod:client(),
-    pulse := mg_core_pulse:handler(),
+    pulse := mpulse:handler(),
     encoder := encoder()
 }.
 
@@ -53,7 +53,7 @@ add_events(Options, NS, MachineID, Events, ReqCtx, Deadline) ->
     EncodeTimestamp = erlang:monotonic_time(),
     {ok, Partition, Offset} = produce(Client, Topic, event_key(NS, MachineID), Batch),
     FinishTimestamp = erlang:monotonic_time(),
-    ok = mg_core_pulse:handle_beat(Pulse, #mg_core_events_sink_kafka_sent{
+    ok = mpulse:handle_beat(Pulse, #mg_event_sink_kafka_sent{
         name = Name,
         namespace = NS,
         machine_id = MachineID,
diff --git a/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl
new file mode 100644
index 00000000..1da1099e
--- /dev/null
+++ b/apps/mg_es_kafka/src/mg_event_sink_kafka_prometheus_pulse.erl
@@ -0,0 +1,95 @@
+-module(mg_event_sink_kafka_prometheus_pulse).
+
+-include_lib("mg_es_kafka/include/pulse.hrl").
+
+-export([setup/0]).
+
+%% mpulse handler
+-behaviour(mpulse).
+-export([handle_beat/2]).
+
+%% internal types
+-type beat() :: #mg_event_sink_kafka_sent{} | mg_core:beat().
+-type options() :: #{}.
+-type metric_name() :: prometheus_metric:name().
+-type metric_label_value() :: term().
+
+%%
+%% mpulse handler
+%%
+
+-spec handle_beat(options(), beat()) -> ok.
+handle_beat(_Options, Beat) ->
+    ok = dispatch_metrics(Beat).
+
+%%
+%% management API
+%%
+
+%% Sets all metrics up. Call this when the app starts.
+-spec setup() -> ok.
+setup() -> + %% Event sink / kafka + true = prometheus_counter:declare([ + {name, mg_events_sink_produced_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of Machinegun event sink events."} + ]), + true = prometheus_histogram:declare([ + {name, mg_events_sink_kafka_produced_duration_seconds}, + {registry, registry()}, + {labels, [namespace, name, action]}, + {buckets, duration_buckets()}, + {duration_unit, seconds}, + {help, "Machinegun event sink addition duration."} + ]), + ok. + +%% Internals + +-spec dispatch_metrics(beat()) -> ok. +% Event sink operations +dispatch_metrics(#mg_event_sink_kafka_sent{ + name = Name, + namespace = NS, + encode_duration = EncodeDuration, + send_duration = SendDuration +}) -> + ok = inc(mg_events_sink_produced_total, [NS, Name]), + ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, encode], EncodeDuration), + ok = observe(mg_events_sink_kafka_produced_duration_seconds, [NS, Name, send], SendDuration); +% Unknown +dispatch_metrics(_Beat) -> + ok. + +-spec inc(metric_name(), [metric_label_value()]) -> ok. +inc(Name, Labels) -> + _ = prometheus_counter:inc(registry(), Name, Labels, 1), + ok. + +-spec observe(metric_name(), [metric_label_value()], number()) -> ok. +observe(Name, Labels, Value) -> + _ = prometheus_histogram:observe(registry(), Name, Labels, Value), + ok. + +-spec registry() -> prometheus_registry:registry(). +registry() -> + default. + +-spec duration_buckets() -> [number()]. +duration_buckets() -> + [ + 0.001, + 0.005, + 0.010, + 0.025, + 0.050, + 0.100, + 0.250, + 0.500, + 1, + 2.5, + 5, + 10 + ]. 
diff --git a/apps/mg_core/test/mg_core_events_sink_kafka_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl similarity index 94% rename from apps/mg_core/test/mg_core_events_sink_kafka_SUITE.erl rename to apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl index 9fbb2f68..e7efa13c 100644 --- a/apps/mg_core/test/mg_core_events_sink_kafka_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_SUITE.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka_SUITE). +-module(mg_event_sink_kafka_SUITE). -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("kafka_protocol/include/kpro_public.hrl"). @@ -63,7 +63,7 @@ groups() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_core_events_sink_kafka, '_', '_'}, x), + % dbg:tpl({mg_event_sink_kafka, '_', '_'}, x), AppSpecs = [ {brod, [ {clients, [ @@ -105,7 +105,7 @@ add_events_test(C) -> -spec add_events(config()) -> ok. add_events(C) -> F = fun() -> - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, @@ -138,7 +138,7 @@ do_read_all(Hosts, Topic, Partition, Offset, Result) -> do_read_all(Hosts, Topic, Partition, NewOffset, NewRecords ++ Result) end. --spec event_sink_options() -> mg_core_events_sink_kafka:options(). +-spec event_sink_options() -> mg_event_sink_kafka:options(). event_sink_options() -> #{ name => kafka, @@ -150,7 +150,7 @@ event_sink_options() -> end }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). 
diff --git a/apps/mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl similarity index 96% rename from apps/mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl rename to apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl index f24f56fe..2ce29c05 100644 --- a/apps/mg_core/test/mg_core_events_sink_kafka_errors_SUITE.erl +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_errors_SUITE.erl @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_events_sink_kafka_errors_SUITE). +-module(mg_event_sink_kafka_errors_SUITE). -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("mg_cth/include/mg_cth.hrl"). @@ -78,7 +78,7 @@ groups() -> -spec init_per_suite(config()) -> config(). init_per_suite(C) -> % dbg:tracer(), dbg:p(all, c), - % dbg:tpl({mg_core_events_sink_kafka, '_', '_'}, x), + % dbg:tpl({mg_event_sink_kafka, '_', '_'}, x), {Events, _} = mg_core_events:generate_events_with_range( [{#{}, Body} || Body <- [1, 2, 3]], undefined @@ -143,7 +143,7 @@ add_events_ssl_failed_test(C) -> _ = ?assertException( throw, {transient, {event_sink_unavailable, {connect_failed, [{_, {{failed_to_upgrade_to_ssl, _}, _ST}}]}}}, - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, @@ -287,7 +287,7 @@ add_events_nxdomain_test(C) -> _ = mg_cth:stop_applications(Apps) end. --spec event_sink_options() -> mg_core_events_sink_kafka:options(). +-spec event_sink_options() -> mg_event_sink_kafka:options(). event_sink_options() -> #{ name => kafka, @@ -299,7 +299,7 @@ event_sink_options() -> end }. --spec handle_beat(_, mg_core_pulse:beat()) -> ok. +-spec handle_beat(_, mpulse:beat()) -> ok. handle_beat(_, Beat) -> ct:pal("~p", [Beat]). @@ -315,7 +315,7 @@ change_proxy_mode(ModeWas, Mode, Proxy, C) -> -spec add_events(config()) -> ok. 
add_events(C) -> - mg_core_events_sink_kafka:add_events( + mg_event_sink_kafka:add_events( event_sink_options(), ?SOURCE_NS, ?SOURCE_ID, diff --git a/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl new file mode 100644 index 00000000..3d0e20a4 --- /dev/null +++ b/apps/mg_es_kafka/test/mg_event_sink_kafka_prometheus_SUITE.erl @@ -0,0 +1,108 @@ +-module(mg_event_sink_kafka_prometheus_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_es_kafka/include/pulse.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). + +-export([event_sink_kafka_sent_test/1]). + +-define(NS, <<"NS">>). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, beats} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {beats, [parallel], [ + event_sink_kafka_sent_test + ]} + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + Apps = mg_cth:start_applications([mg_es_kafka]), + ok = mg_event_sink_kafka_prometheus_pulse:setup(), + [{apps, Apps} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). + +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(_, C) -> + C. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% Tests + +-spec event_sink_kafka_sent_test(config()) -> _. 
+event_sink_kafka_sent_test(_C) -> + Buckets = test_millisecond_buckets(), + Name = kafka, + _ = maps:fold( + fun(DurationMs, BucketIdx, {Counter, BucketAcc}) -> + ok = test_beat(#mg_event_sink_kafka_sent{ + name = Name, + namespace = ?NS, + machine_id = <<"ID">>, + request_context = null, + deadline = undefined, + encode_duration = erlang:convert_time_unit(DurationMs, millisecond, native), + send_duration = erlang:convert_time_unit(DurationMs, millisecond, native), + data_size = 0, + partition = 0, + offset = 0 + }), + ?assertEqual(prometheus_counter:value(mg_events_sink_produced_total, [?NS, Name]), Counter), + {BucketsHits, _} = + prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, encode]), + {BucketsHits, _} = + prometheus_histogram:value(mg_events_sink_kafka_produced_duration_seconds, [?NS, Name, send]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, BucketAcc, 0) + 1, BucketHit), + {Counter + 1, BucketAcc#{BucketIdx => BucketHit}} + end, + {1, #{}}, + Buckets + ). + +%% Metrics utils + +-spec test_beat(term()) -> ok. +test_beat(Beat) -> + mg_event_sink_kafka_prometheus_pulse:handle_beat(#{}, Beat). + +-spec test_millisecond_buckets() -> #{non_neg_integer() => pos_integer()}. +test_millisecond_buckets() -> + #{ + 0 => 1, + 1 => 1, + 5 => 2, + 10 => 3 + }. diff --git a/apps/mg_procreg/src/mg_procreg.app.src b/apps/mg_procreg/src/mg_procreg.app.src new file mode 100644 index 00000000..d7480419 --- /dev/null +++ b/apps/mg_procreg/src/mg_procreg.app.src @@ -0,0 +1,17 @@ +{application, mg_procreg, [ + {description, "Machinegun process registry lib"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + gproc, + mg_utils + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. 
diff --git a/apps/mg_procreg/src/mg_procreg.erl b/apps/mg_procreg/src/mg_procreg.erl new file mode 100644 index 00000000..16d97ba2 --- /dev/null +++ b/apps/mg_procreg/src/mg_procreg.erl @@ -0,0 +1,124 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_procreg). + +% Any term sans ephemeral ones, like `reference()`s / `pid()`s / `fun()`s. +-type name() :: term(). +-type name_pattern() :: ets:match_pattern(). + +-type ref() :: mg_utils:gen_ref(). +-type reg_name() :: mg_utils:gen_reg_name(). + +-type procreg_options() :: term(). +-type options() :: mg_utils:mod_opts(procreg_options()). + +-export_type([name/0]). +-export_type([name_pattern/0]). +-export_type([ref/0]). +-export_type([reg_name/0]). +-export_type([options/0]). + +-export_type([start_link_ret/0]). + +-export([ref/2]). +-export([reg_name/2]). +-export([select/2]). + +-export([start_link/5]). +-export([call/3]). +-export([call/4]). + +%% Names and references + +-callback ref(procreg_options(), name()) -> ref(). + +-callback reg_name(procreg_options(), name()) -> reg_name(). + +-callback select(procreg_options(), name_pattern()) -> [{name(), pid()}]. + +-callback call(procreg_options(), ref(), _Call, timeout()) -> _Reply. + +-type start_link_ret() :: + {ok, pid()} | {error, term()}. + +-callback start_link(procreg_options(), reg_name(), module(), _Args, list()) -> start_link_ret(). 
+ +-optional_callbacks([ + call/4, + start_link/5 +]). + +%% + +-spec ref(options(), name()) -> ref(). +ref(Options, Name) -> + mg_utils:apply_mod_opts(Options, ref, [Name]). + +-spec reg_name(options(), name()) -> reg_name(). +reg_name(Options, Name) -> + mg_utils:apply_mod_opts(Options, reg_name, [Name]). + +%% TODO Review usage of this function +-spec select(options(), name_pattern()) -> [{name(), pid()}]. +select(Options, NamePattern) -> + mg_utils:apply_mod_opts(Options, select, [NamePattern]). + +%% @doc Functions `call/3', `call/4' and `start_link/5' wrap according +%% `gen_server:call/4' and `gen_server:start_link/4' calls to a +%% worker's gen_server implementation. +%% +%% Direct interaction with a process registry implementation is +%% expected to be performed by separate module with an actual process +%% registry "behaviour". +%% In general this behaviour requires `ref/2' +%% (for referencing pid) and `reg_name/2' (for registering new +%% reference for pid) callbacks that return via-tuple: +%% +%% {via, RegMod :: module(), ViaName :: term()} +%% +%% Register the gen_server process with the registry represented +%% by RegMod. The RegMod callback is to export the functions +%% register_name/2, unregister_name/1, whereis_name/1, and send/2, +%% which are to behave like the corresponding functions in +%% global. Thus, {via,global,GlobalName} is a valid reference +%% equivalent to {global,GlobalName}. +%% +%% @end +-spec call(options(), name(), _Call) -> _Reply. +call(Options, Name, Call) -> + call(Options, Name, Call, 5000). + +-spec call(options(), name(), _Call, timeout()) -> _Reply. +call(Options, Name, Call, Timeout) -> + CallArgs = [ref(Options, Name), Call, Timeout], + mg_utils:apply_mod_opts_with_fallback(Options, call, fun gen_call/4, CallArgs). + +-spec start_link(options(), name(), module(), _Args, list()) -> start_link_ret(). 
+start_link(Options, Name, Module, Args, Opts) -> + StartArgs = [reg_name(Options, Name), Module, Args, Opts], + mg_utils:apply_mod_opts_with_fallback(Options, start_link, fun gen_start_link/5, StartArgs). + +%% Internal + +-spec gen_start_link(options(), mg_procreg:reg_name(), module(), _Args, list()) -> + mg_procreg:start_link_ret(). +gen_start_link(_Options, RegName, Module, Args, Opts) -> + gen_server:start_link(RegName, Module, Args, Opts). + +-spec gen_call(options(), mg_procreg:ref(), _Call, timeout()) -> _Reply. +gen_call(_Options, Ref, Call, Timeout) -> + gen_server:call(Ref, Call, Timeout). diff --git a/apps/mg_core/src/mg_core_procreg_global.erl b/apps/mg_procreg/src/mg_procreg_global.erl similarity index 79% rename from apps/mg_core/src/mg_core_procreg_global.erl rename to apps/mg_procreg/src/mg_procreg_global.erl index 6e9e6d97..d12f4fa3 100644 --- a/apps/mg_core/src/mg_core_procreg_global.erl +++ b/apps/mg_procreg/src/mg_procreg_global.erl @@ -1,29 +1,26 @@ --module(mg_core_procreg_global). +-module(mg_procreg_global). %% --behaviour(mg_core_procreg). +-behaviour(mg_procreg). -export([ref/2]). -export([reg_name/2]). -export([select/2]). --export([start_link/5]). --export([call/4]). - -type options() :: undefined. %% --spec ref(options(), mg_core_procreg:name()) -> mg_core_procreg:ref(). +-spec ref(options(), mg_procreg:name()) -> mg_procreg:ref(). ref(_Options, Name) -> {global, Name}. --spec reg_name(options(), mg_core_procreg:name()) -> mg_core_procreg:reg_name(). +-spec reg_name(options(), mg_procreg:name()) -> mg_procreg:reg_name(). reg_name(Options, Name) -> ref(Options, Name). --spec select(options(), mg_core_procreg:name_pattern()) -> [{mg_core_procreg:name(), pid()}]. +-spec select(options(), mg_procreg:name_pattern()) -> [{mg_procreg:name(), pid()}]. select(_Options, NamePattern) -> lists:foldl( fun(Name, Acc) -> @@ -36,15 +33,6 @@ select(_Options, NamePattern) -> global:registered_names() ). 
--spec start_link(options(), mg_core_procreg:reg_name(), module(), _Args, list()) -> - mg_core_procreg:start_link_ret(). -start_link(_Options, RegName, Module, Args, Opts) -> - gen_server:start_link(RegName, Module, Args, Opts). - --spec call(options(), mg_core_procreg:ref(), _Call, timeout()) -> _Reply. -call(_Options, Ref, Call, Timeout) -> - gen_server:call(Ref, Call, Timeout). - %% Internal functions -spec match(term(), term()) -> boolean(). diff --git a/apps/mg_core/src/mg_core_procreg_gproc.erl b/apps/mg_procreg/src/mg_procreg_gproc.erl similarity index 55% rename from apps/mg_core/src/mg_core_procreg_gproc.erl rename to apps/mg_procreg/src/mg_procreg_gproc.erl index a9a9e2e9..027b84f5 100644 --- a/apps/mg_core/src/mg_core_procreg_gproc.erl +++ b/apps/mg_procreg/src/mg_procreg_gproc.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,41 +14,29 @@ %%% limitations under the License. %%% --module(mg_core_procreg_gproc). +-module(mg_procreg_gproc). %% --behaviour(mg_core_procreg). +-behaviour(mg_procreg). -export([ref/2]). -export([reg_name/2]). -export([select/2]). --export([start_link/5]). --export([call/4]). - -type options() :: undefined. %% --spec ref(options(), mg_core_procreg:name()) -> mg_core_procreg:ref(). +-spec ref(options(), mg_procreg:name()) -> mg_procreg:ref(). ref(_Options, Name) -> {via, gproc, {n, l, Name}}. --spec reg_name(options(), mg_core_procreg:name()) -> mg_core_procreg:reg_name(). +-spec reg_name(options(), mg_procreg:name()) -> mg_procreg:reg_name(). reg_name(Options, Name) -> ref(Options, Name). --spec select(options(), mg_core_procreg:name_pattern()) -> [{mg_core_procreg:name(), pid()}]. +-spec select(options(), mg_procreg:name_pattern()) -> [{mg_procreg:name(), pid()}]. 
select(_Options, NamePattern) -> MatchSpec = [{{{n, l, NamePattern}, '_', '_'}, [], ['$$']}], [{Name, Pid} || [{n, l, Name}, Pid, _] <- gproc:select(MatchSpec)]. - --spec start_link(options(), mg_core_procreg:reg_name(), module(), _Args, list()) -> - mg_core_procreg:start_link_ret(). -start_link(_Options, RegName, Module, Args, Opts) -> - gen_server:start_link(RegName, Module, Args, Opts). - --spec call(options(), mg_core_procreg:ref(), _Call, timeout()) -> _Reply. -call(_Options, Ref, Call, Timeout) -> - gen_server:call(Ref, Call, Timeout). diff --git a/apps/mg_riak/include/pulse.hrl b/apps/mg_riak/include/pulse.hrl new file mode 100644 index 00000000..a4a3b31b --- /dev/null +++ b/apps/mg_riak/include/pulse.hrl @@ -0,0 +1,55 @@ +%% Riak client operations +%% Duration is in native units + +-record(mg_riak_client_get_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_get_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_put_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_put_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_search_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_search_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +-record(mg_riak_client_delete_start, { + name :: mg_core_storage:name() +}). + +-record(mg_riak_client_delete_finish, { + name :: mg_core_storage:name(), + duration :: non_neg_integer() +}). + +%% Riak connection pool events + +-record(mg_riak_connection_pool_state_reached, { + name :: mg_core_storage:name(), + state :: no_free_connections | queue_limit_reached +}). + +-record(mg_riak_connection_pool_connection_killed, { + name :: mg_core_storage:name(), + state :: free | in_use +}). + +-record(mg_riak_connection_pool_error, { + name :: mg_core_storage:name(), + reason :: connect_timeout +}). 
diff --git a/apps/mg_riak/rebar.config b/apps/mg_riak/rebar.config new file mode 100644 index 00000000..12b32ab3 --- /dev/null +++ b/apps/mg_riak/rebar.config @@ -0,0 +1,32 @@ +{deps, [ + {gproc, "0.9.0"}, + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}}, + {riakc, {git, "https://github.com/valitydev/riak-erlang-client", {branch, develop}}}, + {pooler, {git, "https://github.com/seth/pooler", {branch, master}}}, + {msgpack, {git, "https://github.com/msgpack/msgpack-erlang", {branch, master}}}, + {prometheus, "4.8.1"}, + {opentelemetry_api, "1.2.1"} +]}. + +{overrides, [ + {override, rebar3_protobuffs_plugin, [ + {deps, [{protobuffs, {git, "https://github.com/basho/erlang_protobuffs", {tag, "0.8.2"}}}]} + ]}, + + {override, protobuffs, [{deps, []}]}, + + {override, riakc, [ + {erl_opts, [ + {d, namespaced_types}, + {d, deprecated_19} + ]} + ]}, + + {override, riak_pb, [ + {plugins, [ + {riak_pb_msgcodegen, {git, "https://github.com/tsloughter/riak_pb_msgcodegen", {branch, "master"}}}, + {rebar3_protobuffs_plugin, {git, "https://github.com/cmkarlsson/rebar3_protobuffs_plugin", {tag, "0.1.1"}}} + ]}, + {provider_hooks, [{pre, [{compile, {protobuffs, compile}}, {compile, riak_pb_msgcodegen}]}]} + ]} +]}. diff --git a/apps/mg_riak/src/mg_riak.app.src b/apps/mg_riak/src/mg_riak.app.src new file mode 100644 index 00000000..d6219edf --- /dev/null +++ b/apps/mg_riak/src/mg_riak.app.src @@ -0,0 +1,42 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +{application, mg_riak, [ + {description, "Machinegun Riak Storage"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + gproc, + genlib, + prometheus, + mg_utils, + mg_core, + riakc, + pooler, + msgpack, + opentelemetry_api + ]}, + {env, []}, + {modules, []}, + {maintainers, [ + "Petr Kozorezov ", + "Andrey Mayorov " + ]}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/machinegun/src/mg_riak_prometheus.erl b/apps/mg_riak/src/mg_riak_prometheus.erl similarity index 90% rename from apps/machinegun/src/mg_riak_prometheus.erl rename to apps/mg_riak/src/mg_riak_prometheus.erl index ac49f384..1e163077 100644 --- a/apps/machinegun/src/mg_riak_prometheus.erl +++ b/apps/mg_riak/src/mg_riak_prometheus.erl @@ -10,7 +10,7 @@ -export([init/1]). -type options() :: #{}. --type storage() :: mg_core_storage_riak:options(). +-type storage() :: mg_riak_storage:options(). -export_type([storage/0]). @@ -37,7 +37,7 @@ child_spec(_Options, Storage, ChildID) -> -spec init(mg_core_storage:options()) -> genlib_gen:supervisor_ret(). init(Storage) -> - {mg_core_storage_riak, StorageOptions} = mg_core_utils:separate_mod_opts(Storage), + {mg_riak_storage, StorageOptions} = mg_utils:separate_mod_opts(Storage), true = gproc:add_local_property(?PROPNAME, StorageOptions), % NOTE % We only care about keeping gproc property live through this supervisor process. 
diff --git a/apps/machinegun/src/mg_riak_prometheus_collector.erl b/apps/mg_riak/src/mg_riak_prometheus_collector.erl similarity index 98% rename from apps/machinegun/src/mg_riak_prometheus_collector.erl rename to apps/mg_riak/src/mg_riak_prometheus_collector.erl index 82eabc9a..6cca89ca 100644 --- a/apps/machinegun/src/mg_riak_prometheus_collector.erl +++ b/apps/mg_riak/src/mg_riak_prometheus_collector.erl @@ -82,7 +82,7 @@ collect_storage_metrics(#{name := {NS, _Module, Type}} = Storage, Callback) -> -spec gather_metrics(storage()) -> pooler_metrics(). gather_metrics(#{name := Name} = Storage) -> - case mg_core_storage_riak:pool_utilization(Storage) of + case mg_riak_storage:pool_utilization(Storage) of {ok, Metrics} -> Metrics; {error, Reason} -> diff --git a/apps/mg_riak/src/mg_riak_pulse.erl b/apps/mg_riak/src/mg_riak_pulse.erl new file mode 100644 index 00000000..80438a34 --- /dev/null +++ b/apps/mg_riak/src/mg_riak_pulse.erl @@ -0,0 +1,31 @@ +-module(mg_riak_pulse). + +-include_lib("mg_riak/include/pulse.hrl"). + +-behaviour(mpulse). + +%% API +-export_type([beat/0]). +-export([handle_beat/2]). + +%% +%% API +%% +-type beat() :: + % Riak client call handling + #mg_riak_client_get_start{} + | #mg_riak_client_get_finish{} + | #mg_riak_client_put_start{} + | #mg_riak_client_put_finish{} + | #mg_riak_client_search_start{} + | #mg_riak_client_search_finish{} + | #mg_riak_client_delete_start{} + | #mg_riak_client_delete_finish{} + % Riak client call handling + | #mg_riak_connection_pool_state_reached{} + | #mg_riak_connection_pool_connection_killed{} + | #mg_riak_connection_pool_error{}. + +-spec handle_beat(any(), beat() | mpulse:beat()) -> ok. +handle_beat(_Options, _Beat) -> + ok. 
diff --git a/apps/mg_riak/src/mg_riak_pulse_prometheus.erl b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl new file mode 100644 index 00000000..ce3287b5 --- /dev/null +++ b/apps/mg_riak/src/mg_riak_pulse_prometheus.erl @@ -0,0 +1,169 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_riak_pulse_prometheus). + +-include_lib("mg_riak/include/pulse.hrl"). + +-export([setup/0]). + +-behaviour(mpulse). +-export([handle_beat/2]). + +%% internal types +-type beat() :: mg_riak_pulse:beat(). +-type options() :: #{}. +-type metric_name() :: prometheus_metric:name(). +-type metric_label_value() :: term(). + +%% +%% mg_pulse handler +%% + +-spec handle_beat(options(), beat() | mpulse:beat()) -> ok. +handle_beat(_Options, Beat) -> + ok = dispatch_metrics(Beat). + +%% +%% management API +%% + +%% Sets all metrics up. Call this when the app starts. +-spec setup() -> ok. 
+setup() -> + % Riak client operations + true = prometheus_counter:declare([ + {name, mg_riak_client_operation_changes_total}, + {registry, registry()}, + {labels, [namespace, name, operation, change]}, + {help, "Total number of Machinegun riak client operations."} + ]), + true = prometheus_histogram:declare([ + {name, mg_riak_client_operation_duration_seconds}, + {registry, registry()}, + {labels, [namespace, name, operation]}, + {buckets, duration_buckets()}, + {duration_unit, seconds}, + {help, "Machinegun riak client operation duration."} + ]), + %% Riak pool events + true = prometheus_counter:declare([ + {name, mg_riak_pool_no_free_connection_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of no free connection errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_queue_limit_reached_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of queue limit reached errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_connect_timeout_errors_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of connect timeout errors in Machinegun riak pool."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_killed_free_connections_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of killed free Machinegun riak pool connections."} + ]), + true = prometheus_counter:declare([ + {name, mg_riak_pool_killed_in_use_connections_total}, + {registry, registry()}, + {labels, [namespace, name]}, + {help, "Total number of killed used Machinegun riak pool connections."} + ]), + ok. + +%% Internals + +-spec dispatch_metrics(beat() | _Other) -> ok. 
+% Riak client operations +dispatch_metrics(#mg_riak_client_get_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, start]); +dispatch_metrics(#mg_riak_client_get_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, get, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, get], Duration); +dispatch_metrics(#mg_riak_client_put_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, start]); +dispatch_metrics(#mg_riak_client_put_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, put, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, put], Duration); +dispatch_metrics(#mg_riak_client_search_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, start]); +dispatch_metrics(#mg_riak_client_search_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, search, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, search], Duration); +dispatch_metrics(#mg_riak_client_delete_start{name = {NS, _Caller, Type}}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, start]); +dispatch_metrics(#mg_riak_client_delete_finish{name = {NS, _Caller, Type}, duration = Duration}) -> + ok = inc(mg_riak_client_operation_changes_total, [NS, Type, delete, finish]), + ok = observe(mg_riak_client_operation_duration_seconds, [NS, Type, delete], Duration); +% Riak pool events +dispatch_metrics(#mg_riak_connection_pool_state_reached{ + name = {NS, _Caller, Type}, + state = no_free_connections +}) -> + ok = inc(mg_riak_pool_no_free_connection_errors_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_state_reached{ + name = {NS, 
_Caller, Type}, + state = queue_limit_reached +}) -> + ok = inc(mg_riak_pool_queue_limit_reached_errors_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = free}) -> + ok = inc(mg_riak_pool_killed_free_connections_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_connection_killed{name = {NS, _Caller, Type}, state = in_use}) -> + ok = inc(mg_riak_pool_killed_in_use_connections_total, [NS, Type]); +dispatch_metrics(#mg_riak_connection_pool_error{name = {NS, _Caller, Type}, reason = connect_timeout}) -> + ok = inc(mg_riak_pool_connect_timeout_errors_total, [NS, Type]); +% Unknown +dispatch_metrics(_Beat) -> + ok. + +-spec inc(metric_name(), [metric_label_value()]) -> ok. +inc(Name, Labels) -> + _ = prometheus_counter:inc(registry(), Name, Labels, 1), + ok. + +-spec observe(metric_name(), [metric_label_value()], number()) -> ok. +observe(Name, Labels, Value) -> + _ = prometheus_histogram:observe(registry(), Name, Labels, Value), + ok. + +-spec registry() -> prometheus_registry:registry(). +registry() -> + default. + +-spec duration_buckets() -> [number()]. +duration_buckets() -> + [ + 0.001, + 0.005, + 0.010, + 0.025, + 0.050, + 0.100, + 0.250, + 0.500, + 1, + 2.5, + 5, + 10 + ]. diff --git a/apps/mg_core/src/mg_core_storage_riak.erl b/apps/mg_riak/src/mg_riak_storage.erl similarity index 94% rename from apps/mg_core/src/mg_core_storage_riak.erl rename to apps/mg_riak/src/mg_riak_storage.erl index e82e20ab..c2e573f7 100644 --- a/apps/mg_core/src/mg_core_storage_riak.erl +++ b/apps/mg_riak/src/mg_riak_storage.erl @@ -46,9 +46,9 @@ %%% TODO: %%% - классификация и обработка ошибок %%% --module(mg_core_storage_riak). +-module(mg_riak_storage). -include_lib("riakc/include/riakc.hrl"). --include("pulse.hrl"). +-include_lib("mg_riak/include/pulse.hrl"). %% mg_core_storage callbacks -behaviour(mg_core_storage). 
@@ -73,7 +73,7 @@ port := inet:port_number(), bucket := bucket(), pool_options := pool_options(), - pulse := mg_core_pulse:handler(), + pulse := mpulse:handler(), resolve_timeout => timeout(), connect_timeout => timeout(), request_timeout => timeout(), @@ -96,7 +96,7 @@ {return_terms, boolean()} | {term_regex, binary()}. -type range_index_opts() :: [index_opt() | range_index_opt()]. --type client_ref() :: mg_core_utils:gen_ref(). +-type client_ref() :: mg_utils:gen_ref(). %% See https://github.com/seth/pooler/blob/master/src/pooler_config.erl for pool option details -type pool_options() :: #{ @@ -111,7 +111,7 @@ -type pool_name() :: atom(). -type pulse_options() :: #{ name := mg_core_storage:name(), - pulse := mg_core_pulse:handler() + pulse := mpulse:handler() }. %% Duration is measured in native units @@ -146,12 +146,12 @@ pool_utilization(Options) -> %% internal API %% --spec start_client(options()) -> mg_core_utils:gen_start_ret(). +-spec start_client(options()) -> mg_utils:gen_start_ret(). start_client(#{port := Port} = Options) -> IP = get_riak_addr(Options), riakc_pb_socket:start_link(IP, Port, [{connect_timeout, get_option(connect_timeout, Options)}]). --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> PoolName = construct_pool_name(Options), PoolConfig = pooler_config:list_to_pool(make_pool_config(PoolName, Options)), @@ -597,45 +597,45 @@ pulse_options(PoolNameString) -> -spec update_or_create([binary()], number(), pooler_metric_type(), []) -> ok. 
update_or_create([<<"pooler">>, PoolNameString, <<"error_no_members_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, - #mg_core_riak_connection_pool_state_reached{ + #mg_riak_connection_pool_state_reached{ name = Name, state = no_free_connections } ); update_or_create([<<"pooler">>, PoolNameString, <<"queue_max_reached">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, - #mg_core_riak_connection_pool_state_reached{ + #mg_riak_connection_pool_state_reached{ name = Name, state = queue_limit_reached } ); update_or_create([<<"pooler">>, PoolNameString, <<"starting_member_timeout">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, - #mg_core_riak_connection_pool_error{ + #mg_riak_connection_pool_error{ name = Name, reason = connect_timeout } ); update_or_create([<<"pooler">>, PoolNameString, <<"killed_free_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, - #mg_core_riak_connection_pool_connection_killed{ + #mg_riak_connection_pool_connection_killed{ name = Name, state = free } ); update_or_create([<<"pooler">>, PoolNameString, <<"killed_in_use_count">>], _, _Counter, []) -> #{name := Name, pulse := Handler} = pulse_options(PoolNameString), - mg_core_pulse:handle_beat( + mpulse:handle_beat( Handler, - #mg_core_riak_connection_pool_connection_killed{ + #mg_riak_connection_pool_connection_killed{ name = Name, state = in_use } @@ -649,40 +649,40 @@ update_or_create(_MetricKey, _Value, _Type, []) -> -spec emit_beat_start(mg_core_storage:request(), options()) -> ok. 
emit_beat_start({get, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_get_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_get_start{ name = Name }); emit_beat_start({put, _, _, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_put_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_put_start{ name = Name }); emit_beat_start({search, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_search_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_search_start{ name = Name }); emit_beat_start({delete, _, _}, #{pulse := Handler, name := Name}) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_delete_start{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_delete_start{ name = Name }). -spec emit_beat_finish(mg_core_storage:request(), options(), duration()) -> ok. emit_beat_finish({get, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_get_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_get_finish{ name = Name, duration = Duration }); emit_beat_finish({put, _, _, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_put_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_put_finish{ name = Name, duration = Duration }); emit_beat_finish({search, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_search_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_search_finish{ name = Name, duration = Duration }); emit_beat_finish({delete, _, _}, #{pulse := Handler, name := Name}, Duration) -> - ok = mg_core_pulse:handle_beat(Handler, #mg_core_riak_client_delete_finish{ + ok = mpulse:handle_beat(Handler, #mg_riak_client_delete_finish{ name = Name, duration = Duration 
}). diff --git a/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl b/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl new file mode 100644 index 00000000..eb18f51b --- /dev/null +++ b/apps/mg_riak/test/mg_riak_prometheus_metric_SUITE.erl @@ -0,0 +1,349 @@ +%%% +%%% Copyright 2020 RBKmoney +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +-module(mg_riak_prometheus_metric_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("mg_riak/include/pulse.hrl"). +-include_lib("prometheus/include/prometheus_model.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). +-export([riak_client_get_start_test/1]). +-export([riak_client_get_finish_test/1]). +-export([riak_client_put_start_test/1]). +-export([riak_client_put_finish_test/1]). +-export([riak_client_search_start_test/1]). +-export([riak_client_search_finish_test/1]). +-export([riak_client_delete_start_test/1]). +-export([riak_client_delete_finish_test/1]). +-export([riak_pool_no_free_connection_errors_test/1]). +-export([riak_pool_queue_limit_reached_errors_test/1]). +-export([riak_pool_killed_free_connections_test/1]). +-export([riak_pool_killed_in_use_connections_test/1]). +-export([riak_pool_connect_timeout_errors_test/1]). + +-export([riak_pool_collector_test/1]). + +-define(NS, <<"NS">>). 
+ +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, beats}, + {group, collectors} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. +groups() -> + [ + {beats, [parallel], [ + riak_client_get_start_test, + riak_client_get_finish_test, + riak_client_put_start_test, + riak_client_put_finish_test, + riak_client_search_start_test, + riak_client_search_finish_test, + riak_client_delete_start_test, + riak_client_delete_finish_test, + riak_pool_no_free_connection_errors_test, + riak_pool_queue_limit_reached_errors_test, + riak_pool_killed_free_connections_test, + riak_pool_killed_in_use_connections_test, + riak_pool_connect_timeout_errors_test + ]}, + {collectors, [], [ + riak_pool_collector_test + ]} + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + Apps = mg_cth:start_applications([mg_riak]), + ok = mg_riak_pulse_prometheus:setup(), + ok = mg_riak_prometheus:setup(), + [{apps, Apps} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). + +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(_, C) -> + C. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% Tests + +-spec riak_client_get_start_test(config()) -> _. +riak_client_get_start_test(_C) -> + ok = test_beat(#mg_riak_client_get_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_get_finish_test(config()) -> _. 
+riak_client_get_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_get_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, get]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_put_start_test(config()) -> _. +riak_client_put_start_test(_C) -> + ok = test_beat(#mg_riak_client_put_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_put_finish_test(config()) -> _. +riak_client_put_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_put_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, put]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_search_start_test(config()) -> _. +riak_client_search_start_test(_C) -> + ok = test_beat(#mg_riak_client_search_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_search_finish_test(config()) -> _. 
+riak_client_search_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_search_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, search]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_client_delete_start_test(config()) -> _. +riak_client_delete_start_test(_C) -> + ok = test_beat(#mg_riak_client_delete_start{ + name = {?NS, caller, type} + }). + +-spec riak_client_delete_finish_test(config()) -> _. +riak_client_delete_finish_test(_C) -> + Buckets = test_millisecond_buckets(), + _ = maps:fold( + fun(DurationMs, BucketIdx, Acc) -> + ok = test_beat(#mg_riak_client_delete_finish{ + name = {?NS, caller, type}, + duration = erlang:convert_time_unit(DurationMs, millisecond, native) + }), + {BucketsHits, _} = + prometheus_histogram:value(mg_riak_client_operation_duration_seconds, [?NS, type, delete]), + BucketHit = lists:nth(BucketIdx, BucketsHits), + %% Check that bucket under index BucketIdx received one hit + ?assertEqual(maps:get(BucketIdx, Acc, 0) + 1, BucketHit), + Acc#{BucketIdx => BucketHit} + end, + #{}, + Buckets + ). + +-spec riak_pool_no_free_connection_errors_test(config()) -> _. +riak_pool_no_free_connection_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_state_reached{ + name = {?NS, caller, type}, + state = no_free_connections + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_no_free_connection_errors_total, [?NS, type]) + ). + +-spec riak_pool_queue_limit_reached_errors_test(config()) -> _. 
+riak_pool_queue_limit_reached_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_state_reached{ + name = {?NS, caller, type}, + state = queue_limit_reached + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_queue_limit_reached_errors_total, [?NS, type]) + ). + +-spec riak_pool_killed_free_connections_test(config()) -> _. +riak_pool_killed_free_connections_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_connection_killed{ + name = {?NS, caller, type}, + state = free + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_killed_free_connections_total, [?NS, type]) + ). + +-spec riak_pool_killed_in_use_connections_test(config()) -> _. +riak_pool_killed_in_use_connections_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_connection_killed{ + name = {?NS, caller, type}, + state = in_use + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_killed_in_use_connections_total, [?NS, type]) + ). + +-spec riak_pool_connect_timeout_errors_test(config()) -> _. +riak_pool_connect_timeout_errors_test(_C) -> + ok = test_beat(#mg_riak_connection_pool_error{ + name = {?NS, caller, type}, + reason = connect_timeout + }), + ?assertEqual( + 1, + prometheus_counter:value(mg_riak_pool_connect_timeout_errors_total, [?NS, type]) + ). + +%% + +-spec riak_pool_collector_test(config()) -> _. 
+riak_pool_collector_test(_C) -> + ok = mg_cth:await_ready(fun mg_cth:riak_ready/0), + Storage = + {mg_riak_storage, #{ + name => {?NS, caller, type}, + host => "riakdb", + port => 8087, + bucket => ?NS, + pool_options => #{ + init_count => 0, + max_count => 10, + queue_max => 100 + }, + pulse => undefined, + sidecar => {mg_riak_prometheus, #{}} + }}, + + {ok, Pid} = genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + [mg_core_storage:child_spec(Storage, storage)] + ), + + Collectors = prometheus_registry:collectors(default), + ?assert(lists:member(mg_riak_prometheus_collector, Collectors)), + + Self = self(), + ok = prometheus_collector:collect_mf( + default, + mg_riak_prometheus_collector, + fun(MF) -> Self ! MF end + ), + MFs = mg_cth:flush(), + MLabels = [ + #'LabelPair'{name = <<"namespace">>, value = <<"NS">>}, + #'LabelPair'{name = <<"name">>, value = <<"type">>} + ], + ?assertMatch( + [ + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_free">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_in_use">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_connections_limit">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 10}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_queued_requests">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 0}}] + }, + #'MetricFamily'{ + name = <<"mg_riak_pool_queued_requests_limit">>, + metric = [#'Metric'{label = MLabels, gauge = #'Gauge'{value = 100}}] + } + ], + lists:sort(MFs) + ), + + ok = proc_lib:stop(Pid, normal, 5000). + +%% Metrics utils + +-spec test_beat(term()) -> ok. +test_beat(Beat) -> + mg_riak_pulse_prometheus:handle_beat(#{}, Beat). + +-spec test_millisecond_buckets() -> #{non_neg_integer() => pos_integer()}. +test_millisecond_buckets() -> + #{ + 0 => 1, + 1 => 1, + 5 => 2, + 10 => 3 + }. 
diff --git a/apps/mg_riak/test/mg_riak_storage_SUITE.erl b/apps/mg_riak/test/mg_riak_storage_SUITE.erl new file mode 100644 index 00000000..00156a29 --- /dev/null +++ b/apps/mg_riak/test/mg_riak_storage_SUITE.erl @@ -0,0 +1,514 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% + +%%% +%%% Тесты всех возможных бэкендов хранилищ. +%%% +%%% TODO: +%%% - сделать проверку, что неймспейсы не пересекаются +%%% +-module(mg_riak_storage_SUITE). +-include_lib("common_test/include/ct.hrl"). +-include_lib("stdlib/include/assert.hrl"). + +%% tests descriptions +-export([all/0]). +-export([groups/0]). +-export([init_per_suite/1]). +-export([end_per_suite/1]). +-export([init_per_group/2]). +-export([end_per_group/2]). + +%% base group tests +-export([base_test/1]). +-export([batch_test/1]). +-export([indexes_test/1]). +-export([key_length_limit_test/1]). +-export([indexes_test_with_limits/1]). +-export([stress_test/1]). + +-export([riak_pool_stable_test/1]). +-export([riak_pool_overload_test/1]). +-export([riak_pool_misbehaving_connection_test/1]). + +-export([handle_beat/2]). + +%% +%% tests descriptions +%% +-type group_name() :: atom(). +-type test_name() :: atom(). +-type config() :: [{atom(), _}]. + +-spec all() -> [test_name() | {group, group_name()}]. +all() -> + [ + {group, riak} + ]. + +-spec groups() -> [{group_name(), list(_), [test_name()]}]. 
+groups() -> + [ + {riak, [], + tests() ++ + [ + riak_pool_stable_test, + riak_pool_overload_test, + riak_pool_misbehaving_connection_test + ]} + ]. + +-spec tests() -> [test_name()]. +tests() -> + [ + base_test, + batch_test, + % incorrect_context_test, + indexes_test, + key_length_limit_test, + indexes_test_with_limits, + stress_test + ]. + +%% +%% starting/stopping +%% +-spec init_per_suite(config()) -> config(). +init_per_suite(C) -> + % dbg:tracer(), dbg:p(all, c), + % dbg:tpl({riakc_pb_socket, 'get_index_eq', '_'}, x), + % dbg:tpl({riakc_pb_socket, 'get_index_range', '_'}, x), + Apps = mg_cth:start_applications([msgpack, gproc, riakc, pooler]), + [{apps, Apps} | C]. + +-spec end_per_suite(config()) -> ok. +end_per_suite(C) -> + mg_cth:stop_applications(?config(apps, C)). + +-spec init_per_group(group_name(), config()) -> config(). +init_per_group(Group, C) -> + [{storage_type, Group} | C]. + +-spec end_per_group(group_name(), config()) -> ok. +end_per_group(_, _C) -> + ok. + +%% +%% base group tests +%% +-spec base_test(config()) -> _. +base_test(C) -> + Options = storage_options(?config(storage_type, C), <<"base_test">>), + Pid = start_storage(Options), + base_test(<<"1">>, Options), + ok = stop_storage(Pid). + +-spec base_test(mg_core:id(), mg_core_storage:options()) -> _. +base_test(Key, Options) -> + Value1 = #{<<"hello">> => <<"world">>}, + Value2 = [<<"hello">>, 1], + + undefined = mg_core_storage:get(Options, Key), + Ctx1 = mg_core_storage:put(Options, Key, undefined, Value1, []), + {Ctx1, Value1} = mg_core_storage:get(Options, Key), + Ctx2 = mg_core_storage:put(Options, Key, Ctx1, Value2, []), + {Ctx2, Value2} = mg_core_storage:get(Options, Key), + ok = mg_core_storage:delete(Options, Key, Ctx2), + undefined = mg_core_storage:get(Options, Key), + ok. + +-spec batch_test(config()) -> _. 
+batch_test(C) -> + {Mod, StorageOpts} = storage_options(?config(storage_type, C), <<"batch_test">>), + Options = {Mod, StorageOpts#{bathing => #{concurrency_limit => 3}}}, + Pid = start_storage(Options), + Keys = lists:map(fun genlib:to_binary/1, lists:seq(1, 10)), + Value = #{<<"hello">> => <<"world">>}, + + PutBatch = lists:foldl( + fun(Key, Batch) -> + mg_core_storage:add_batch_request({put, Key, undefined, Value, []}, Batch) + end, + mg_core_storage:new_batch(), + Keys + ), + PutResults = mg_core_storage:run_batch(Options, PutBatch), + Ctxs = lists:zipwith( + fun(Key, Result) -> + {{put, Key, undefined, Value, _}, Ctx} = Result, + Ctx + end, + Keys, + PutResults + ), + + GetBatch = lists:foldl( + fun(Key, Batch) -> + mg_core_storage:add_batch_request({get, Key}, Batch) + end, + mg_core_storage:new_batch(), + Keys + ), + GetResults = mg_core_storage:run_batch(Options, GetBatch), + _ = lists:zipwith3( + fun(Key, Ctx, Result) -> + {{get, Key}, {Ctx, Value}} = Result + end, + Keys, + Ctxs, + GetResults + ), + + ok = stop_storage(Pid). + +-spec indexes_test(config()) -> _. 
+indexes_test(C) -> + Options = storage_options(?config(storage_type, C), <<"indexes_test">>), + Pid = start_storage(Options), + + K1 = <<"Key_24">>, + I1 = {integer, <<"index1">>}, + IV1 = 1, + + K2 = <<"Key_42">>, + I2 = {integer, <<"index2">>}, + IV2 = 2, + + Value = #{<<"hello">> => <<"world">>}, + + [] = mg_core_storage:search(Options, {I1, IV1}), + [] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + Ctx1 = mg_core_storage:put(Options, K1, undefined, Value, [{I1, IV1}, {I2, IV2}]), + + [K1] = mg_core_storage:search(Options, {I1, IV1}), + [{IV1, K1}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [K1] = mg_core_storage:search(Options, {I2, IV2}), + [{IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + Ctx2 = mg_core_storage:put(Options, K2, undefined, Value, [{I1, IV2}, {I2, IV1}]), + + [K1] = mg_core_storage:search(Options, {I1, IV1}), + [{IV1, K1}, {IV2, K2}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [K1] = mg_core_storage:search(Options, {I2, IV2}), + [{IV1, K2}, {IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = mg_core_storage:delete(Options, K1, Ctx1), + + [{IV2, K2}] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [{IV1, K2}] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = mg_core_storage:delete(Options, K2, Ctx2), + + [] = mg_core_storage:search(Options, {I1, {IV1, IV2}}), + [] = mg_core_storage:search(Options, {I2, {IV1, IV2}}), + + ok = stop_storage(Pid). + +-spec key_length_limit_test(config()) -> _. 
+key_length_limit_test(C) -> + Options = storage_options(?config(storage_type, C), <<"key_length_limit">>), + Pid = start_storage(Options), + + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:get(Options, <<"">>)), + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:add_batch_request({get, <<"">>}, mg_core_storage:new_batch())), + + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:put(Options, <<"">>, undefined, <<"test">>, [])), + {logic, {invalid_key, {too_small, _}}} = + (catch mg_core_storage:add_batch_request( + {put, <<"">>, undefined, <<"test">>, []}, + mg_core_storage:new_batch() + )), + + _ = mg_core_storage:get(Options, binary:copy(<<"K">>, 1024)), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:get(Options, binary:copy(<<"K">>, 1025))), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:add_batch_request( + {get, binary:copy(<<"K">>, 1025)}, + mg_core_storage:new_batch() + )), + + _ = mg_core_storage:put( + Options, + binary:copy(<<"K">>, 1024), + undefined, + <<"test">>, + [] + ), + + {logic, {invalid_key, {too_big, _}}} = + (catch mg_core_storage:put( + Options, + binary:copy(<<"K">>, 1025), + undefined, + <<"test">>, + [] + )), + + ok = stop_storage(Pid). + +-spec indexes_test_with_limits(config()) -> _. 
+indexes_test_with_limits(C) -> + Options = storage_options(?config(storage_type, C), <<"indexes_test_with_limits">>), + Pid = start_storage(Options), + + K1 = <<"Key_24">>, + I1 = {integer, <<"index1">>}, + IV1 = 1, + + K2 = <<"Key_42">>, + I2 = {integer, <<"index2">>}, + IV2 = 2, + + Value = #{<<"hello">> => <<"world">>}, + + Ctx1 = mg_core_storage:put(Options, K1, undefined, Value, [{I1, IV1}, {I2, IV2}]), + Ctx2 = mg_core_storage:put(Options, K2, undefined, Value, [{I1, IV2}, {I2, IV1}]), + + {[{IV1, K1}], Cont1} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, undefined}), + {[{IV2, K2}], Cont2} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, Cont1}), + {[], undefined} = mg_core_storage:search(Options, {I1, {IV1, IV2}, 1, Cont2}), + + [{IV1, K2}, {IV2, K1}] = mg_core_storage:search(Options, {I2, {IV1, IV2}, inf, undefined}), + + ok = mg_core_storage:delete(Options, K1, Ctx1), + ok = mg_core_storage:delete(Options, K2, Ctx2), + + ok = stop_storage(Pid). + +-spec stress_test(_C) -> ok. +stress_test(C) -> + Options = storage_options(?config(storage_type, C), <<"stress_test">>), + Pid = start_storage(Options), + ProcessCount = 20, + Processes = [ + stress_test_start_process(ID, ProcessCount, Options) + || ID <- lists:seq(1, ProcessCount) + ], + + timer:sleep(5000), + ok = stop_wait_all(Processes, shutdown, 5000), + ok = stop_storage(Pid). + +-spec stress_test_start_process(integer(), pos_integer(), mg_core_storage:options()) -> pid(). +stress_test_start_process(ID, ProcessCount, Options) -> + erlang:spawn_link(fun() -> stress_test_process(ID, ProcessCount, 0, Options) end). + +-spec stress_test_process(integer(), pos_integer(), integer(), mg_core_storage:options()) -> + no_return(). +stress_test_process(ID, ProcessCount, RunCount, Options) -> + % Добавляем смещение ID, чтобы не было пересечения ID машин + ok = base_test(erlang:integer_to_binary(ID), Options), + + receive + {stop, Reason} -> + ct:print("Process: ~p. 
Number of runs: ~p", [self(), RunCount]), + exit(Reason) + after 0 -> stress_test_process(ID + ProcessCount, ProcessCount, RunCount + 1, Options) + end. + +-spec stop_wait_all([pid()], _Reason, timeout()) -> ok. +stop_wait_all(Pids, Reason, Timeout) -> + OldTrap = process_flag(trap_exit, true), + + lists:foreach( + fun(Pid) -> send_stop(Pid, Reason) end, + Pids + ), + + lists:foreach( + fun(Pid) -> + case stop_wait(Pid, Reason, Timeout) of + ok -> ok; + timeout -> exit(stop_timeout) + end + end, + Pids + ), + + true = process_flag(trap_exit, OldTrap), + ok. + +-spec send_stop(pid(), _Reason) -> ok. +send_stop(Pid, Reason) -> + Pid ! {stop, Reason}, + ok. + +-spec stop_wait(pid(), _Reason, timeout()) -> ok | timeout. +stop_wait(Pid, Reason, Timeout) -> + receive + {'EXIT', Pid, Reason} -> ok + after Timeout -> timeout + end. + +%% + +-spec riak_pool_stable_test(_C) -> ok. +riak_pool_stable_test(_C) -> + Namespace = <<"riak_pool_stable_test">>, + InitialCount = 1, + RequestCount = 10, + Options = riak_options(Namespace, #{ + init_count => InitialCount, + max_count => RequestCount div 2, + idle_timeout => 1000, + cull_interval => 1000, + queue_max => RequestCount * 2 + }), + Storage = {mg_riak_storage, Options}, + Pid = start_storage(Storage), + + % Run multiple requests concurrently + _ = genlib_pmap:map( + fun(N) -> + base_test(genlib:to_binary(N), Storage) + end, + lists:seq(1, RequestCount) + ), + + % Give pool 3 seconds to get back to initial state + ok = timer:sleep(3000), + + {ok, Utilization} = mg_riak_storage:pool_utilization(Options), + ?assertMatch( + #{ + in_use_count := 0, + free_count := InitialCount + }, + maps:from_list(Utilization) + ), + + ok = stop_storage(Pid). + +-spec riak_pool_overload_test(_C) -> ok. 
+riak_pool_overload_test(_C) ->
+    Namespace = <<"riak_pool_overload_test">>,
+    RequestCount = 40,
+    Options = riak_options(
+        Namespace,
+        #{
+            init_count => 1,
+            max_count => 4,
+            queue_max => RequestCount div 4
+        }
+    ),
+    Storage = {mg_riak_storage, Options},
+    Pid = start_storage(Storage),
+
+    ?assertThrow(
+        {transient, {storage_unavailable, no_pool_members}},
+        genlib_pmap:map(
+            fun(N) ->
+                base_test(genlib:to_binary(N), Storage)
+            end,
+            lists:seq(1, RequestCount)
+        )
+    ),
+
+    ok = stop_storage(Pid).
+
+-spec riak_pool_misbehaving_connection_test(_C) -> ok.
+riak_pool_misbehaving_connection_test(_C) ->
+    Namespace = <<"riak_pool_misbehaving_connection_test">>,
+    WorkersCount = 4,
+    RequestCount = 4,
+    Options = riak_options(
+        Namespace,
+        #{
+            init_count => 1,
+            max_count => WorkersCount div 2,
+            queue_max => WorkersCount * 2
+        }
+    ),
+    Storage = {mg_riak_storage, Options},
+    Pid = start_storage(Storage),
+
+    _ = genlib_pmap:map(
+        fun(RequestID) ->
+            Key = genlib:to_binary(RequestID),
+            case RequestID of
+                N when (N rem WorkersCount) == (N div WorkersCount) ->
+                    % Ensure that request fails occasionally...
+                    ?assertThrow(
+                        {transient, {storage_unavailable, _}},
+                        mg_core_storage:put(Storage, Key, <<"NOTACONTEXT">>, <<>>, [])
+                    );
+                _ ->
+                    % ...And it will not affect any concurrently running requests.
+                    ?assertEqual(
+                        undefined,
+                        mg_core_storage:get(Storage, Key)
+                    )
+            end
+        end,
+        lists:seq(1, RequestCount * WorkersCount),
+        #{proc_limit => WorkersCount}
+    ),
+
+    ok = stop_storage(Pid).
+
+%%
+
+-spec storage_options(atom(), binary()) -> mg_core_storage:options().
+storage_options(riak, Namespace) ->
+    {mg_riak_storage,
+        riak_options(
+            Namespace,
+            #{
+                init_count => 1,
+                max_count => 10,
+                idle_timeout => 1000,
+                cull_interval => 1000,
+                auto_grow_threshold => 5,
+                queue_max => 100
+            }
+        )}.
+
+-spec riak_options(mg_core:ns(), map()) -> mg_riak_storage:options().
+riak_options(Namespace, PoolOptions) -> + #{ + name => storage, + pulse => ?MODULE, + host => "riakdb", + port => 8087, + bucket => Namespace, + pool_options => PoolOptions + }. + +-spec start_storage(mg_core_storage:options()) -> pid(). +start_storage(Options) -> + mg_utils:throw_if_error( + genlib_adhoc_supervisor:start_link( + #{strategy => one_for_all}, + [mg_core_storage:child_spec(Options, storage)] + ) + ). + +-spec stop_storage(pid()) -> ok. +stop_storage(Pid) -> + ok = proc_lib:stop(Pid, normal, 5000), + ok. + +-spec handle_beat(_, mpulse:beat()) -> ok. +handle_beat(_, Beat) -> + ct:pal("~p", [Beat]). diff --git a/apps/mg_scheduler/include/pulse.hrl b/apps/mg_scheduler/include/pulse.hrl new file mode 100644 index 00000000..d30be2c4 --- /dev/null +++ b/apps/mg_scheduler/include/pulse.hrl @@ -0,0 +1,63 @@ +%% Scheduler + +-record(mg_skd_search_success, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + delay :: mg_skd_scanner:scan_delay(), + tasks :: [mg_skd_task:task()], + limit :: mg_skd_scanner:scan_limit(), + % in native units + duration :: non_neg_integer() +}). + +-record(mg_skd_search_error, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_utils:exception() +}). + +-record(mg_skd_task_error, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_utils:exception(), + machine_id :: mg_utils:id() | undefined +}). + +-record(mg_skd_task_add_error, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + exception :: mg_utils:exception(), + machine_id :: mg_utils:id(), + request_context :: mg_utils:request_context() +}). + +-record(mg_skd_new_tasks, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + new_tasks_count :: non_neg_integer() +}). + +-record(mg_skd_task_started, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + machine_id :: mg_utils:id() | undefined, + task_delay :: timeout() +}). 
+ +-record(mg_skd_task_finished, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + machine_id :: mg_utils:id() | undefined, + task_delay :: timeout(), + % in native units + process_duration :: non_neg_integer() +}). + +-record(mg_skd_quota_reserved, { + namespace :: mg_utils:ns(), + scheduler_name :: mg_skd:name(), + active_tasks :: non_neg_integer(), + waiting_tasks :: non_neg_integer(), + quota_name :: mg_skd_quota_worker:name(), + quota_reserved :: mg_skd_quota:resource() +}). diff --git a/apps/mg_scheduler/rebar.config b/apps/mg_scheduler/rebar.config new file mode 100644 index 00000000..2118765e --- /dev/null +++ b/apps/mg_scheduler/rebar.config @@ -0,0 +1,4 @@ +{deps, [ + {gproc, "0.9.0"}, + {genlib, {git, "https://github.com/valitydev/genlib", {branch, master}}} +]}. diff --git a/apps/mg_scheduler/src/mg_scheduler.app.src b/apps/mg_scheduler/src/mg_scheduler.app.src new file mode 100644 index 00000000..6f640c39 --- /dev/null +++ b/apps/mg_scheduler/src/mg_scheduler.app.src @@ -0,0 +1,20 @@ +{application, mg_scheduler, [ + {description, "Machinegun scheduler"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib, + gproc, + gen_squad, + mg_utils, + mg_procreg, + opentelemetry_api + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. diff --git a/apps/mg_core/src/mg_core_scheduler.erl b/apps/mg_scheduler/src/mg_skd.erl similarity index 86% rename from apps/mg_core/src/mg_core_scheduler.erl rename to apps/mg_scheduler/src/mg_skd.erl index f58494c5..7c3ea07f 100644 --- a/apps/mg_core/src/mg_core_scheduler.erl +++ b/apps/mg_scheduler/src/mg_skd.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,9 @@ %%% limitations under the License. %%% --module(mg_core_scheduler). +-module(mg_skd). 
+ +-include_lib("mg_scheduler/include/pulse.hrl"). -export([child_spec/3]). -export([start_link/2]). @@ -30,21 +32,35 @@ -export([handle_cast/2]). -export([handle_call/3]). +%% Beats +-type beat() :: + % Scheduler handling + #mg_skd_task_add_error{} + | #mg_skd_search_success{} + | #mg_skd_search_error{} + | #mg_skd_task_error{} + | #mg_skd_new_tasks{} + | #mg_skd_task_started{} + | #mg_skd_task_finished{} + | #mg_skd_quota_reserved{}. + +-export_type([beat/0]). + %% Types -type options() :: #{ start_interval => non_neg_integer(), capacity := non_neg_integer(), - quota_name := mg_core_quota_worker:name(), - quota_share => mg_core_quota:share(), - pulse => mg_core_pulse:handler() + quota_name := mg_skd_quota_worker:name(), + quota_share => mg_skd_quota:share(), + pulse => mpulse:handler() }. -type name() :: atom(). --type id() :: {name(), mg_core:ns()}. +-type id() :: {name(), mg_utils:ns()}. --type task_id() :: mg_core_queue_task:id(). --type task() :: mg_core_queue_task:task(). --type target_time() :: mg_core_queue_task:target_time(). +-type task_id() :: mg_skd_task:id(). +-type task() :: mg_skd_task:task(). +-type target_time() :: mg_skd_task:target_time(). -export_type([id/0]). -export_type([name/0]). @@ -54,11 +70,11 @@ %% Internal types -record(state, { id :: id(), - pulse :: mg_core_pulse:handler(), + pulse :: mpulse:handler(), capacity :: non_neg_integer(), - quota_name :: mg_core_quota_worker:name(), - quota_share :: mg_core_quota:share(), - quota_reserved :: mg_core_quota:resource() | undefined, + quota_name :: mg_skd_quota_worker:name(), + quota_share :: mg_skd_quota:share(), + quota_reserved :: mg_skd_quota:resource() | undefined, timer :: timer:tref(), waiting_tasks :: task_queue(), active_tasks :: #{task_id() => pid()}, @@ -99,7 +115,7 @@ child_spec(ID, Options, ChildID) -> type => worker }. --spec start_link(id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_utils:gen_start_ret(). 
start_link(ID, Options) -> gen_server:start_link(self_reg_name(ID), ?MODULE, {ID, Options}, []). @@ -117,7 +133,7 @@ distribute_tasks(Pid, Tasks) when is_pid(Pid) -> %% gen_server callbacks --spec init({id(), options()}) -> mg_core_utils:gen_server_init_ret(state()). +-spec init({id(), options()}) -> mg_utils:gen_server_init_ret(state()). init({ID, Options}) -> {ok, TimerRef} = timer:send_interval(maps:get(start_interval, Options, 1000), start), {ok, #state{ @@ -133,8 +149,8 @@ init({ID, Options}) -> timer = TimerRef }}. --spec handle_call(Call :: any(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). handle_call(inquire, _From, State) -> Status = #{ pid => self(), @@ -150,7 +166,7 @@ handle_call(Call, From, State) -> -type cast() :: {tasks, [task()]}. --spec handle_cast(cast(), state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). +-spec handle_cast(cast(), state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast({tasks, Tasks}, State0) -> State1 = add_tasks(Tasks, State0), State2 = maybe_update_reserved(State1), @@ -164,7 +180,7 @@ handle_cast(Cast, State) -> {'DOWN', monitor(), process, pid(), _Info} | start. --spec handle_info(info(), state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(info(), state()) -> mg_utils:gen_server_handle_info_ret(state()). handle_info({'DOWN', Monitor, process, _Object, _Info}, State0) -> State1 = forget_about_task(Monitor, State0), State2 = start_new_tasks(State1), @@ -179,13 +195,13 @@ handle_info(Info, State) -> % Process registration --spec self_reg_name(id()) -> mg_core_procreg:reg_name(). +-spec self_reg_name(id()) -> mg_procreg:reg_name(). self_reg_name(ID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, {?MODULE, ID}). + mg_procreg:reg_name(mg_procreg_gproc, {?MODULE, ID}). 
--spec self_ref(id()) -> mg_core_procreg:ref(). +-spec self_ref(id()) -> mg_procreg:ref(). self_ref(ID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, {?MODULE, ID}). + mg_procreg:ref(mg_procreg_gproc, {?MODULE, ID}). % Helpers @@ -250,7 +266,7 @@ start_multiple_tasks(N, Iterator, State) when N > 0 -> case dequeue_task(Rank, WaitingTasks) of {Task = #{}, NewWaitingTasks} -> % ...so let's start it. - {ok, Pid, Monitor} = mg_core_scheduler_worker:start_task(ID, Task, SpanCtx), + {ok, Pid, Monitor} = mg_skd_worker:start_task(ID, Task, SpanCtx), NewState = State#state{ waiting_tasks = NewWaitingTasks, active_tasks = ActiveTasks#{TaskID => Pid}, @@ -317,7 +333,7 @@ update_reserved(State = #state{id = ID, quota_name = Quota, quota_share = QuotaS client_id => ID, share => QuotaShare }, - Reserved = mg_core_quota_worker:reserve( + Reserved = mg_skd_quota_worker:reserve( ClientOptions, TotalActiveTasks, TotalKnownTasks, @@ -337,25 +353,23 @@ get_waiting_task_count(#state{waiting_tasks = WaitingTasks}) -> %% logging --include_lib("mg_core/include/pulse.hrl"). - --spec emit_beat(mg_core_pulse:handler(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(mpulse:handler(), mpulse:beat()) -> ok. emit_beat(Handler, Beat) -> - ok = mg_core_pulse:handle_beat(Handler, Beat). + ok = mpulse:handle_beat(Handler, Beat). -spec emit_new_tasks_beat(non_neg_integer(), state()) -> ok. emit_new_tasks_beat(NewTasksCount, #state{pulse = Pulse, id = {Name, NS}}) -> - emit_beat(Pulse, #mg_core_scheduler_new_tasks{ + emit_beat(Pulse, #mg_skd_new_tasks{ namespace = NS, scheduler_name = Name, new_tasks_count = NewTasksCount }). --spec emit_reserved_beat(non_neg_integer(), non_neg_integer(), mg_core_quota:resource(), state()) -> +-spec emit_reserved_beat(non_neg_integer(), non_neg_integer(), mg_skd_quota:resource(), state()) -> ok. 
emit_reserved_beat(Active, Total, Reserved, State) -> #state{pulse = Pulse, id = {Name, NS}, quota_name = Quota} = State, - emit_beat(Pulse, #mg_core_scheduler_quota_reserved{ + emit_beat(Pulse, #mg_skd_quota_reserved{ namespace = NS, scheduler_name = Name, active_tasks = Active, diff --git a/apps/mg_core/src/mg_core_quota.erl b/apps/mg_scheduler/src/mg_skd_quota.erl similarity index 99% rename from apps/mg_core/src/mg_core_quota.erl rename to apps/mg_scheduler/src/mg_skd_quota.erl index bd5f9183..e2217d9a 100644 --- a/apps/mg_core/src/mg_core_quota.erl +++ b/apps/mg_scheduler/src/mg_skd_quota.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota). +-module(mg_skd_quota). %% Менеджер ресурса, который пытается справедливо распределить ограниченный %% запас этого ресурса (размером limit) между множеством потребителей. diff --git a/apps/mg_core/src/mg_core_quota_manager.erl b/apps/mg_scheduler/src/mg_skd_quota_manager.erl similarity index 80% rename from apps/mg_core/src/mg_core_quota_manager.erl rename to apps/mg_scheduler/src/mg_skd_quota_manager.erl index 46eb6c7e..857f141d 100644 --- a/apps/mg_core/src/mg_core_quota_manager.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_manager.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_manager). +-module(mg_skd_quota_manager). -export([child_spec/2]). -export([start_link/1]). @@ -25,8 +25,8 @@ -export_type([options/0]). %% Internal types --type quota_name() :: mg_core_quota_worker:name(). 
--type quota_options() :: mg_core_quota_worker:options(). +-type quota_name() :: mg_skd_quota_worker:name(). +-type quota_options() :: mg_skd_quota_worker:options(). %% %% API @@ -41,12 +41,12 @@ child_spec(Options, ChildID) -> type => supervisor }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(Options) -> genlib_adhoc_supervisor:start_link( #{strategy => one_for_one}, [ - mg_core_quota_worker:child_spec(QuotaOptions, Name) + mg_skd_quota_worker:child_spec(QuotaOptions, Name) || #{name := Name} = QuotaOptions <- maps:values(Options) ] ). diff --git a/apps/mg_core/src/mg_core_quota_worker.erl b/apps/mg_scheduler/src/mg_skd_quota_worker.erl similarity index 82% rename from apps/mg_core/src/mg_core_quota_worker.erl rename to apps/mg_scheduler/src/mg_skd_quota_worker.erl index cc9b881a..898be921 100644 --- a/apps/mg_core/src/mg_core_quota_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_quota_worker.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_worker). +-module(mg_skd_quota_worker). -behaviour(gen_server). @@ -38,10 +38,10 @@ update_interval => timeout() }. -type name() :: binary() | unlimited. --type share() :: mg_core_quota:share(). --type resource() :: mg_core_quota:resource(). --type client_id() :: mg_core_quota:client_id(). --type limit_options() :: mg_core_quota:limit_options(). +-type share() :: mg_skd_quota:share(). +-type resource() :: mg_skd_quota:resource(). +-type client_id() :: mg_skd_quota:client_id(). +-type limit_options() :: mg_skd_quota:limit_options(). -export_type([name/0]). -export_type([share/0]). @@ -65,9 +65,9 @@ pid :: pid() }). -type state() :: #state{}. --type quota() :: mg_core_quota:state(). 
+-type quota() :: mg_skd_quota:state(). -type client() :: #client{}. --type client_options() :: mg_core_quota:client_options(). +-type client_options() :: mg_skd_quota:client_options(). -type monitor() :: reference(). -define(DEFAULT_UPDATE_INTERVAL, 5000). @@ -86,7 +86,7 @@ child_spec(Options, ChildID) -> shutdown => 5000 }. --spec start_link(options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(options()) -> mg_utils:gen_start_ret(). start_link(#{name := Name} = Options) -> gen_server:start_link(self_reg_name(Name), ?MODULE, Options, []). @@ -99,24 +99,24 @@ reserve(ClientOptions, Usage, Expectation, Name) -> %% gen_server callbacks --spec init(options()) -> mg_core_utils:gen_server_init_ret(state()). +-spec init(options()) -> mg_utils:gen_server_init_ret(state()). init(Options) -> #{limit := Limit} = Options, Interval = maps:get(update_interval, Options, ?DEFAULT_UPDATE_INTERVAL), {ok, #state{ options = Options, - quota = mg_core_quota:new(#{limit => Limit}), + quota = mg_skd_quota:new(#{limit => Limit}), clients = #{}, client_monitors = #{}, interval = Interval, timer = erlang:send_after(Interval, self(), ?UPDATE_MESSAGE) }}. --spec handle_call(Call :: any(), mg_core_utils:gen_server_from(), state()) -> - mg_core_utils:gen_server_handle_call_ret(state()). +-spec handle_call(Call :: any(), mg_utils:gen_server_from(), state()) -> + mg_utils:gen_server_handle_call_ret(state()). handle_call({reserve, ClientOptions, Usage, Expectation}, {Pid, _Tag}, State0) -> State1 = ensure_is_registered(ClientOptions, Pid, State0), - {ok, NewReserved, NewQuota} = mg_core_quota:reserve( + {ok, NewReserved, NewQuota} = mg_skd_quota:reserve( ClientOptions, Usage, Expectation, @@ -127,14 +127,14 @@ handle_call(Call, From, State) -> ok = logger:error("unexpected gen_server call received: ~p from ~p", [Call, From]), {noreply, State}. --spec handle_cast(Cast :: any(), state()) -> mg_core_utils:gen_server_handle_cast_ret(state()). 
+-spec handle_cast(Cast :: any(), state()) -> mg_utils:gen_server_handle_cast_ret(state()). handle_cast(Cast, State) -> ok = logger:error("unexpected gen_server cast received: ~p", [Cast]), {noreply, State}. --spec handle_info(Info :: any(), state()) -> mg_core_utils:gen_server_handle_info_ret(state()). +-spec handle_info(Info :: any(), state()) -> mg_utils:gen_server_handle_info_ret(state()). handle_info(?UPDATE_MESSAGE, State) -> - {ok, NewQuota} = mg_core_quota:recalculate_targets(State#state.quota), + {ok, NewQuota} = mg_skd_quota:recalculate_targets(State#state.quota), {noreply, restart_timer(?UPDATE_MESSAGE, State#state{quota = NewQuota})}; handle_info({'DOWN', Monitor, process, _Object, _Info}, State) -> {noreply, forget_about_client(Monitor, State)}; @@ -143,7 +143,7 @@ handle_info(Info, State) -> {noreply, State}. -spec code_change(OldVsn :: any(), state(), Extra :: any()) -> - mg_core_utils:gen_server_code_change_ret(state()). + mg_utils:gen_server_code_change_ret(state()). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -179,7 +179,7 @@ forget_about_client(Monitor, State) -> State#state{ clients = maps:remove(ClientID, AllClients), client_monitors = maps:remove(Monitor, Monitors), - quota = mg_core_quota:remove_client(ClientID, Quota) + quota = mg_skd_quota:remove_client(ClientID, Quota) }; error -> State @@ -187,11 +187,11 @@ forget_about_client(Monitor, State) -> % Worker registration --spec self_ref(name()) -> mg_core_utils:gen_ref(). +-spec self_ref(name()) -> mg_utils:gen_ref(). self_ref(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. --spec self_reg_name(name()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(name()) -> mg_utils:gen_reg_name(). self_reg_name(ID) -> {via, gproc, {n, l, wrap_id(ID)}}. 
diff --git a/apps/mg_core/src/mg_core_queue_scanner.erl b/apps/mg_scheduler/src/mg_skd_scanner.erl similarity index 79% rename from apps/mg_core/src/mg_core_queue_scanner.erl rename to apps/mg_scheduler/src/mg_skd_scanner.erl index 6028d2df..1276eacb 100644 --- a/apps/mg_core/src/mg_core_queue_scanner.erl +++ b/apps/mg_scheduler/src/mg_skd_scanner.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -24,9 +24,9 @@ %%% Distribution process DOES NOT take into account processing locality (_allocate tasks near %%% idling machines_), it just splits tasks uniformly among a set of known schedulers. --module(mg_core_queue_scanner). +-module(mg_skd_scanner). --type scheduler_id() :: mg_core_scheduler:id(). +-type scheduler_id() :: mg_skd:id(). -type scan_delay() :: milliseconds(). -type scan_limit() :: non_neg_integer(). % as in A×X + B @@ -39,8 +39,8 @@ max_scan_limit => scan_limit() | unlimited, scan_ahead => scan_ahead(), retry_scan_delay => scan_delay(), - squad_opts => mg_core_gen_squad:opts(), - pulse => mg_core_pulse:handler() + squad_opts => gen_squad:opts(), + pulse => mpulse:handler() }. -export_type([options/0]). @@ -49,16 +49,16 @@ -export_type([scan_limit/0]). -export_type([scan_ahead/0]). --type beat() :: {squad, {atom(), mg_core_gen_squad_pulse:beat(), _ExtraMeta}}. +-type beat() :: {squad, {atom(), gen_squad_pulse:beat(), _ExtraMeta}}. -export_type([beat/0]). %% --type task() :: mg_core_queue_task:task(). +-type task() :: mg_skd_task:task(). -type queue_state() :: any(). -type queue_options() :: any(). --type queue_handler() :: mg_core_utils:mod_opts(queue_options()). +-type queue_handler() :: mg_utils:mod_opts(queue_options()). -callback child_spec(queue_options(), atom()) -> supervisor:child_spec() | undefined. -callback init(queue_options()) -> {ok, queue_state()}. 
@@ -84,7 +84,7 @@ -export([start_link/2]). -export([where_is/1]). --behaviour(mg_core_gen_squad). +-behaviour(gen_squad). -export([init/1]). -export([discover/1]). -export([handle_rank_change/3]). @@ -92,7 +92,7 @@ -export([handle_cast/4]). -export([handle_info/4]). --behaviour(mg_core_gen_squad_pulse). +-behaviour(gen_squad_pulse). -export([handle_beat/2]). %% @@ -100,7 +100,7 @@ -spec child_spec(scheduler_id(), options(), _ChildID) -> supervisor:child_spec(). child_spec(SchedulerID, Options, ChildID) -> Flags = #{strategy => rest_for_one}, - ChildSpecs = mg_core_utils:lists_compact([ + ChildSpecs = mg_utils:lists_compact([ handler_child_spec(Options, {ChildID, handler}), #{ id => {ChildID, scanner}, @@ -117,11 +117,11 @@ child_spec(SchedulerID, Options, ChildID) -> -spec handler_child_spec(options(), _ChildID) -> supervisor:child_spec() | undefined. handler_child_spec(#{queue_handler := Handler}, ChildID) -> - mg_core_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). + mg_utils:apply_mod_opts_if_defined(Handler, child_spec, undefined, [ChildID]). %% --spec start_link(scheduler_id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_utils:gen_start_ret(). start_link(SchedulerID, Options) -> SquadOpts = maps:merge( maps:get(squad_opts, Options, #{}), @@ -130,7 +130,7 @@ start_link(SchedulerID, Options) -> maps:with([pulse], Options) ) ), - mg_core_gen_squad:start_link( + gen_squad:start_link( self_reg_name(SchedulerID), ?MODULE, {SchedulerID, Options}, @@ -139,7 +139,7 @@ start_link(SchedulerID, Options) -> -spec where_is(scheduler_id()) -> pid() | undefined. where_is(SchedulerID) -> - mg_core_utils:gen_ref_to_pid(self_ref(SchedulerID)). + mg_utils:gen_ref_to_pid(self_ref(SchedulerID)). 
%% @@ -152,13 +152,13 @@ where_is(SchedulerID) -> scan_ahead :: scan_ahead(), retry_delay :: scan_delay(), timer :: reference() | undefined, - pulse :: mg_core_pulse:handler() | undefined + pulse :: mpulse:handler() | undefined }). -type st() :: #st{}. --type rank() :: mg_core_gen_squad:rank(). --type squad() :: mg_core_gen_squad:squad(). +-type rank() :: gen_squad:rank(). +-type squad() :: gen_squad:squad(). -spec init({scheduler_id(), options()}) -> {ok, st()}. init({SchedulerID, Options}) -> @@ -191,15 +191,15 @@ handle_rank_change(follower, _Squad, St) -> -spec handle_cast(_Cast, rank(), squad(), st()) -> {noreply, st()}. handle_cast(Cast, Rank, _Squad, St) -> ok = logger:error( - "unexpected mg_core_gen_squad cast received: ~p, from ~p, rank ~p, state ~p", + "unexpected gen_squad cast received: ~p, from ~p, rank ~p, state ~p", [Cast, Rank, St] ), {noreply, St}. --spec handle_call(_Call, mg_core_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. +-spec handle_call(_Call, mg_utils:gen_server_from(), rank(), squad(), st()) -> {noreply, st()}. handle_call(Call, From, Rank, _Squad, St) -> ok = logger:error( - "unexpected mg_core_gen_squad call received: ~p, from ~p, rank ~p, state ~p", + "unexpected gen_squad call received: ~p, from ~p, rank ~p, state ~p", [Call, From, Rank, St] ), {noreply, St}. @@ -213,12 +213,12 @@ handle_info(scan, follower, _Squad, St) -> {noreply, St}; handle_info(Info, Rank, _Squad, St) -> ok = logger:warning( - "unexpected mg_core_gen_squad info received: ~p, rank ~p, state ~p", + "unexpected gen_squad info received: ~p, rank ~p, state ~p", [Info, Rank, St] ), {noreply, St}. --spec handle_scan(mg_core_gen_squad:squad(), st()) -> st(). +-spec handle_scan(gen_squad:squad(), st()) -> st(). 
handle_scan(Squad, St0 = #st{max_limit = MaxLimit, retry_delay = RetryDelay}) -> StartedAt = erlang:monotonic_time(), %% Try to find out which schedulers are here, getting their statuses @@ -253,30 +253,30 @@ scan_queue(Limit, St = #st{queue_handler = HandlerState, retry_delay = RetryDela ok = emit_scan_success_beat(Result, Limit, StartedAt, St), {Result, St#st{queue_handler = HandlerStateNext}}. --spec disseminate_tasks([task()], [mg_core_scheduler:status()], [scan_limit()], st()) -> ok. +-spec disseminate_tasks([task()], [mg_skd:status()], [scan_limit()], st()) -> ok. disseminate_tasks(Tasks, [_Scheduler = #{pid := Pid}], _Capacities, _St) -> %% A single scheduler, just send him all tasks optimizing away meaningless partitioning - mg_core_scheduler:distribute_tasks(Pid, Tasks); + mg_skd:distribute_tasks(Pid, Tasks); disseminate_tasks(Tasks, Schedulers, Capacities, _St) -> %% Partition tasks among known schedulers proportionally to their capacities - Partitions = mg_core_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), + Partitions = mg_utils:partition(Tasks, lists:zip(Schedulers, Capacities)), %% Distribute shares of tasks among schedulers, sending directly to pids maps:fold( fun(_Scheduler = #{pid := Pid}, TasksShare, _) -> - mg_core_scheduler:distribute_tasks(Pid, TasksShare) + mg_skd:distribute_tasks(Pid, TasksShare) end, ok, Partitions ). --spec inquire_schedulers(mg_core_gen_squad:squad(), st()) -> [mg_core_scheduler:status()]. +-spec inquire_schedulers(gen_squad:squad(), st()) -> [mg_skd:status()]. inquire_schedulers(Squad, #st{scheduler_id = SchedulerID}) -> %% Take all known members, there's at least one which is `self()` - Members = mg_core_gen_squad:members(Squad), + Members = gen_squad:members(Squad), Nodes = lists:map(fun erlang:node/1, Members), - multicall(Nodes, mg_core_scheduler, inquire, [SchedulerID], ?INQUIRY_TIMEOUT). + multicall(Nodes, mg_skd, inquire, [SchedulerID], ?INQUIRY_TIMEOUT). 
--spec compute_adjusted_capacity(mg_core_scheduler:status(), st()) -> scan_limit(). +-spec compute_adjusted_capacity(mg_skd:status(), st()) -> scan_limit(). compute_adjusted_capacity(#{waiting_tasks := W, capacity := C}, #st{scan_ahead = {A, B}}) -> erlang:max(erlang:round(A * erlang:max(C - W, 0)) + B, 0). @@ -324,32 +324,32 @@ cancel_timer(St) -> -spec init_handler(queue_handler()) -> queue_handler_state(). init_handler(Handler) -> - {ok, InitialState} = mg_core_utils:apply_mod_opts(Handler, init), + {ok, InitialState} = mg_utils:apply_mod_opts(Handler, init), {Handler, InitialState}. -spec run_handler(queue_handler_state(), _Function :: atom(), _Args :: list()) -> {_Result, queue_handler_state()}. run_handler({Handler, State}, Function, Args) -> - {Result, NextState} = mg_core_utils:apply_mod_opts(Handler, Function, Args ++ [State]), + {Result, NextState} = mg_utils:apply_mod_opts(Handler, Function, Args ++ [State]), {Result, {Handler, NextState}}. %% --spec self_reg_name(scheduler_id()) -> mg_core_procreg:reg_name(). +-spec self_reg_name(scheduler_id()) -> mg_procreg:reg_name(). self_reg_name(SchedulerID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, {?MODULE, SchedulerID}). + mg_procreg:reg_name(mg_procreg_gproc, {?MODULE, SchedulerID}). --spec self_ref(scheduler_id()) -> mg_core_procreg:ref(). +-spec self_ref(scheduler_id()) -> mg_procreg:ref(). self_ref(SchedulerID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, {?MODULE, SchedulerID}). + mg_procreg:ref(mg_procreg_gproc, {?MODULE, SchedulerID}). %% --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). --spec emit_scan_error_beat(mg_core_utils:exception(), st()) -> ok. +-spec emit_scan_error_beat(mg_utils:exception(), st()) -> ok. 
emit_scan_error_beat(Exception, #st{pulse = Pulse, scheduler_id = {Name, NS}}) -> - mg_core_pulse:handle_beat(Pulse, #mg_core_scheduler_search_error{ + mpulse:handle_beat(Pulse, #mg_skd_search_error{ namespace = NS, scheduler_name = Name, exception = Exception @@ -360,7 +360,7 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ pulse = Pulse, scheduler_id = {Name, NS} }) -> - mg_core_pulse:handle_beat(Pulse, #mg_core_scheduler_search_success{ + mpulse:handle_beat(Pulse, #mg_skd_search_success{ namespace = NS, scheduler_name = Name, delay = Delay, @@ -371,8 +371,8 @@ emit_scan_success_beat({Delay, Tasks}, Limit, StartedAt, #st{ %% --spec handle_beat({mg_core_pulse:handler(), scheduler_id()}, mg_core_gen_squad_pulse:beat()) -> _. +-spec handle_beat({mpulse:handler(), scheduler_id()}, gen_squad_pulse:beat()) -> _. handle_beat({Handler, {Name, NS}}, Beat) -> Producer = queue_scanner, Extra = [{scheduler_type, Name}, {namespace, NS}], - mg_core_pulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). + mpulse:handle_beat(Handler, {squad, {Producer, Beat, Extra}}). diff --git a/apps/mg_core/src/mg_core_scheduler_sup.erl b/apps/mg_scheduler/src/mg_skd_sup.erl similarity index 63% rename from apps/mg_core/src/mg_core_scheduler_sup.erl rename to apps/mg_scheduler/src/mg_skd_sup.erl index 11fc809f..a34e590e 100644 --- a/apps/mg_core/src/mg_core_scheduler_sup.erl +++ b/apps/mg_scheduler/src/mg_skd_sup.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,26 +14,26 @@ %%% limitations under the License. %%% --module(mg_core_scheduler_sup). +-module(mg_skd_sup). --type id() :: mg_core_scheduler:id(). +-type id() :: mg_skd:id(). 
-type options() :: #{ % manager start_interval => non_neg_integer(), capacity := non_neg_integer(), - quota_name := mg_core_quota_worker:name(), - quota_share => mg_core_quota:share(), + quota_name := mg_skd_quota_worker:name(), + quota_share => mg_skd_quota:share(), % scanner - queue_handler := mg_core_queue_scanner:queue_handler(), - max_scan_limit => mg_core_queue_scanner:scan_limit() | unlimited, - scan_ahead => mg_core_queue_scanner:scan_ahead(), - retry_scan_delay => mg_core_queue_scanner:scan_delay(), - squad_opts => mg_core_gen_squad:opts(), + queue_handler := mg_skd_scanner:queue_handler(), + max_scan_limit => mg_skd_scanner:scan_limit() | unlimited, + scan_ahead => mg_skd_scanner:scan_ahead(), + retry_scan_delay => mg_skd_scanner:scan_delay(), + squad_opts => gen_squad:opts(), % workers - task_handler := mg_core_utils:mod_opts(), + task_handler := mg_utils:mod_opts(), % common - pulse => mg_core_pulse:handler() + pulse => mpulse:handler() }. -export_type([options/0]). @@ -52,7 +52,7 @@ child_spec(ID, Options, ChildID) -> type => supervisor }. --spec start_link(id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(id(), options()) -> mg_utils:gen_start_ret(). start_link(SchedulerID, Options) -> ManagerOptions = maps:with( [start_interval, capacity, quota_name, quota_share, pulse], @@ -68,9 +68,9 @@ start_link(SchedulerID, Options) -> ), genlib_adhoc_supervisor:start_link( #{strategy => one_for_all}, - mg_core_utils:lists_compact([ - mg_core_queue_scanner:child_spec(SchedulerID, ScannerOptions, queue), - mg_core_scheduler_worker:child_spec(SchedulerID, WorkerOptions, tasks), - mg_core_scheduler:child_spec(SchedulerID, ManagerOptions, manager) + mg_utils:lists_compact([ + mg_skd_scanner:child_spec(SchedulerID, ScannerOptions, queue), + mg_skd_worker:child_spec(SchedulerID, WorkerOptions, tasks), + mg_skd:child_spec(SchedulerID, ManagerOptions, manager) ]) ). 
diff --git a/apps/mg_core/src/mg_core_queue_task.erl b/apps/mg_scheduler/src/mg_skd_task.erl similarity index 92% rename from apps/mg_core/src/mg_core_queue_task.erl rename to apps/mg_scheduler/src/mg_skd_task.erl index 796ff310..6465d86e 100644 --- a/apps/mg_core/src/mg_core_queue_task.erl +++ b/apps/mg_scheduler/src/mg_skd_task.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_queue_task). +-module(mg_skd_task). -type id() :: any(). -type payload() :: any(). @@ -24,7 +24,7 @@ -type task(TaskID, TaskPayload) :: #{ id := TaskID, target_time := target_time(), - machine_id := mg_core:id(), + machine_id := mg_utils:id(), payload => TaskPayload }. diff --git a/apps/mg_core/src/mg_core_scheduler_worker.erl b/apps/mg_scheduler/src/mg_skd_worker.erl similarity index 78% rename from apps/mg_core/src/mg_core_scheduler_worker.erl rename to apps/mg_scheduler/src/mg_skd_worker.erl index 8dcda772..73ef1644 100644 --- a/apps/mg_core/src/mg_core_scheduler_worker.erl +++ b/apps/mg_scheduler/src/mg_skd_worker.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2019 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,9 +14,9 @@ %%% limitations under the License. %%% --module(mg_core_scheduler_worker). +-module(mg_skd_worker). --include_lib("mg_core/include/pulse.hrl"). +-include_lib("mg_scheduler/include/pulse.hrl"). -export([child_spec/3]). -export([start_link/2]). @@ -30,13 +30,13 @@ -callback execute_task(Options :: any(), task()) -> ok. %% Internal types --type scheduler_id() :: mg_core_scheduler:id(). --type task() :: mg_core_queue_task:task(). +-type scheduler_id() :: mg_skd:id(). 
+-type task() :: mg_skd_task:task(). -type maybe_span() :: opentelemetry:span_ctx() | undefined. -type options() :: #{ - task_handler := mg_core_utils:mod_opts(), - pulse => mg_core_pulse:handler() + task_handler := mg_utils:mod_opts(), + pulse => mpulse:handler() }. -type monitor() :: reference(). @@ -54,7 +54,7 @@ child_spec(SchedulerID, Options, ChildID) -> type => supervisor }. --spec start_link(scheduler_id(), options()) -> mg_core_utils:gen_start_ret(). +-spec start_link(scheduler_id(), options()) -> mg_utils:gen_start_ret(). start_link(SchedulerID, Options) -> genlib_adhoc_supervisor:start_link( self_reg_name(SchedulerID), @@ -78,7 +78,7 @@ start_task(SchedulerID, Task, SpanCtx) -> Error end. --spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_core_utils:gen_start_ret(). +-spec do_start_task(scheduler_id(), options(), task(), maybe_span()) -> mg_utils:gen_start_ret(). do_start_task(SchedulerID, Options, Task, SpanCtx) -> proc_lib:start_link(?MODULE, execute, [SchedulerID, Options, Task, SpanCtx]). @@ -91,7 +91,7 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> ok = emit_start_beat(Task, SchedulerID, Options), ok = try - ok = mg_core_utils:apply_mod_opts(Handler, execute_task, [Task]), + ok = mg_utils:apply_mod_opts(Handler, execute_task, [Task]), End = erlang:monotonic_time(), ok = emit_finish_beat(Task, Start, End, SchedulerID, Options) catch @@ -104,13 +104,13 @@ execute(SchedulerID, #{task_handler := Handler} = Options, Task, SpanCtx) -> % Process registration --spec self_ref(scheduler_id()) -> mg_core_utils:gen_ref(). +-spec self_ref(scheduler_id()) -> mg_utils:gen_ref(). self_ref(ID) -> - mg_core_procreg:ref(mg_core_procreg_gproc, wrap_id(ID)). + mg_procreg:ref(mg_procreg_gproc, wrap_id(ID)). --spec self_reg_name(scheduler_id()) -> mg_core_utils:gen_reg_name(). +-spec self_reg_name(scheduler_id()) -> mg_utils:gen_reg_name(). 
self_reg_name(ID) -> - mg_core_procreg:reg_name(mg_core_procreg_gproc, wrap_id(ID)). + mg_procreg:reg_name(mg_procreg_gproc, wrap_id(ID)). -spec wrap_id(scheduler_id()) -> term(). wrap_id(ID) -> @@ -118,9 +118,9 @@ wrap_id(ID) -> %% logging --spec emit_beat(options(), mg_core_pulse:beat()) -> ok. +-spec emit_beat(options(), mpulse:beat()) -> ok. emit_beat(Options, Beat) -> - ok = mg_core_pulse:handle_beat(maps:get(pulse, Options, undefined), Beat). + ok = mpulse:handle_beat(maps:get(pulse, Options, undefined), Beat). -spec get_delay(task()) -> timeout(). get_delay(#{target_time := Target}) -> @@ -129,7 +129,7 @@ get_delay(#{target_time := Target}) -> -spec emit_start_beat(task(), scheduler_id(), options()) -> ok. emit_start_beat(Task, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_started{ + emit_beat(Options, #mg_skd_task_started{ namespace = NS, scheduler_name = Name, task_delay = get_delay(Task), @@ -138,7 +138,7 @@ emit_start_beat(Task, {Name, NS}, Options) -> -spec emit_finish_beat(task(), integer(), integer(), scheduler_id(), options()) -> ok. emit_finish_beat(Task, StartedAt, FinishedAt, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_finished{ + emit_beat(Options, #mg_skd_task_finished{ namespace = NS, scheduler_name = Name, task_delay = get_delay(Task), @@ -147,9 +147,9 @@ emit_finish_beat(Task, StartedAt, FinishedAt, {Name, NS}, Options) -> process_duration = FinishedAt - StartedAt }). --spec emit_error_beat(task(), mg_core_utils:exception(), scheduler_id(), options()) -> ok. +-spec emit_error_beat(task(), mg_utils:exception(), scheduler_id(), options()) -> ok. 
emit_error_beat(Task, Exception, {Name, NS}, Options) -> - emit_beat(Options, #mg_core_scheduler_task_error{ + emit_beat(Options, #mg_skd_task_error{ namespace = NS, scheduler_name = Name, exception = Exception, diff --git a/apps/mg_core/test/mg_core_quota_SUITE.erl b/apps/mg_scheduler/test/mg_skd_quota_SUITE.erl similarity index 91% rename from apps/mg_core/test/mg_core_quota_SUITE.erl rename to apps/mg_scheduler/test/mg_skd_quota_SUITE.erl index 87196b02..1ddff790 100644 --- a/apps/mg_core/test/mg_core_quota_SUITE.erl +++ b/apps/mg_scheduler/test/mg_skd_quota_SUITE.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2018 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ %%% limitations under the License. %%% --module(mg_core_quota_SUITE). +-module(mg_skd_quota_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("stdlib/include/assert.hrl"). @@ -52,14 +52,14 @@ -type group_name() :: atom(). -record(client, { - options :: mg_core_quota:client_options(), + options :: mg_skd_quota:client_options(), usage = 0 :: resource(), expectation = 0 :: resource(), reserved = 0 :: resource() }). -type client() :: #client{}. --type quota() :: mg_core_quota:state(). --type resource() :: mg_core_quota:resource(). +-type quota() :: mg_skd_quota:state(). +-type resource() :: mg_skd_quota:resource(). %% %% tests descriptions @@ -124,7 +124,7 @@ end_per_test(_Name, _C) -> -spec no_over_allocation_test(config()) -> any(). no_over_allocation_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -135,7 +135,7 @@ no_over_allocation_test(_C) -> -spec fair_sharing_without_usage_test(config()) -> any(). 
fair_sharing_without_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -144,7 +144,7 @@ fair_sharing_without_usage_test(_C) -> {Clients2, Q1} = reserve(Clients1, Q0), ok = validate_quota_contract(Clients2, Limit), % Don't use reserved resurces and recalculate targets - {ok, Q2} = mg_core_quota:recalculate_targets(Q1), + {ok, Q2} = mg_skd_quota:recalculate_targets(Q1), {Clients3, _Q3} = reserve(Clients2, Q2), ok = validate_quota_contract(Clients3, Limit), Expected = repeat(10, 10), @@ -153,7 +153,7 @@ fair_sharing_without_usage_test(_C) -> -spec sharing_respects_usage_test(config()) -> any(). sharing_respects_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -167,7 +167,7 @@ sharing_respects_usage_test(_C) -> {Clients4, Q2} = reserve(Clients3, Q1), ok = validate_quota_contract(Clients4, Limit), % Recalculate targets - {ok, Q3} = mg_core_quota:recalculate_targets(Q2), + {ok, Q3} = mg_skd_quota:recalculate_targets(Q2), {Clients5, _Q4} = reserve(Clients4, Q3), ok = validate_quota_contract(Clients5, Limit), Expected = repeat(10, 5) ++ repeat(0, 5), @@ -176,7 +176,7 @@ sharing_respects_usage_test(_C) -> -spec fair_sharing_with_full_usage_test(config()) -> any(). 
fair_sharing_with_full_usage_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -189,7 +189,7 @@ fair_sharing_with_full_usage_test(_C) -> {Clients4, Q2} = reserve(Clients3, Q1), ok = validate_quota_contract(Clients4, Limit), % Recalculate targets - {ok, Q3} = mg_core_quota:recalculate_targets(Q2), + {ok, Q3} = mg_skd_quota:recalculate_targets(Q2), {Clients5, Q4} = reserve(Clients4, Q3), ok = validate_quota_contract(Clients5, Limit), ?assertEqual([50, 0], get_reserve(Clients5)), @@ -202,7 +202,7 @@ fair_sharing_with_full_usage_test(_C) -> -spec fair_share_with_large_limit(config()) -> any(). fair_share_with_large_limit(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10), @@ -213,7 +213,7 @@ fair_share_with_large_limit(_C) -> -spec unwanted_resources_redistribution_test(config()) -> any(). unwanted_resources_redistribution_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -228,7 +228,7 @@ unwanted_resources_redistribution_test(_C) -> -spec guaranteed_resources_redistribution_test(config()) -> any(). guaranteed_resources_redistribution_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2), @@ -247,7 +247,7 @@ guaranteed_resources_redistribution_test(_C) -> -spec large_amount_of_clients_not_freeze_test(config()) -> any(). large_amount_of_clients_not_freeze_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10000), @@ -258,7 +258,7 @@ large_amount_of_clients_not_freeze_test(_C) -> -spec large_amount_of_clients_with_zero_share_not_freeze_test(config()) -> any(). 
large_amount_of_clients_with_zero_share_not_freeze_test(_C) -> Limit = 100, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(10000, repeat(0, 10000)), @@ -269,7 +269,7 @@ large_amount_of_clients_with_zero_share_not_freeze_test(_C) -> -spec sharing_respects_shares(config()) -> any(). sharing_respects_shares(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [1, 2]), @@ -280,7 +280,7 @@ sharing_respects_shares(_C) -> -spec sharing_respects_zero_shares(config()) -> any(). sharing_respects_zero_shares(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [0, 2]), @@ -291,7 +291,7 @@ sharing_respects_zero_shares(_C) -> -spec share_can_be_changed(config()) -> any(). share_can_be_changed(_C) -> Limit = 6, - Q0 = mg_core_quota:new(#{ + Q0 = mg_skd_quota:new(#{ limit => #{value => Limit} }), Clients0 = create_clients(2, [1, 1]), @@ -313,7 +313,7 @@ repeat(Element, Count) -> create_clients(Number) -> create_clients(Number, repeat(1, Number)). --spec create_clients(non_neg_integer(), [mg_core_quota:share()]) -> [client()]. +-spec create_clients(non_neg_integer(), [mg_skd_quota:share()]) -> [client()]. create_clients(Number, Shares) -> [ #client{ @@ -333,7 +333,7 @@ reserve(Clients, Quota) -> -spec do_reserve(client(), Acc) -> Acc when Acc :: {[client()], quota()}. do_reserve(Client, {Acc, Quota}) -> #client{options = Options, usage = Usage, expectation = Exp} = Client, - {ok, Reserved, NewQuota} = mg_core_quota:reserve(Options, Usage, Exp, Quota), + {ok, Reserved, NewQuota} = mg_skd_quota:reserve(Options, Usage, Exp, Quota), {[Client#client{reserved = Reserved} | Acc], NewQuota}. -spec loop([client()], quota()) -> {[client()], quota()}. 
@@ -345,7 +345,7 @@ loop(Clients, Quota, 0) -> {Clients, Quota}; loop(Clients0, Quota0, N) when N > 0 -> {Clients1, Quota1} = reserve(Clients0, Quota0), - {ok, Quota2} = mg_core_quota:recalculate_targets(Quota1), + {ok, Quota2} = mg_skd_quota:recalculate_targets(Quota1), loop(Clients1, Quota2, N - 1). -spec get_reserve([client()]) -> [resource()]. @@ -360,14 +360,14 @@ get_expectation(Clients) -> set_expectation(Clients, Expecations) -> [C#client{expectation = E} || {C, E} <- lists:zip(Clients, Expecations)]. --spec set_share([client()], [mg_core_quota:share()]) -> [client()]. +-spec set_share([client()], [mg_skd_quota:share()]) -> [client()]. set_share(Clients, Shares) -> [ C#client{options = O#{share => S}} || {#client{options = O} = C, S} <- lists:zip(Clients, Shares) ]. --spec validate_quota_contract([client()], Limit :: mg_core_quota:resource()) -> ok. +-spec validate_quota_contract([client()], Limit :: mg_skd_quota:resource()) -> ok. validate_quota_contract(Clients, Limit) -> true = lists:sum(get_reserve(Clients)) =< Limit, TotalUsage = [C#client.usage || C <- Clients], diff --git a/apps/mg_utils/src/mg_utils.app.src b/apps/mg_utils/src/mg_utils.app.src new file mode 100644 index 00000000..2efde35f --- /dev/null +++ b/apps/mg_utils/src/mg_utils.app.src @@ -0,0 +1,15 @@ +{application, mg_utils, [ + {description, "Machinegun utils library"}, + {vsn, "1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + genlib + ]}, + {env, []}, + {modules, []}, + {maintainers, []}, + {licenses, []}, + {links, []} +]}. 
diff --git a/apps/mg_core/src/mg_core_utils.erl b/apps/mg_utils/src/mg_utils.erl similarity index 87% rename from apps/mg_core/src/mg_core_utils.erl rename to apps/mg_utils/src/mg_utils.erl index 9f377b4c..c8d92856 100644 --- a/apps/mg_core/src/mg_core_utils.erl +++ b/apps/mg_utils/src/mg_utils.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2017 RBKmoney +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ %%% То, чего не хватает в OTP. %%% TODO перенести в genlib %%% --module(mg_core_utils). +-module(mg_utils). %% API %% OTP @@ -54,6 +54,7 @@ -export([apply_mod_opts/3]). -export([apply_mod_opts_if_defined/3]). -export([apply_mod_opts_if_defined/4]). +-export([apply_mod_opts_with_fallback/4]). -export([separate_mod_opts/1]). -export([separate_mod_opts/2]). @@ -75,6 +76,19 @@ -export([take_defined/1]). +%% FIXME Minor crutch for `concatenate_namespaces/2' and scheduler beats +-type ns() :: binary(). +-export_type([ns/0]). + +-type id() :: binary(). +-export_type([id/0]). + +-export_type([opaque/0]). +-type opaque() :: null | true | false | number() | binary() | [opaque()] | #{opaque() => opaque()}. + +-export_type([request_context/0]). +-type request_context() :: opaque(). + %% %% API %% OTP @@ -225,14 +239,33 @@ apply_mod_opts_if_defined(ModOpts, Function, Default) -> -spec apply_mod_opts_if_defined(mod_opts(), atom(), _Default, list(_Arg)) -> _Result. apply_mod_opts_if_defined(ModOpts, Function, Default, Args) -> + case prepare_applicable_mod_opts(ModOpts, Function, Args) of + {ok, {Mod, Function, FunctionArgs}} -> + erlang:apply(Mod, Function, FunctionArgs); + {error, {undefined, _FunctionArgs}} -> + Default + end. + +-spec apply_mod_opts_with_fallback(mod_opts(), atom(), Fallback :: fun(), list(_Arg)) -> _Result. 
+apply_mod_opts_with_fallback(ModOpts, Function, Fallback, Args) -> + case prepare_applicable_mod_opts(ModOpts, Function, Args) of + {ok, {Mod, Function, FunctionArgs}} -> + erlang:apply(Mod, Function, FunctionArgs); + {error, {undefined, FunctionArgs}} -> + erlang:apply(Fallback, FunctionArgs) + end. + +-spec prepare_applicable_mod_opts(mod_opts(), atom(), list(_Arg)) -> + {ok, MFArgs :: {module(), atom(), list(_Arg)}} | {error, {undefined, list(_Arg)}}. +prepare_applicable_mod_opts(ModOpts, Function, Args) -> {Mod, Arg} = separate_mod_opts(ModOpts), FunctionArgs = [Arg | Args], ok = maybe_load_module(Mod), case erlang:function_exported(Mod, Function, length(FunctionArgs)) of true -> - erlang:apply(Mod, Function, FunctionArgs); + {ok, {Mod, Function, FunctionArgs}}; false -> - Default + {error, {undefined, FunctionArgs}} end. -spec maybe_load_module(module()) -> ok. @@ -342,7 +375,7 @@ lists_compact(List) -> List ). --spec concatenate_namespaces(mg_core:ns(), mg_core:ns()) -> mg_core:ns(). +-spec concatenate_namespaces(ns(), ns()) -> ns(). concatenate_namespaces(NamespaceA, NamespaceB) -> <>. diff --git a/apps/mg_utils/src/mpulse.erl b/apps/mg_utils/src/mpulse.erl new file mode 100644 index 00000000..b9497070 --- /dev/null +++ b/apps/mg_utils/src/mpulse.erl @@ -0,0 +1,44 @@ +%%% +%%% Copyright 2024 Valitydev +%%% +%%% Licensed under the Apache License, Version 2.0 (the "License"); +%%% you may not use this file except in compliance with the License. +%%% You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, software +%%% distributed under the License is distributed on an "AS IS" BASIS, +%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%%% See the License for the specific language governing permissions and +%%% limitations under the License. +%%% +-module(mpulse). + +%% API +-export_type([beat/0]). +-export_type([handler/0]). 
+-export([handle_beat/2]). + +-callback handle_beat(Options :: any(), beat()) -> ok. + +%% +%% API +%% +-type beat() :: tuple() | atom() | any(). + +-type handler() :: mg_utils:mod_opts() | undefined. + +-spec handle_beat(handler(), any()) -> ok. +handle_beat(undefined, _Beat) -> + ok; +handle_beat(Handler, Beat) -> + {Mod, Options} = mg_utils:separate_mod_opts(Handler), + try + ok = Mod:handle_beat(Options, Beat) + catch + Class:Reason:ST -> + Stacktrace = genlib_format:format_stacktrace(ST), + Msg = "Pulse handler ~p failed at beat ~p: ~p:~p ~s", + ok = logger:error(Msg, [{Mod, Options}, Beat, Class, Reason, Stacktrace]) + end. diff --git a/apps/mg_woody/include/pulse.hrl b/apps/mg_woody/include/pulse.hrl index b4df40cf..344f3971 100644 --- a/apps/mg_woody/include/pulse.hrl +++ b/apps/mg_woody/include/pulse.hrl @@ -3,7 +3,7 @@ machine_id :: mg_core_events_machine:id(), request_context :: mg_core:request_context(), deadline :: mg_core_deadline:deadline(), - exception :: mg_core_utils:exception() + exception :: mg_utils:exception() }). -record(woody_event, { diff --git a/apps/mg_woody/src/mg_woody.app.src b/apps/mg_woody/src/mg_woody.app.src index ff8e11ce..fcdede20 100644 --- a/apps/mg_woody/src/mg_woody.app.src +++ b/apps/mg_woody/src/mg_woody.app.src @@ -24,6 +24,7 @@ genlib, mg_proto, woody, + mg_utils, mg_core, opentelemetry_api ]}, diff --git a/apps/mg_woody/src/mg_woody_automaton.erl b/apps/mg_woody/src/mg_woody_automaton.erl index 681b51c4..f8b84eb0 100644 --- a/apps/mg_woody/src/mg_woody_automaton.erl +++ b/apps/mg_woody/src/mg_woody_automaton.erl @@ -237,7 +237,7 @@ get_ns_options(Namespace, Options) -> throw({logic, namespace_not_found}) end. --spec pulse(mg_core:ns(), options()) -> mg_core_pulse:handler(). +-spec pulse(mg_core:ns(), options()) -> mpulse:handler(). 
pulse(Namespace, Options) -> try get_machine_options(Namespace, Options) of #{machines := #{pulse := Pulse}} -> @@ -265,7 +265,7 @@ default_processing_timeout(Namespace, Options) -> simplify_core_machine(Machine = #{status := Status}) -> Machine#{status => simplify_machine_status(Status)}. --spec exception_to_string(mg_core_utils:exception()) -> binary(). +-spec exception_to_string(mg_utils:exception()) -> binary(). exception_to_string(Exception) -> iolist_to_binary(genlib_format:format_exception(Exception)). diff --git a/apps/mg_woody/src/mg_woody_event_handler.erl b/apps/mg_woody/src/mg_woody_event_handler.erl index d31b0f1b..b949211d 100644 --- a/apps/mg_woody/src/mg_woody_event_handler.erl +++ b/apps/mg_woody/src/mg_woody_event_handler.erl @@ -29,9 +29,9 @@ Event :: woody_event_handler:event(), RpcID :: woody:rpc_id(), EventMeta :: woody_event_handler:event_meta(), - PulseHandler :: mg_core_pulse:handler(). + PulseHandler :: mpulse:handler(). handle_event(Event, RpcID, EventMeta, PulseHandler) -> - mg_core_pulse:handle_beat(PulseHandler, #woody_event{ + mpulse:handle_beat(PulseHandler, #woody_event{ event = Event, rpc_id = RpcID, event_meta = EventMeta diff --git a/apps/mg_woody/src/mg_woody_event_sink.erl b/apps/mg_woody/src/mg_woody_event_sink.erl index f2b09674..06845bef 100644 --- a/apps/mg_woody/src/mg_woody_event_sink.erl +++ b/apps/mg_woody/src/mg_woody_event_sink.erl @@ -1,5 +1,5 @@ %%% -%%% Copyright 2020 Valitydev +%%% Copyright 2024 Valitydev %%% %%% Licensed under the Apache License, Version 2.0 (the "License"); %%% you may not use this file except in compliance with the License. @@ -17,36 +17,11 @@ -module(mg_woody_event_sink). -include_lib("mg_proto/include/mg_proto_event_sink_thrift.hrl"). --include_lib("mg_proto/include/mg_proto_state_processing_thrift.hrl"). -%% API --export([handler/1]). -export([serialize/3]). --export_type([options/0]). - -%% woody handler --behaviour(woody_server_thrift_handler). --export([handle_function/4]). 
- -%% -%% API -%% --type options() :: {[mg_core:id()], _NSOptions}. - --spec handler(options()) -> mg_woody_utils:woody_handler(). -handler(Options) -> - {"/v1/event_sink", {{mg_proto_state_processing_thrift, 'EventSink'}, {?MODULE, Options}}}. - -%% -%% woody handler -%% --spec handle_function(woody:func(), woody:args(), woody_context:ctx(), options()) -> no_return(). - -handle_function('GetHistory', {_EventSinkID, _Range}, _WoodyContext, {_AvaliableEventSinks, _Options}) -> - erlang:throw(#mg_stateproc_EventSinkNotFound{}). %% -%% events_sink events encoder +%% event_sink events encoder %% -spec serialize(mg_core:ns(), mg_core:id(), mg_core_events:event()) -> iodata(). diff --git a/apps/mg_woody/src/mg_woody_life_sink.erl b/apps/mg_woody/src/mg_woody_life_sink.erl index 7838e840..6a4bd733 100644 --- a/apps/mg_woody/src/mg_woody_life_sink.erl +++ b/apps/mg_woody/src/mg_woody_life_sink.erl @@ -41,7 +41,7 @@ -type machine_lifecycle_failed_event() :: event(machine_lifecycle_failed, #{ occurred_at := timestamp_ns(), - exception := mg_core_utils:exception() + exception := mg_utils:exception() }). -type machine_lifecycle_repaired_event() :: event(machine_lifecycle_repaired, #{ @@ -105,7 +105,7 @@ serialize_data({machine_lifecycle_repaired, _}) -> serialize_data({machine_lifecycle_removed, _}) -> {machine, {removed, #mg_lifesink_MachineLifecycleRemovedEvent{}}}. --spec exception_to_string(mg_core_utils:exception()) -> binary(). +-spec exception_to_string(mg_utils:exception()) -> binary(). exception_to_string(Exception) -> iolist_to_binary(genlib_format:format_exception(Exception)). 
diff --git a/apps/mg_woody/src/mg_woody_packer.erl b/apps/mg_woody/src/mg_woody_packer.erl index 66b72544..8a1c62a1 100644 --- a/apps/mg_woody/src/mg_woody_packer.erl +++ b/apps/mg_woody/src/mg_woody_packer.erl @@ -211,18 +211,6 @@ pack(machine_descriptor, {NS, Ref, Range}) -> ref = pack(ref, Ref), range = pack(history_range, Range) }; -pack(sink_event, #{ - id := ID, - body := #{source_ns := SourceNS, source_id := SourceID, event := Event} -}) -> - #mg_stateproc_SinkEvent{ - id = pack(event_id, ID), - source_id = pack(id, SourceID), - source_ns = pack(ns, SourceNS), - event = pack(event, Event) - }; -pack(sink_history, SinkHistory) -> - pack({list, sink_event}, SinkHistory); pack(Type, Value) -> erlang:error(badarg, [Type, Value]). @@ -356,7 +344,7 @@ unpack(state_change, MachineStateChange) -> } = MachineStateChange, { unpack(aux_state, AuxState), - unpack({list, event_body}, mg_core_utils:take_defined([EventBodies, []])) + unpack({list, event_body}, mg_utils:take_defined([EventBodies, []])) }; unpack(signal, {timeout, #mg_stateproc_TimeoutSignal{}}) -> timeout; @@ -413,19 +401,6 @@ unpack(history_range, #mg_stateproc_HistoryRange{ {unpack(event_id, After), unpack(integer, Limit), unpack(direction, Direction)}; unpack(machine_descriptor, #mg_stateproc_MachineDescriptor{ns = NS, ref = Ref, range = Range}) -> {unpack(ns, NS), unpack(ref, Ref), unpack(history_range, Range)}; -unpack(sink_event, SinkEvent) -> - #mg_stateproc_SinkEvent{id = ID, source_ns = SourceNS, source_id = SourceID, event = Event} = - SinkEvent, - #{ - id => unpack(id, ID), - body => #{ - source_ns => unpack(ns, SourceNS), - source_id => unpack(id, SourceID), - event => unpack(event, Event) - } - }; -unpack(sink_history, SinkHistory) -> - unpack({list, sink_event}, SinkHistory); unpack(Type, Value) -> erlang:error(badarg, [Type, Value]). 
diff --git a/apps/mg_woody/src/mg_woody_processor.erl b/apps/mg_woody/src/mg_woody_processor.erl index 9c1381d0..9fbf03cb 100644 --- a/apps/mg_woody/src/mg_woody_processor.erl +++ b/apps/mg_woody/src/mg_woody_processor.erl @@ -101,11 +101,9 @@ call_processor(Options, ReqCtx, Deadline, Function, Args) -> % TODO сделать нормально! {ok, TRef} = timer:kill_after(call_duration_limit(Options, Deadline) + ?KILL_TIMEOUT), try - woody_client:call( - {{mg_proto_state_processing_thrift, 'Processor'}, Function, Args}, - Options, - mg_woody_utils:set_deadline(Deadline, request_context_to_woody_context(ReqCtx)) - ) + WoodyContext = mg_woody_utils:set_deadline(Deadline, request_context_to_woody_context(ReqCtx)), + Service = {mg_proto_state_processing_thrift, 'Processor'}, + woody_client:call({Service, Function, Args}, Options, WoodyContext) of {ok, _} = Result -> Result; diff --git a/apps/mg_woody/src/mg_woody_pulse_otel.erl b/apps/mg_woody/src/mg_woody_pulse_otel.erl index de339293..25aa885f 100644 --- a/apps/mg_woody/src/mg_woody_pulse_otel.erl +++ b/apps/mg_woody/src/mg_woody_pulse_otel.erl @@ -3,7 +3,7 @@ -include_lib("mg_woody/include/pulse.hrl"). %% mg_pulse handler --behaviour(mg_core_pulse). +-behaviour(mpulse). -export([handle_beat/2]). @@ -13,8 +13,9 @@ -type beat() :: #woody_event{} | #woody_request_handle_error{} - | mg_core_pulse:beat() - | mg_core_queue_scanner:beat(). + | mg_core:beat() + | mg_skd:beat() + | mg_skd_scanner:beat(). -export_type([options/0]). diff --git a/apps/mg_woody/src/mg_woody_utils.erl b/apps/mg_woody/src/mg_woody_utils.erl index 86b7427e..574f18be 100644 --- a/apps/mg_woody/src/mg_woody_utils.erl +++ b/apps/mg_woody/src/mg_woody_utils.erl @@ -38,7 +38,7 @@ deadline => mg_core_deadline:deadline(), request_context := mg_core:request_context() }. --type pulse() :: mg_core_pulse:handler(). +-type pulse() :: mpulse:handler(). 
%% %% Woody @@ -58,7 +58,7 @@ handle_error(Ctx, F, Pulse) -> machine_id := ID, request_context := ReqCtx } = Ctx, - ok = mg_core_pulse:handle_beat(Pulse, #woody_request_handle_error{ + ok = mpulse:handle_beat(Pulse, #woody_request_handle_error{ namespace = NS, machine_id = ID, request_context = ReqCtx, @@ -88,7 +88,6 @@ handle_logic_error(machine_already_exist) -> #mg_stateproc_MachineAlreadyExists{ handle_logic_error(machine_failed) -> #mg_stateproc_MachineFailed{}; handle_logic_error(machine_already_working) -> #mg_stateproc_MachineAlreadyWorking{}; handle_logic_error(namespace_not_found) -> #mg_stateproc_NamespaceNotFound{}; -handle_logic_error(event_sink_not_found) -> #mg_stateproc_EventSinkNotFound{}; % TODO обработать случай создания машины c некорректным ID в рамках thrift handle_logic_error({invalid_machine_id, _}) -> #mg_stateproc_MachineNotFound{}. diff --git a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl index f08743fd..b7c3c520 100644 --- a/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_modernizer_tests_SUITE.erl @@ -176,7 +176,11 @@ mg_woody_config(Name, C) -> timers => #{} }, retries => #{}, - event_stash_size => 0 + event_stash_size => 0, + worker => #{ + registry => mg_procreg_global, + sidecar => mg_cth_worker + } }, case Name of legacy_activities -> diff --git a/apps/mg_woody/test/mg_stress_SUITE.erl b/apps/mg_woody/test/mg_stress_SUITE.erl index 29bf3aa0..7971717f 100644 --- a/apps/mg_woody/test/mg_stress_SUITE.erl +++ b/apps/mg_woody/test/mg_stress_SUITE.erl @@ -40,6 +40,7 @@ all() -> init_per_suite(C) -> Config = mg_woody_config(C), Apps = mg_cth:start_applications([ + brod, {hackney, [{use_default_pool, false}]}, mg_woody, opentelemetry_exporter, @@ -120,8 +121,18 @@ mg_woody_config(_C) -> timers => #{} }, retries => #{}, - event_sinks => [], - event_stash_size => 10 + event_sinks => [ + {mg_event_sink_kafka, #{ + name => kafka, + topic => 
<<"mg_core_event_sink">>, + client => mg_cth:config(kafka_client_name) + }} + ], + event_stash_size => 10, + worker => #{ + registry => mg_procreg_global, + sidecar => mg_cth_worker + } } } }. diff --git a/apps/mg_woody/test/mg_woody_tests_SUITE.erl b/apps/mg_woody/test/mg_woody_tests_SUITE.erl index f317aab2..cb0cf4e5 100644 --- a/apps/mg_woody/test/mg_woody_tests_SUITE.erl +++ b/apps/mg_woody/test/mg_woody_tests_SUITE.erl @@ -96,6 +96,7 @@ all() -> groups() -> [ % TODO проверить отмену таймера + % TODO проверить отдельно get_history {base, [sequence], [ namespace_not_found, machine_id_not_found, @@ -332,12 +333,16 @@ mg_woody_config(C) -> % сейчас же можно иногда включать и смотреть % suicide_probability => 0.1, event_sinks => [ - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => ?ES_ID, client => mg_cth:config(kafka_client_name) }} - ] + ], + worker => #{ + registry => mg_procreg_global, + sidecar => mg_cth_worker + } } } }. @@ -613,12 +618,16 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => <<"mg_core_event_sink">>, client => mg_cth:config(kafka_client_name) }} - ] + ], + worker => #{ + registry => mg_procreg_global, + sidecar => mg_cth_worker + } }, <<"2">> => #{ storage => mg_core_storage_memory, @@ -633,17 +642,21 @@ config_with_multiple_event_sinks(_C) -> }, retries => #{}, event_sinks => [ - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka_other, topic => <<"mg_core_event_sink_2">>, client => mg_cth:config(kafka_client_name) }}, - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => kafka, topic => <<"mg_core_event_sink">>, client => mg_cth:config(kafka_client_name) }} - ] + ], + worker => #{ + registry => mg_procreg_global, + sidecar => mg_cth_worker + } } } }, @@ -654,7 +667,7 @@ config_with_multiple_event_sinks(_C) -> {ok, _Pid} = genlib_adhoc_supervisor:start_link( {local, 
mg_core_sup_does_nothing}, #{strategy => rest_for_one}, - mg_cth_configurator:construct_child_specs(Config) + mg_cth_conf:construct_child_specs(Config) ), ok = mg_cth:stop_applications(Apps). diff --git a/config/config.yaml b/config/config.yaml index 8458dc2e..7ee197ac 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -100,22 +100,6 @@ erlang: ipv6: true disable_dns_cache: false -# TODO Retire this option, rely upon OTEL env variables -# https://empayre.youtrack.cloud/issue/TD-838 -# Opentelemetry settings -# By default opentelemetry is disabled which is equivalent to -# "opentelemetry: disabled" -opentelemetry: - # TODO Describe sampling - # Name of the service to use in recording machinegun's spans - service_name: machinegun - # For now spans processed always in batches. - # We support only "otlp" traces exporter - exporter: - # Supports only "http/protobuf" or "grpc" - protocol: http/protobuf - endpoint: http://jaeger:4318 - # API server options. woody_server: ip: "::" @@ -136,9 +120,10 @@ cluster: # optional, default value 5000 ms reconnect_timeout: 5000 -# if undefined then 'mg_core_procreg_gproc' will be used +# TODO Use aliases, not actual module names. +# if undefined then 'mg_procreg_gproc' will be used process_registry: - module: mg_core_procreg_global + module: mg_procreg_global limits: process_heap: 2M # heap limit diff --git a/docs/c4.dsl b/docs/c4.dsl index cfdc6215..ecf1fd76 100644 --- a/docs/c4.dsl +++ b/docs/c4.dsl @@ -45,15 +45,6 @@ workspace { support_services -> this "Executes RPC" "http, thrift" } - mg_es_thrift = container "Event Sink Thrift API" "Thrift service for reading all events of all machine in strictly ordered fashion" "erlang, http, thrift" { - unknown -> this "Executes RPC" "http, thrift" - } - - mg_es_machine = container "Event sink collector machine" { - description "This process starts as a machine with internal processor. Other than that it is a machine with a namespace and its very own options." 
- mg_es_thrift -> this "Queries" "erlang" - } - mg_es_kafka = container "Machines Business Event Sink" "Machines business events publisher via kafka topic on per-namcespace basis and thirft serialization of its data" "erlang, kafka, thrift" { this -> eventstream "Produces" "thrift" } @@ -148,7 +139,6 @@ workspace { this -> mg_processor "Calls state processor" this -> mg_events_storage "Reads and writes" "erlang" this -> mg_es_kafka "Publishes business events" "erlang" - this -> mg_es_machine "Publishes business events" "erlang" } } } diff --git a/elvis.config b/elvis.config index ae0fa37d..fb74f980 100644 --- a/elvis.config +++ b/elvis.config @@ -39,8 +39,8 @@ }}, {elvis_style, state_record_and_type, #{ ignore => [ - mg_core_gen_squad, - mg_core_gen_squad_heart, + gen_squad, + gen_squad_heart, mg_core_storage_memory, mg_core_union, mg_core_worker @@ -55,7 +55,7 @@ {elvis_style, invalid_dynamic_call, #{ ignore => [ % Working with generic registries. - mg_core_utils + mg_utils ] }}, {elvis_style, no_debug_call, #{ @@ -101,7 +101,7 @@ }}, {elvis_style, used_ignored_variable, #{ ignore => [ - mg_core_events_sink_kafka_errors_SUITE, + mg_event_sink_kafka_errors_SUITE, mg_core_workers_SUITE ] }}, diff --git a/example/.dockerignore b/example/.dockerignore new file mode 100644 index 00000000..e69de29b diff --git a/example/.formatter.exs b/example/.formatter.exs new file mode 100644 index 00000000..d2cda26e --- /dev/null +++ b/example/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/example/.gitignore b/example/.gitignore new file mode 100644 index 00000000..6bf5bcb7 --- /dev/null +++ b/example/.gitignore @@ -0,0 +1,26 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. 
+/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +load_processor-*.tar + +# Temporary files, for example, from tests. +/tmp/ diff --git a/example/Dockerfile b/example/Dockerfile new file mode 100644 index 00000000..a58a7166 --- /dev/null +++ b/example/Dockerfile @@ -0,0 +1,23 @@ +ARG ELIXIR_VERSION +ARG OTP_VERSION + +FROM docker.io/library/elixir:${ELIXIR_VERSION}-otp-${OTP_VERSION} +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Install woorl +RUN wget -q -O- "https://github.com/valitydev/woorl/releases/download/1.8/woorl-1.8.tar.gz" \ + | tar -xvz -C /usr/local/bin/ + +# Set env +ENV CHARSET=UTF-8 +ENV LANG=C.UTF-8 + +RUN apt-get update && \ + apt-get install -y inotify-tools=3.22.6.0-4 --no-install-recommends && \ + mix local.hex --force && \ + mix local.rebar --force && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Set runtime +CMD ["/bin/bash"] diff --git a/example/README.md b/example/README.md new file mode 100644 index 00000000..1f7a7145 --- /dev/null +++ b/example/README.md @@ -0,0 +1,37 @@ +# LoadProcessor + +**TODO: Add description** + + +## Examples + +### Per machine + +``` elixir +# Alias machinery helper for convenience +alias LoadProcessor.Machine + +# Create machine client +machine = Machine.new("eventful-counter") + +# Start machine +machine |> Machine.start("start payload") + +# Call machine (returns call result, not machine client) +machine |> Machine.call("call payload") + +# Repair machine +machine |> Machine.simple_repair() + +# Get machine +machine |> Machine.get() + +# Get machine status +machine |> Machine.get() |> Map.get(:status) +``` + +### Start batch + +``` elixir 
+LoadProcessor.run("simple-counter", 100) +``` diff --git a/example/compose.yaml b/example/compose.yaml new file mode 100644 index 00000000..3eccc802 --- /dev/null +++ b/example/compose.yaml @@ -0,0 +1,156 @@ +version: '3' +services: + + load-processor: + build: + dockerfile: Dockerfile + context: . + args: + OTP_VERSION: 26 + ELIXIR_VERSION: 1.16.2 + volumes: + - .:$PWD + environment: + OTEL_TRACES_EXPORTER: otlp + OTEL_TRACES_SAMPLER: parentbased_always_on + OTEL_EXPORTER_OTLP_PROTOCOL: http_protobuf + OTEL_EXPORTER_OTLP_ENDPOINT: http://jaeger:4318 + OTEL_SERVICE_NAME: load-processor + depends_on: + machinegun: + condition: service_healthy + working_dir: $PWD + command: /usr/local/bin/iex + + machinegun: + build: + dockerfile: Dockerfile + context: .. + args: + SERVICE_NAME: machinegun + OTP_VERSION: 25.3 + REBAR_VERSION: 3.18 + THRIFT_VERSION: 0.14.2.2 + volumes: + - ./machinegun/config.yaml:/opt/machinegun/etc/config.yaml + - ./machinegun/cookie:/opt/machinegun/etc/cookie + healthcheck: + # For `ERL_DIST_PORT` see `dist_port` entry in `example/machinegun/config.yaml` + test: "ERL_DIST_PORT=31337 /opt/machinegun/bin/machinegun ping" + interval: 5s + timeout: 5s + retries: 20 + environment: + OTEL_TRACES_EXPORTER: otlp + OTEL_TRACES_SAMPLER: parentbased_always_off + OTEL_EXPORTER_OTLP_PROTOCOL: http_protobuf + OTEL_EXPORTER_OTLP_ENDPOINT: http://jaeger:4318 + depends_on: + jaeger: + condition: service_healthy + riakdb: + condition: service_started + member1: + condition: service_started + member2: + condition: service_started + kafka1: + condition: service_healthy + kafka2: + condition: service_healthy + kafka3: + condition: service_healthy + # See https://docs.docker.com/compose/compose-file/deploy/ + deploy: + mode: replicated + replicas: 5 + endpoint_mode: dnsrr + resources: + limits: + cpus: '0.5' + memory: 1G + reservations: + cpus: '0.2' + memory: 512M + restart_policy: + condition: on-failure + + riakdb: &member-node + image: 
docker.io/basho/riak-kv:ubuntu-2.2.3 + environment: + - CLUSTER_NAME=riakkv + - COORDINATOR_NODE=riakdb + labels: + - "com.basho.riak.cluster.name=riakkv" + volumes: + - ./riak/riak_user.conf:/etc/riak/user.conf:ro + - schemas:/etc/riak/schemas + member1: + <<: *member-node + links: + - riakdb + depends_on: + - riakdb + member2: + <<: *member-node + links: + - riakdb + depends_on: + - riakdb + + zookeeper: + image: docker.io/confluentinc/cp-zookeeper:5.1.2 + healthcheck: + test: echo ruok | nc 127.0.0.1 2181 || exit -1 + interval: 5s + timeout: 240s + retries: 50 + environment: + KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" + ZOOKEEPER_CLIENT_PORT: 2181 + + kafka1: &kafka-broker + image: docker.io/confluentinc/cp-kafka:5.1.2 + depends_on: + - zookeeper + healthcheck: + test: ["CMD", "kafka-topics", "--list", "--zookeeper", "zookeeper:2181"] + interval: 5s + timeout: 10s + retries: 5 + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092 + kafka2: + <<: *kafka-broker + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092 + kafka3: + <<: *kafka-broker + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092 + + jaeger: + image: jaegertracing/all-in-one:1.47 + environment: + - COLLECTOR_OTLP_ENABLED=true + healthcheck: + test: "/go/bin/all-in-one-linux status" + interval: 2s + timeout: 1s + retries: 20 + ports: + - 4317:4317 + - 4318:4318 + - 5778:5778 + - 14250:14250 + - 16686:16686 + +volumes: + schemas: + external: false diff --git a/example/config/config.exs b/example/config/config.exs new file mode 100644 index 00000000..a056d653 --- /dev/null +++ b/example/config/config.exs @@ -0,0 +1,7 @@ +import Config + +config :logger, level: :info + +config :load_processor, :automaton, + url: "http://machinegun:8022/v1/automaton", + 
options: nil diff --git a/example/lib/load_processor.ex b/example/lib/load_processor.ex new file mode 100644 index 00000000..9370355a --- /dev/null +++ b/example/lib/load_processor.ex @@ -0,0 +1,29 @@ +defmodule LoadProcessor do + @moduledoc false + alias LoadProcessor.Machine + + def run(ns, count) do + :timer.tc(fn -> do_run(ns, count) end) + end + + defp do_run(ns, count) do + 1..count + |> Task.async_stream(fn i -> try_start(ns, i) end, max_concurrency: count, timeout: :infinity) + |> Enum.filter(fn + {:ok, {_i, nil}} -> false + {:ok, _} -> true + end) + |> Enum.map(&elem(&1, 1)) + end + + defp try_start(ns, i) do + ns + |> Machine.new() + |> Machine.start(%{"payload" => nil}) + + {i, nil} + rescue + exception in Woody.BadResultError -> + {i, exception} + end +end diff --git a/example/lib/load_processor/application.ex b/example/lib/load_processor/application.ex new file mode 100644 index 00000000..70719bc2 --- /dev/null +++ b/example/lib/load_processor/application.ex @@ -0,0 +1,52 @@ +defmodule LoadProcessor.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + require Logger + + alias Woody.Server.Http, as: Server + alias LoadProcessor.StfuWoodyHandler, as: WoodyHandler + alias LoadProcessor.ProcessorHandler + + @impl true + def start(_type, _args) do + children = [ + server_spec([{"/", LoadProcessor.WebHandler, []}]) + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: LoadProcessor.Supervisor] + + case Supervisor.start_link(children, opts) do + {:ok, pid} -> + Logger.info("Woody server now running on #{server_endpoint()}") + {:ok, pid} + + bad_ret -> + bad_ret + end + end + + defp server_spec(additional_handlers) do + endpoint = + Server.Endpoint.any(:inet) + |> Map.put(:port, 8022) + + Server.child_spec(LoadProcessor, endpoint, [ + 
ProcessorHandler.new("/v1/stateproc/simple-counter", + event_handler: WoodyHandler, + less_events: true + ), + ProcessorHandler.new("/v1/stateproc/eventful-counter", event_handler: WoodyHandler) + | additional_handlers + ]) + end + + defp server_endpoint() do + Server.endpoint(LoadProcessor) + end +end diff --git a/example/lib/load_processor/machine.ex b/example/lib/load_processor/machine.ex new file mode 100644 index 00000000..db706267 --- /dev/null +++ b/example/lib/load_processor/machine.ex @@ -0,0 +1,98 @@ +defmodule LoadProcessor.Machine do + @moduledoc false + + alias Woody.Generated.MachinegunProto.StateProcessing.Automaton.Client + alias MachinegunProto.StateProcessing.{MachineDescriptor, Reference, HistoryRange, Direction} + alias LoadProcessor.Machine.{History, Utils} + + require OpenTelemetry.Tracer, as: Tracer + require Direction + + @default_range %HistoryRange{limit: 10, direction: Direction.backward()} + + @enforce_keys [:client, :ns, :id] + defstruct client: nil, + ns: nil, + id: nil, + status: nil, + history: nil, + aux_state: nil + + @automaton_url Application.compile_env!(:load_processor, [:automaton, :url]) + @automaton_opts List.wrap(Application.compile_env!(:load_processor, [:automaton, :options])) + + def new(ns, id) do + new(Woody.Context.new(), ns, id) + end + + def new(ns) do + new(ns, random_id()) + end + + def new(woody_ctx, ns, id) do + %__MODULE__{ + client: Client.new(woody_ctx, @automaton_url, @automaton_opts), + ns: ns, + id: id + } + end + + def loaded?(%__MODULE__{status: nil}), do: false + def loaded?(%__MODULE__{status: _}), do: true + + def start(%__MODULE__{client: client, ns: ns, id: id} = machine, args) do + Tracer.with_span "starting machine" do + _ = Client.start!(client, ns, id, Utils.pack(args)) + end + + get(machine) + end + + def get(%__MODULE__{client: client, ns: ns, id: id} = machine) do + Tracer.with_span "getting machine" do + machine_state = Client.get_machine!(client, make_descr(ns, id, nil)) + + %{ + machine + 
| history: History.from_machine_state(machine_state), + status: machine_state.status, + aux_state: Utils.marshal(:aux_state, machine_state.aux_state) + } + end + end + + def call(%__MODULE__{client: client, ns: ns, id: id}, args) do + Tracer.with_span "calling machine" do + client + |> Client.call!(make_descr(ns, id, nil), Utils.pack(args)) + |> Utils.unpack() + end + end + + def notify(%__MODULE__{client: client, ns: ns, id: id}, args) do + Tracer.with_span "sending notification to machine" do + client + |> Client.notify!(make_descr(ns, id, nil), Utils.pack(args)) + end + end + + def simple_repair(%__MODULE__{client: client, ns: ns, id: id} = machine) do + Tracer.with_span "simply repairaing machine" do + _ = Client.simple_repair(client, ns, make_ref(id)) + machine + end + end + + defp make_descr(ns, id, range) do + %MachineDescriptor{ns: ns, ref: make_ref(id), range: range || @default_range} + end + + defp make_ref(id) do + %Reference{id: id} + end + + defp random_id() do + <> = :snowflake.new() + :genlib_format.format_int_base(id, 62) + end +end diff --git a/example/lib/load_processor/machine/event.ex b/example/lib/load_processor/machine/event.ex new file mode 100644 index 00000000..ee2bde04 --- /dev/null +++ b/example/lib/load_processor/machine/event.ex @@ -0,0 +1,23 @@ +defmodule LoadProcessor.Machine.Event do + @moduledoc false + + alias MachinegunProto.StateProcessing.Event, as: MachineEvent + alias LoadProcessor.Machine.Utils + + @enforce_keys [:id, :occurred_at, :body] + defstruct id: nil, occurred_at: nil, body: nil + + def from_machine_history(history) do + Enum.map(history, &construct_event/1) + end + + defp construct_event(%MachineEvent{} = machine_event) do + {:ok, occurred_at, _rest} = DateTime.from_iso8601(machine_event.created_at) + + %__MODULE__{ + id: machine_event.id, + occurred_at: occurred_at, + body: Utils.marshal(:event, machine_event) + } + end +end diff --git a/example/lib/load_processor/machine/history.ex 
b/example/lib/load_processor/machine/history.ex new file mode 100644 index 00000000..e2d29cd3 --- /dev/null +++ b/example/lib/load_processor/machine/history.ex @@ -0,0 +1,13 @@ +defmodule LoadProcessor.Machine.History do + alias MachinegunProto.StateProcessing.Machine, as: MachineState + alias LoadProcessor.Machine.Event + + @enforce_keys [:events, :range] + defstruct events: nil, range: nil + + def from_machine_state(%MachineState{history: history, history_range: history_range}) do + events = Event.from_machine_history(history) + + %__MODULE__{events: events, range: history_range} + end +end diff --git a/example/lib/load_processor/machine/utils.ex b/example/lib/load_processor/machine/utils.ex new file mode 100644 index 00000000..a79b3d1e --- /dev/null +++ b/example/lib/load_processor/machine/utils.ex @@ -0,0 +1,37 @@ +defmodule LoadProcessor.Machine.Utils do + @moduledoc false + alias MachinegunProto.MsgPack + alias MachinegunProto.StateProcessing.Content + alias MachinegunProto.StateProcessing.Event + + def pack(nil) do + %MsgPack.Value{nl: %MsgPack.Nil{}} + end + + def pack(data) do + %MsgPack.Value{bin: :erlang.term_to_binary(data)} + end + + def unpack(%MsgPack.Value{nl: %MsgPack.Nil{}}) do + nil + end + + def unpack(%MsgPack.Value{bin: bin}) do + :erlang.binary_to_term(bin) + end + + @format_version 1 + + def marshal(:content, %Content{format_version: @format_version, data: data}), do: unpack(data) + def marshal(:content, _), do: nil + def marshal(:aux_state, value), do: marshal(:content, value) + def marshal(:event, %Event{format_version: @format_version, data: data}), do: unpack(data) + def marshal(type, _value), do: raise("Marshalling of type #{inspect(type)} is not supported") + + def unmarshal(:content, nil), do: nil + def unmarshal(:content, value), do: %Content{format_version: @format_version, data: pack(value)} + def unmarshal(:aux_state, value), do: unmarshal(:content, value) + + def unmarshal(type, _value), + do: raise("Unmarshalling of type 
#{inspect(type)} is not supported") +end diff --git a/example/lib/load_processor/processor_handler.ex b/example/lib/load_processor/processor_handler.ex new file mode 100644 index 00000000..33234e35 --- /dev/null +++ b/example/lib/load_processor/processor_handler.ex @@ -0,0 +1,198 @@ +defmodule LoadProcessor.ProcessorHandler do + @moduledoc false + alias Woody.Generated.MachinegunProto.StateProcessing.Processor + @behaviour Processor.Handler + + require OpenTelemetry.Tracer, as: Tracer + require Logger + + alias LoadProcessor.Machine.Utils + alias MachinegunProto.StateProcessing.{SignalArgs, CallArgs, RepairArgs} + alias MachinegunProto.StateProcessing.{Signal, InitSignal, TimeoutSignal, NotificationSignal} + alias MachinegunProto.StateProcessing.{SignalResult, CallResult} + alias MachinegunProto.StateProcessing.{HistoryRange, Direction} + alias MachinegunProto.StateProcessing.{Machine, MachineStateChange} + alias MachinegunProto.StateProcessing.{ComplexAction, TimerAction, SetTimerAction} + alias MachinegunProto.Base.Timer + + def new(http_path, options \\ []) do + {hdlopts, options} = + case Keyword.pop(options, :less_events, false) do + {false, opts} -> {%{less_events: false}, opts} + {true, opts} -> {%{less_events: true}, opts} + end + + Processor.Handler.new({__MODULE__, hdlopts}, http_path, options) + end + + @impl true + def process_signal(%SignalArgs{signal: signal, machine: machine}, _ctx, hdlopts) do + case signal do + %Signal{init: %InitSignal{arg: args}} -> + Tracer.with_span "initializing" do + process_init(machine, Utils.unpack(args), hdlopts) + end + + %Signal{timeout: %TimeoutSignal{}} -> + Tracer.with_span "timeouting" do + process_timeout(machine, hdlopts) + end + + %Signal{notification: %NotificationSignal{arg: args}} -> + Tracer.with_span "notifying" do + process_notification(machine, Utils.unpack(args), hdlopts) + end + + _uknown_signal -> + throw(:not_implemented) + end + end + + @impl true + def process_call(%CallArgs{arg: args, machine: 
machine}, _ctx, hdlopts) do + Tracer.with_span "processing call" do + Logger.debug("Calling machine #{machine.id} of #{machine.ns} with #{inspect(args)}") + + change = + %MachineStateChange{} + |> preserve_aux_state(machine) + |> put_events([{:call_processed, args}], hdlopts) + + action = + %ComplexAction{} + |> set_timer(0) + + {:ok, %CallResult{response: Utils.pack("result"), change: change, action: action}} + end + end + + @impl true + def process_repair(%RepairArgs{arg: _arg, machine: _machine}, _ctx, _hdlopts) do + throw(:not_implemented) + end + + defp process_init(%Machine{id: id, ns: ns} = _machine, args, hdlopts) do + Logger.debug("Starting '#{id}' of '#{ns}' with arguments: #{inspect(args)}") + + change = + %MachineStateChange{} + |> put_aux_state(%{"arbitrary" => "arbitrary aux state data", :counter => 0}) + |> put_events([:counter_created], hdlopts) + + action = + %ComplexAction{} + |> set_timer(get_rand_sleep_time([3, 2, 1])) + + {:ok, %SignalResult{change: change, action: action}} + end + + defp get_rand_sleep_time(seed) do + # 0s 1s 2s etc occurrences in seed + seed + |> Enum.with_index() + |> Enum.map(fn {occ, i} -> List.duplicate(i, occ) end) + |> List.flatten() + |> Enum.random() + end + + defp process_timeout(%Machine{id: id, ns: ns} = machine, hdlopts) do + Logger.debug("Timeouting machine #{id} of #{ns}") + aux_state = get_aux_state(machine) + + case aux_state do + %{notified: true} -> + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_stopped], hdlopts) + + {:ok, %SignalResult{change: change, action: %ComplexAction{}}} + + %{counter: counter} when counter < 100 -> + aux_state = Map.update!(aux_state, :counter, &(&1 + 1)) + Logger.debug("New aux state #{inspect(aux_state)}") + + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([{:counter_incremented, 1}], hdlopts) + + action = + %ComplexAction{} + |> set_timer(get_rand_sleep_time([3, 2, 1])) + + {:ok, %SignalResult{change: 
change, action: action}} + + _ -> + change = + %MachineStateChange{} + |> put_aux_state(aux_state) + |> put_events([:counter_stopped], hdlopts) + + {:ok, %SignalResult{change: change, action: %ComplexAction{}}} + end + end + + defp process_notification(%Machine{id: id, ns: ns} = machine, args, hdlopts) do + Logger.debug("Notifying machine #{id} of #{ns}") + + change = + %MachineStateChange{} + |> put_aux_state(Map.put(get_aux_state(machine), :notified, true)) + |> put_events([{:counter_notified, args}], hdlopts) + + action = + %ComplexAction{} + |> set_timer(get_rand_sleep_time([3, 2, 1])) + + {:ok, %SignalResult{change: change, action: action}} + end + + defp preserve_aux_state(change, %Machine{aux_state: aux_state}) do + %MachineStateChange{change | aux_state: aux_state} + end + + defp get_aux_state(%Machine{aux_state: aux_state}) do + Utils.marshal(:aux_state, aux_state) + end + + defp put_aux_state(change, data) do + # Optional 'aux_state' technically can be 'nil' but this will + # break machine, because it is not interpreted as msg_pack's 'nil' + # but actually erlang's 'undefined'. In other words, default + # 'nil' value of 'aux_state' does not leave previous value + # unchanged but always expects it to be explicitly set. 
+ %MachineStateChange{change | aux_state: Utils.unmarshal(:aux_state, data)} + end + + defp put_events(change, _events, %{less_events: true}) do + change + end + + defp put_events(change, events, _hdlopts) do + wrapped_events = + events + |> Enum.map(&Utils.unmarshal(:content, &1)) + + %MachineStateChange{change | events: wrapped_events} + end + + defp set_timer(action, timeout, deadline \\ nil, range \\ nil) do + timer = %SetTimerAction{ + timer: %Timer{timeout: timeout, deadline: deadline}, + range: maybe_last_n_range(range, 10), + timeout: nil + } + + %ComplexAction{action | timer: %TimerAction{set_timer: timer}} + end + + defp maybe_last_n_range(nil, limit) do + require Direction + %HistoryRange{after: nil, limit: limit, direction: Direction.backward()} + end + + defp maybe_last_n_range(range, _limit) do + range + end +end diff --git a/example/lib/load_processor/stfu_woody_handler.ex b/example/lib/load_processor/stfu_woody_handler.ex new file mode 100644 index 00000000..84a0a689 --- /dev/null +++ b/example/lib/load_processor/stfu_woody_handler.ex @@ -0,0 +1,34 @@ +defmodule LoadProcessor.StfuWoodyHandler do + @moduledoc false + alias :woody_event_handler, as: WoodyEventHandler + alias Woody.EventHandler.Formatter + @behaviour WoodyEventHandler + require Logger + + @exposed_meta [ + :event, + :service, + :function, + :type, + :metadata, + :url, + :deadline, + :execution_duration_ms + ] + + @allowed_severity [:warning, :error] + + def handle_event(event, rpc_id, meta, _opts) do + case WoodyEventHandler.get_event_severity(event, meta) do + level when level in @allowed_severity -> + Logger.log( + level, + Formatter.format(rpc_id, event, meta), + WoodyEventHandler.format_meta(event, meta, @exposed_meta) + ) + + _ -> + :ok + end + end +end diff --git a/example/lib/load_processor/web_handler.ex b/example/lib/load_processor/web_handler.ex new file mode 100644 index 00000000..162f7157 --- /dev/null +++ b/example/lib/load_processor/web_handler.ex @@ -0,0 +1,24 @@ 
+defmodule LoadProcessor.WebHandler do + @moduledoc false + + def init(req, state) do + req = + :cowboy_req.reply( + 200, + %{"content-type" => "text/plain"}, + "New random id\n#{inspect(random_id())}\n", + req + ) + + {:ok, req, state} + end + + def terminate(_, _, _) do + :ok + end + + defp random_id() do + <> = :snowflake.new() + :genlib_format.format_int_base(id, 62) + end +end diff --git a/example/machinegun/config.yaml b/example/machinegun/config.yaml new file mode 100644 index 00000000..1fc736e6 --- /dev/null +++ b/example/machinegun/config.yaml @@ -0,0 +1,186 @@ +service_name: machinegun + +dist_node_name: + hostpart: ip + +dist_port: + mode: static + port: 31337 + +erlang: + secret_cookie_file: "/opt/machinegun/etc/cookie" + ipv6: false + disable_dns_cache: true + +woody_server: + ip: "::" + port: 8022 + max_concurrent_connections: 8000 + http_keep_alive_timeout: 3000ms + shutdown_timeout: 5s + +snowflake_machine_id: 1 + +storage: + type: riak + host: riakdb + port: 8087 + pool: + size: 100 + queue_max: 500 + connect_timeout: 500ms + request_timeout: 10s + index_query_timeout: 60s + batch_concurrency_limit: 10 + +cluster: + discovery: + type: dns + options: + domain_name: machinegun + sname: machinegun + reconnect_timeout: 5000 + +process_registry: + module: mg_procreg_global + +limits: + process_heap: 2M + memory: + type: cgroups # cgroups | total + value: 90% + scheduler_tasks: 5000 + +logging: + out_type: stdout + level: warning + formatter: + level_map: + 'emergency': 'ERROR' + 'alert': 'ERROR' + 'critical': 'ERROR' + 'error': 'ERROR' + 'warning': 'WARN' + 'notice': 'INFO' + 'info': 'INFO' + 'debug': 'DEBUG' + +namespaces: + simple-counter: + retries: &retries + storage: + type: exponential + max_retries: infinity + factor: 2 + timeout: 10ms + max_timeout: 60s + timers: + type: exponential + max_retries: 100 + factor: 2 + timeout: 2s + max_timeout: 30m + processor: + type: exponential + max_retries: + max_total_timeout: 1d + factor: 2 + timeout: 10ms + 
max_timeout: 60s + continuation: + type: exponential + max_retries: infinity + factor: 2 + timeout: 10ms + max_timeout: 60s + event_sinks: + kafka: + type: kafka + client: default_kafka_client + topic: simple-counter + default_processing_timeout: 30s + timer_processing_timeout: 60s + reschedule_timeout: 60s + hibernate_timeout: 5s + shutdown_timeout: 5s + unload_timeout: 1m + processor: + url: http://load-processor:8022/v1/stateproc/simple-counter + pool_size: 50 + http_keep_alive_timeout: 10s + overseer: &overseer + capacity: 1000 + min_scan_delay: 1s + scan_interval: 1m + timers: &timers + scan_interval: 1m + scan_limit: 1000 + capacity: 1000 + min_scan_delay: 10s + notification: &notification + capacity: 1000 + scan_interval: 1m + min_scan_delay: 1s + scan_handicap: 10s + scan_cutoff: 4W + reschedule_time: 5s + event_stash_size: 5 + modernizer: + current_format_version: 1 + handler: + url: http://load-processor:8022/v1/modernizer/simple-counter + pool_size: 50 + http_keep_alive_timeout: 10s + + eventful-counter: + retries: *retries + event_sinks: + kafka: + type: kafka + client: default_kafka_client + topic: eventful-counter + default_processing_timeout: 30s + timer_processing_timeout: 60s + reschedule_timeout: 60s + hibernate_timeout: 5s + shutdown_timeout: 5s + unload_timeout: 1m + processor: + url: http://load-processor:8022/v1/stateproc/eventful-counter + pool_size: 50 + http_keep_alive_timeout: 10s + overseer: *overseer + timers: *timers + notification: *notification + event_stash_size: 5 + modernizer: + current_format_version: 1 + handler: + url: http://load-processor:8022/v1/modernizer/eventful-counter + pool_size: 50 + http_keep_alive_timeout: 10s + +lifecycle_pulse: + topic: mg-lifecycle + client: default_kafka_client + +kafka: + default_kafka_client: + endpoints: + - host: "kafka1" + port: 9092 + - host: "kafka2" + port: 9092 + - host: "kafka3" + port: 9092 + producer: + compression: no_compression + partition_onwire_limit: 1 + ack_timeout: 10s + 
required_acks: all_isr + partition_buffer_limit: 256 + max_linger: 0ms + max_linger_count: 0 + max_batch_size: 1M + max_retries: 3 + retry_backoff: 500ms diff --git a/example/machinegun/cookie b/example/machinegun/cookie new file mode 100644 index 00000000..dfa851bc --- /dev/null +++ b/example/machinegun/cookie @@ -0,0 +1 @@ +load-test-machinegun \ No newline at end of file diff --git a/example/mix.exs b/example/mix.exs new file mode 100644 index 00000000..ebb6fcc0 --- /dev/null +++ b/example/mix.exs @@ -0,0 +1,60 @@ +defmodule LoadProcessor.MixProject do + use Mix.Project + + def project do + [ + app: :load_processor, + version: "0.1.0", + elixir: "~> 1.16", + start_permanent: Mix.env() == :prod, + deps: deps(), + releases: releases() + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + extra_applications: [:logger], + mod: {LoadProcessor.Application, []} + ] + end + + defp releases do + [ + api_key_mgmt: [ + version: "0.1.0", + applications: [ + api_key_mgmt: :permanent, + logstash_logger_formatter: :load, + opentelemetry: :temporary + ], + include_executables_for: [:unix], + include_erts: false + ] + ] + end + + # Run "mix help deps" to learn about dependencies. 
+ defp deps do + [ + {:thrift, + git: "https://github.com/valitydev/elixir-thrift.git", + branch: "ft/subst-reserved-vars", + override: true}, + {:mg_proto, + git: "https://github.com/valitydev/machinegun-proto", branch: "ft/elixir-support"}, + {:snowflake, git: "https://github.com/valitydev/snowflake.git", branch: "master"}, + {:genlib, git: "https://github.com/valitydev/genlib.git", branch: "master"}, + {:logstash_logger_formatter, + git: "https://github.com/valitydev/logstash_logger_formatter.git", + branch: "master", + only: [:prod], + runtime: false}, + {:gproc, "~> 0.9.1", override: true}, + {:opentelemetry, "~> 1.3"}, + {:opentelemetry_api, "~> 1.2"}, + {:opentelemetry_exporter, "~> 1.6"} + ] + end +end diff --git a/example/mix.lock b/example/mix.lock new file mode 100644 index 00000000..21695b87 --- /dev/null +++ b/example/mix.lock @@ -0,0 +1,36 @@ +%{ + "acceptor_pool": {:hex, :acceptor_pool, "1.0.0", "43c20d2acae35f0c2bcd64f9d2bde267e459f0f3fd23dab26485bf518c281b21", [:rebar3], [], "hexpm", "0cbcd83fdc8b9ad2eee2067ef8b91a14858a5883cb7cd800e6fcd5803e158788"}, + "cache": {:hex, :cache, "2.3.3", "b23a5fe7095445a88412a6e614c933377e0137b44ffed77c9b3fef1a731a20b2", [:rebar3], [], "hexpm", "44516ce6fa03594d3a2af025dd3a87bfe711000eb730219e1ddefc816e0aa2f4"}, + "certifi": {:hex, :certifi, "2.8.0", "d4fb0a6bb20b7c9c3643e22507e42f356ac090a1dcea9ab99e27e0376d695eba", [:rebar3], [], "hexpm", "6ac7efc1c6f8600b08d625292d4bbf584e14847ce1b6b5c44d983d273e1097ea"}, + "chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"}, + "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, + "cowboy": {:hex, :cowboy, 
"2.9.0", "865dd8b6607e14cf03282e10e934023a1bd8be6f6bacf921a7e2a96d800cd452", [:make, :rebar3], [{:cowlib, "2.11.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "2c729f934b4e1aa149aff882f57c6372c15399a20d54f65c8d67bef583021bde"}, + "cowlib": {:hex, :cowlib, "2.11.0", "0b9ff9c346629256c42ebe1eeb769a83c6cb771a6ee5960bd110ab0b9b872063", [:make, :rebar3], [], "hexpm", "2b3e9da0b21c4565751a6d4901c20d1b4cc25cbb7fd50d91d2ab6dd287bc86a9"}, + "ctx": {:hex, :ctx, "0.6.0", "8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"}, + "genlib": {:git, "https://github.com/valitydev/genlib.git", "f6074551d6586998e91a97ea20acb47241254ff3", [branch: "master"]}, + "gproc": {:hex, :gproc, "0.9.1", "f1df0364423539cf0b80e8201c8b1839e229e5f9b3ccb944c5834626998f5b8c", [:rebar3], [], "hexpm", "905088e32e72127ed9466f0bac0d8e65704ca5e73ee5a62cb073c3117916d507"}, + "grpcbox": {:hex, :grpcbox, "0.17.1", "6e040ab3ef16fe699ffb513b0ef8e2e896da7b18931a1ef817143037c454bcce", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.15.1", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.9.1", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "4a3b5d7111daabc569dc9cbd9b202a3237d81c80bf97212fbc676832cb0ceb17"}, + "hackney": {:hex, :hackney, "1.18.0", "c4443d960bb9fba6d01161d01cd81173089686717d9490e5d3606644c48d121f", [:rebar3], [{:certifi, "~> 2.8.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", 
optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "9afcda620704d720db8c6a3123e9848d09c87586dc1c10479c42627b905b5c5e"}, + "hpack": {:hex, :hpack_erl, "0.3.0", "2461899cc4ab6a0ef8e970c1661c5fc6a52d3c25580bc6dd204f84ce94669926", [:rebar3], [], "hexpm", "d6137d7079169d8c485c6962dfe261af5b9ef60fbc557344511c1e65e3d95fb0"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "logstash_logger_formatter": {:git, "https://github.com/valitydev/logstash_logger_formatter.git", "e18d378ce155b4e3c06db9f5b696eb5ad1cf4f58", [branch: "master"]}, + "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, + "mg_proto": {:git, "https://github.com/valitydev/machinegun-proto", "5c2bcfdde8d91d7bd31011af8d6be1c558e9f2d3", [branch: "ft/elixir-support"]}, + "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "opentelemetry": {:hex, :opentelemetry, "1.3.0", "988ac3c26acac9720a1d4fb8d9dc52e95b45ecfec2d5b5583276a09e8936bc5e", [:rebar3], [{:opentelemetry_api, "~> 1.2.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "8e09edc26aad11161509d7ecad854a3285d88580f93b63b0b1cf0bac332bfcc0"}, + "opentelemetry_api": {:hex, :opentelemetry_api, "1.2.1", 
"7b69ed4f40025c005de0b74fce8c0549625d59cb4df12d15c32fe6dc5076ff42", [:mix, :rebar3], [{:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "6d7a27b7cad2ad69a09cabf6670514cafcec717c8441beb5c96322bac3d05350"}, + "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.6.0", "f4fbf69aa9f1541b253813221b82b48a9863bc1570d8ecc517bc510c0d1d3d8c", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.3", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.2", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "1802d1dca297e46f21e5832ecf843c451121e875f73f04db87355a6cb2ba1710"}, + "opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "0.2.0", "b67fe459c2938fcab341cb0951c44860c62347c005ace1b50f8402576f241435", [:mix, :rebar3], [], "hexpm", "d61fa1f5639ee8668d74b527e6806e0503efc55a42db7b5f39939d84c07d6895"}, + "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, + "prometheus": {:hex, :prometheus, "4.8.1", "fa76b152555273739c14b06f09f485cf6d5d301fe4e9d31b7ff803d26025d7a0", [:mix, :rebar3], [{:quantile_estimator, "~> 0.2.1", [hex: :quantile_estimator, repo: "hexpm", optional: false]}], "hexpm", "6edfbe928d271c7f657a6f2c46258738086584bd6cae4a000b8b9a6009ba23a5"}, + "quantile_estimator": {:hex, :quantile_estimator, "0.2.1", "ef50a361f11b5f26b5f16d0696e46a9e4661756492c981f7b2229ef42ff1cd15", [:rebar3], [], "hexpm", "282a8a323ca2a845c9e6f787d166348f776c1d4a41ede63046d72d422e3da946"}, + "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", 
"49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"}, + "snowflake": {:git, "https://github.com/valitydev/snowflake.git", "de159486ef40cec67074afe71882bdc7f7deab72", [branch: "master"]}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, + "thrift": {:git, "https://github.com/valitydev/elixir-thrift.git", "f561f7746c3f5634021258c86ea02b94579ef6eb", [branch: "ft/subst-reserved-vars"]}, + "tls_certificate_check": {:hex, :tls_certificate_check, "1.22.1", "0f450cc1568a67a65ce5e15df53c53f9a098c3da081c5f126199a72505858dc1", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3092be0babdc0e14c2e900542351e066c0fa5a9cf4b3597559ad1e67f07938c0"}, + "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, + "woody": {:git, "https://github.com/valitydev/woody_erlang.git", "c4f6b9c62c77699d7489c6913f1a157cd9c7ca1e", [branch: "ft/bump-compat-woody-ex"]}, + "woody_ex": {:git, "https://github.com/valitydev/woody_ex.git", "e8d89c1dc8be889a0371227730b45b84a94bd67d", [branch: "ft/subst-reserved-vars"]}, +} diff --git a/example/riak/riak_user.conf b/example/riak/riak_user.conf new file mode 100644 index 00000000..88f6dcbf --- /dev/null +++ b/example/riak/riak_user.conf @@ -0,0 +1,8 @@ +## Specifies the storage engine used for Riak's key-value data +## and secondary indexes (if supported). 
+## +## Default: bitcask +## +## Acceptable values: +## - one of: bitcask, leveldb, memory, multi, prefix_multi +storage_backend = leveldb diff --git a/example/test/load_processor_test.exs b/example/test/load_processor_test.exs new file mode 100644 index 00000000..6f5cdf21 --- /dev/null +++ b/example/test/load_processor_test.exs @@ -0,0 +1,8 @@ +defmodule LoadProcessorTest do + use ExUnit.Case + doctest LoadProcessor + + test "truthness" do + assert true + end +end diff --git a/example/test/test_helper.exs b/example/test/test_helper.exs new file mode 100644 index 00000000..869559e7 --- /dev/null +++ b/example/test/test_helper.exs @@ -0,0 +1 @@ +ExUnit.start() diff --git a/rebar.lock b/rebar.lock index 90b966f9..c6f57f77 100644 --- a/rebar.lock +++ b/rebar.lock @@ -35,9 +35,9 @@ {<<"metrics">>,{pkg,<<"metrics">>,<<"1.0.1">>},2}, {<<"mg_proto">>, {git,"https://github.com/valitydev/machinegun-proto", - {ref,"96f7f11b184c29d8b7e83cd7646f3f2c13662bda"}}, + {ref,"3decc8f8b13c9cd1701deab47781aacddd7dbc92"}}, 0}, - {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.2.0">>},2}, + {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.3.0">>},2}, {<<"msgpack">>, {git,"https://github.com/msgpack/msgpack-erlang", {ref,"9d56647ed77498c7655da39891c4985142697083"}}, @@ -112,7 +112,7 @@ {<<"jsx">>, <<"D12516BAA0BB23A59BB35DCCAF02A1BD08243FCBB9EFE24F2D9D056CCFF71268">>}, {<<"kafka_protocol">>, <<"FC696880C73483C8B032C4BB60F2873046035C7824E1EDCB924CFCE643CF23DD">>}, {<<"metrics">>, <<"25F094DEA2CDA98213CECC3AEFF09E940299D950904393B2A29D191C346A8486">>}, - {<<"mimerl">>, <<"67E2D3F571088D5CFD3E550C383094B47159F3EEE8FFA08E64106CDF5E981BE3">>}, + {<<"mimerl">>, <<"D0CD9FC04B9061F82490F6581E0128379830E78535E017F7780F37FEA7545726">>}, {<<"opentelemetry">>, <<"988AC3C26ACAC9720A1D4FB8D9DC52E95B45ECFEC2D5B5583276A09E8936BC5E">>}, {<<"opentelemetry_api">>, <<"7B69ED4F40025C005DE0B74FCE8C0549625D59CB4DF12D15C32FE6DC5076FF42">>}, {<<"opentelemetry_exporter">>, 
<<"1D8809C0D4F4ACF986405F7700ED11992BCBDB6A4915DD11921E80777FFA7167">>}, @@ -148,7 +148,7 @@ {<<"jsx">>, <<"0C5CC8FDC11B53CC25CF65AC6705AD39E54ECC56D1C22E4ADB8F5A53FB9427F3">>}, {<<"kafka_protocol">>, <<"687BFD9989998EC8FBBC3ED50D1239A6C07A7DC15B52914AD477413B89ECB621">>}, {<<"metrics">>, <<"69B09ADDDC4F74A40716AE54D140F93BEB0FB8978D8636EADED0C31B6F099F16">>}, - {<<"mimerl">>, <<"F278585650AA581986264638EBF698F8BB19DF297F66AD91B18910DFC6E19323">>}, + {<<"mimerl">>, <<"A1E15A50D1887217DE95F0B9B0793E32853F7C258A5CD227650889B38839FE9D">>}, {<<"opentelemetry">>, <<"8E09EDC26AAD11161509D7ECAD854A3285D88580F93B63B0B1CF0BAC332BFCC0">>}, {<<"opentelemetry_api">>, <<"6D7A27B7CAD2AD69A09CABF6670514CAFCEC717C8441BEB5C96322BAC3D05350">>}, {<<"opentelemetry_exporter">>, <<"2B40007F509D38361744882FD060A8841AF772AB83BB542AA5350908B303AD65">>}, diff --git a/rel_scripts/configurator.escript b/rel_scripts/configurator.escript index e79a67e8..7caac23b 100755 --- a/rel_scripts/configurator.escript +++ b/rel_scripts/configurator.escript @@ -269,8 +269,8 @@ health_check(YamlConfig) -> health_check_fun(_YamlConfig) -> %% TODO Review necessity of that configuration handle - %% case ?C:conf([process_registry, module], YamlConfig, <<"mg_core_procreg_global">>) of - %% <<"mg_core_procreg_global">> -> global + %% case ?C:conf([process_registry, module], YamlConfig, <<"mg_procreg_global">>) of + %% <<"mg_procreg_global">> -> global %% end. global. @@ -322,7 +322,7 @@ storage(NS, YamlConfig) -> mg_core_storage_memory; <<"riak">> -> PoolSize = ?C:conf([storage, pool, size], YamlConfig, 100), - {mg_core_storage_riak, #{ + {mg_riak_storage, #{ host => ?C:conf([storage, host], YamlConfig), port => ?C:conf([storage, port], YamlConfig), bucket => NS, @@ -504,7 +504,7 @@ modernizer(Name, ModernizerYamlConfig) -> } }. --spec scheduler(mg_core_quota:share(), ?C:yaml_config()) -> mg_core_machine:scheduler_opt(). +-spec scheduler(mg_skd_quota:share(), ?C:yaml_config()) -> mg_core_machine:scheduler_opt(). 
scheduler(Share, Config) -> #{ max_scan_limit => ?C:conf([scan_limit], Config, 5000), @@ -543,18 +543,21 @@ event_sink({Name, ESYamlConfig}) -> event_sink(?C:atom(?C:conf([type], ESYamlConfig)), Name, ESYamlConfig). event_sink(kafka, Name, ESYamlConfig) -> - {mg_core_events_sink_kafka, #{ + {mg_event_sink_kafka, #{ name => ?C:atom(Name), client => ?C:atom(?C:conf([client], ESYamlConfig)), topic => ?C:conf([topic], ESYamlConfig) }}. procreg(YamlConfig) -> - % Use process_registry if it's set up or gproc otherwise + %% Use process_registry if it's set up or gproc otherwise + %% TODO Add support for aliases for procreg modules. It's + %% improper to expose internal module name in yaml + %% configuration file. conf_with( [process_registry], YamlConfig, - mg_core_procreg_gproc, + mg_procreg_gproc, fun(ProcRegYamlConfig) -> ?C:atom(?C:conf([module], ProcRegYamlConfig)) end ).