diff --git a/pkg/capabilities/base_trigger.go b/pkg/capabilities/base_trigger.go new file mode 100644 index 0000000000..6c31681efa --- /dev/null +++ b/pkg/capabilities/base_trigger.go @@ -0,0 +1,252 @@ +package capabilities + +import ( + "context" + "sync" + "time" + + "google.golang.org/protobuf/types/known/anypb" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" +) + +type PendingEvent struct { + TriggerId string + WorkflowId string + EventId string + AnyTypeURL string // Payload type + Payload []byte + FirstAt time.Time + LastSentAt time.Time + Attempts int +} + +type EventStore interface { + Insert(ctx context.Context, rec PendingEvent) error + Delete(ctx context.Context, triggerId, workflowId, eventId string) error + List(ctx context.Context) ([]PendingEvent, error) +} + +type OutboundSend func(ctx context.Context, te TriggerEvent, workflowId string) error +type LostHook func(ctx context.Context, rec PendingEvent) + +// key builds the composite lookup key used in b.pending +func key(triggerId, workflowId, eventId string) string { + return triggerId + "|" + workflowId + "|" + eventId +} + +type BaseTriggerCapability struct { + /* + Keeps track of workflow registrations (similar to LLO streams trigger). + Handles retransmits based on T_retransmit and T_max. + Persists pending events in the DB to be resilient to node restarts. + */ + // TODO: We will want these to be configurable per chain + tRetransmit time.Duration // time window for an event being ACKd before we retransmit + tMax time.Duration // timeout before events are considered lost if not ACKd + + store EventStore + send OutboundSend + lost LostHook + lggr logger.Logger + + mu sync.Mutex + pending map[string]*PendingEvent // key(triggerID|workflowID|eventID) + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewBaseTriggerCapability( + store EventStore, + send OutboundSend, + lost LostHook, + lggr logger.Logger, + tRetransmit, tMax time.Duration, +) *BaseTriggerCapability { + ctx, cancel := context.WithCancel(context.Background()) + return &BaseTriggerCapability{ + store: store, + send: send, + lost: lost, + lggr: lggr, + tRetransmit: tRetransmit, + tMax: tMax, + pending: make(map[string]*PendingEvent), + ctx: ctx, + cancel: cancel, + } +} + +func (b *BaseTriggerCapability) Start(ctx context.Context) error { + b.ctx, b.cancel = context.WithCancel(ctx) + + recs, err := b.store.List(ctx) + if err != nil { + return err + } + + // Initialize in-memory persistence + b.pending = make(map[string]*PendingEvent) + for i := range recs { + r := recs[i] + b.pending[key(r.TriggerId, r.WorkflowId, r.EventId)] = &r + } + + b.wg.Add(1) + go func() { + defer b.wg.Done() + b.retransmitLoop() + }() + + for _, r := range recs { + _ = b.trySend(ctx, r.TriggerId, r.WorkflowId, r.EventId) + } + return nil +} + +func (b *BaseTriggerCapability) Stop() { + b.cancel() + b.wg.Wait() +} + +func (b *BaseTriggerCapability) DeliverEvent( + ctx context.Context, + te TriggerEvent, + workflowIds []string, +) error { + /* + Base Trigger Capability can interact with the Don2Don layer (in the remote capability setting) + as well as directly with a consumer (in the local setting). 
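+
+	   For illustration only — a minimal, hedged sketch of how a concrete trigger might wire
+	   this up (dispatcher, db, lggr and registeredWorkflowIDs are assumed to exist in the
+	   caller and are not part of this package):
+
+	     store, err := NewPostgresEventStore(ctx, db, "base_trigger_pending_events")
+	     if err != nil {
+	         return err
+	     }
+	     base := NewBaseTriggerCapability(
+	         store,
+	         func(ctx context.Context, te TriggerEvent, workflowId string) error {
+	             return dispatcher.Send(ctx, workflowId, te) // Don2Don layer or a local channel
+	         },
+	         func(ctx context.Context, rec PendingEvent) {
+	             lggr.Errorw("event lost", "workflowId", rec.WorkflowId, "eventId", rec.EventId)
+	         },
+	         lggr,
+	         30*time.Second, // T_retransmit
+	         2*time.Minute,  // T_max
+	     )
+	     if err := base.Start(ctx); err != nil {
+	         return err
+	     }
+	     _ = base.DeliverEvent(ctx, te, registeredWorkflowIDs)
+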
+ */ + now := time.Now() + + for _, workflowId := range workflowIds { + rec := PendingEvent{ + TriggerId: te.TriggerType, + WorkflowId: workflowId, + EventId: te.ID, + AnyTypeURL: te.Payload.GetTypeUrl(), + Payload: te.Payload.GetValue(), + FirstAt: now, + } + + if err := b.store.Insert(ctx, rec); err != nil { + return err + } + + b.mu.Lock() + b.pending[key(te.TriggerType, workflowId, te.ID)] = &rec + b.mu.Unlock() + + _ = b.trySend(ctx, te.TriggerType, workflowId, te.ID) + } + return nil // only when the event is successfully persisted and ready to be reliably delivered +} + +func (b *BaseTriggerCapability) AckEvent( + ctx context.Context, + triggerId, workflowId, eventId string, +) error { + k := key(triggerId, workflowId, eventId) // NOTE: WorkflowID we want to start ;P + + b.mu.Lock() + delete(b.pending, k) + b.mu.Unlock() + + return b.store.Delete(ctx, triggerId, workflowId, eventId) +} + +func (b *BaseTriggerCapability) retransmitLoop() { + ticker := time.NewTicker(b.tRetransmit / 2) + defer ticker.Stop() + + for { + select { + case <-b.ctx.Done(): + return + case <-ticker.C: + b.scanPending() + } + } +} + +func (b *BaseTriggerCapability) scanPending() { + now := time.Now() + + b.mu.Lock() + toResend := make([]PendingEvent, 0, len(b.pending)) + toLost := make([]PendingEvent, 0) + for k, rec := range b.pending { + // LOST: exceeded max time without ACK + if now.Sub(rec.FirstAt) >= b.tMax { + toLost = append(toLost, *rec) + delete(b.pending, k) + continue + } + + // RESEND: hasn't been sent recently enough + if rec.LastSentAt.IsZero() || now.Sub(rec.LastSentAt) >= b.tRetransmit { + toResend = append(toResend, PendingEvent{ + TriggerId: rec.TriggerId, + WorkflowId: rec.WorkflowId, + EventId: rec.EventId, + }) + } + } + b.mu.Unlock() + + for _, rec := range toLost { + err := b.store.Delete(b.ctx, rec.TriggerId, rec.WorkflowId, rec.EventId) + if err != nil { + b.lggr.Errorw("failed to delete event from store") + } + b.lost(b.ctx, rec) + } + + for _, k := range toResend { + _ = b.trySend(b.ctx, k.TriggerId, k.WorkflowId, k.EventId) + } +} + +// trySend attempts a delivery for the given (triggerId, workflowId, eventId). +// It updates Attempts and LastSentAt on every attempt. Success is determined +// by a later AckEvent; this method does NOT remove the record from memory/DB. 
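+//
+// Consumer-side sketch (hedged; consumerProcess is a hypothetical handler, not part of
+// this package) showing how an ACK completes the cycle that trySend starts:
+//
+//	if err := consumerProcess(ctx, te); err == nil {
+//		_ = b.AckEvent(ctx, te.TriggerType, workflowId, te.ID)
+//	}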
+func (b *BaseTriggerCapability) trySend(ctx context.Context, triggerId, workflowId, eventId string) error { + k := key(triggerId, workflowId, eventId) + + b.mu.Lock() + rec, ok := b.pending[k] + if !ok || rec == nil { + b.mu.Unlock() + return nil + } + rec.Attempts++ + rec.LastSentAt = time.Now() + + anyPayload := &anypb.Any{ + TypeUrl: rec.AnyTypeURL, + Value: append([]byte(nil), rec.Payload...), + } + + te := TriggerEvent{ + TriggerType: triggerId, + ID: eventId, + Payload: anyPayload, + } + b.mu.Unlock() + + if err := b.send(ctx, te, workflowId); err != nil { + if b.lggr != nil { + b.lggr.Errorf("trySend failed: trigger=%s workflow=%s event=%s attempt=%d err=%v", + triggerId, workflowId, eventId, rec.Attempts, err) + } + return err + } + if b.lggr != nil { + b.lggr.Debugf("trySend dispatched: trigger=%s workflow=%s event=%s attempt=%d", + triggerId, workflowId, eventId, rec.Attempts) + } + return nil +} diff --git a/pkg/capabilities/base_trigger_test.go b/pkg/capabilities/base_trigger_test.go new file mode 100644 index 0000000000..3183bd4066 --- /dev/null +++ b/pkg/capabilities/base_trigger_test.go @@ -0,0 +1,253 @@ +package capabilities + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/anypb" +) + +type memStore struct { + mu sync.Mutex + recs map[string]PendingEvent +} + +func newMemStore() *memStore { + return &memStore{recs: make(map[string]PendingEvent)} +} + +func (m *memStore) Insert(ctx context.Context, rec PendingEvent) error { + m.mu.Lock() + defer m.mu.Unlock() + m.recs[key(rec.TriggerId, rec.WorkflowId, rec.EventId)] = rec + return nil +} + +func (m *memStore) Delete(ctx context.Context, triggerId, workflowId, eventId string) error { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.recs, key(triggerId, workflowId, eventId)) + return nil +} + +func (m *memStore) List(ctx context.Context) ([]PendingEvent, error) { + m.mu.Lock() + defer m.mu.Unlock() + out := make([]PendingEvent, 0, len(m.recs)) + for _, r := range m.recs { + out = append(out, r) + } + return out, nil +} + +type sendProbe struct { + mu sync.Mutex + calls []struct { + TE TriggerEvent + WorkflowID string + } + // if set, return error for the first N sends + failFirstN int32 +} + +func (p *sendProbe) fn(ctx context.Context, te TriggerEvent, workflowId string) error { + // Optionally fail some sends + if atomic.LoadInt32(&p.failFirstN) > 0 { + atomic.AddInt32(&p.failFirstN, -1) + return assertErr // sentinel below + } + p.mu.Lock() + p.calls = append(p.calls, struct { + TE TriggerEvent + WorkflowID string + }{te, workflowId}) + p.mu.Unlock() + return nil +} + +func (p *sendProbe) count() int { + p.mu.Lock() + defer p.mu.Unlock() + return len(p.calls) +} + +var assertErr = &temporaryError{"boom"} + +type temporaryError struct{ s string } + +func (e *temporaryError) Error() string { return e.s } + +// lost hook probe +type lostProbe struct { + mu sync.Mutex + calls []PendingEvent +} + +func (p *lostProbe) fn(ctx context.Context, rec PendingEvent) { + p.mu.Lock() + p.calls = append(p.calls, rec) + p.mu.Unlock() +} + +func (p *lostProbe) count() int { + p.mu.Lock() + defer p.mu.Unlock() + return len(p.calls) +} + +func newBase(t *testing.T, store EventStore, send OutboundSend, lost LostHook) *BaseTriggerCapability { + t.Helper() + b := &BaseTriggerCapability{ + tRetransmit: 30 * time.Millisecond, + tMax: 120 * time.Millisecond, + store: store, + send: send, + lost: lost, + // lggr: your test logger if desired + } + 
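+	// Note: scanPending logs via b.lggr.Errorw without a nil check when store.Delete fails,
+	// so set lggr above (e.g. a test logger) if the store under test can return delete errors.
+	// memStore never does, so leaving it nil is safe for these tests.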
return b +} + +func ctxWithCancel(t *testing.T) (context.Context, context.CancelFunc) { + t.Helper() + ctx, cancel := context.WithCancel(t.Context()) + return ctx, cancel +} + +func TestStart_LoadsAndSendsPersisted(t *testing.T) { + store := newMemStore() + probe := &sendProbe{} + lostp := &lostProbe{} + + // Preload store with one record + rec := PendingEvent{ + TriggerId: "trigA", + WorkflowId: "wf1", + EventId: "e1", + AnyTypeURL: "type.googleapis.com/some.Msg", + Payload: []byte("payload"), + FirstAt: time.Now().Add(-1 * time.Minute), + } + require.NoError(t, store.Insert(context.Background(), rec)) + + b := newBase(t, store, probe.fn, lostp.fn) + ctx, cancel := ctxWithCancel(t) + defer cancel() + + require.NoError(t, b.Start(ctx)) + t.Cleanup(func() { b.cancel(); b.wg.Wait() }) + + // Initial send triggered on Start + require.Eventually(t, func() bool { return probe.count() >= 1 }, 200*time.Millisecond, 5*time.Millisecond) +} + +func TestDeliverEvent_PersistsAndSends(t *testing.T) { + store := newMemStore() + probe := &sendProbe{} + lostp := &lostProbe{} + b := newBase(t, store, probe.fn, lostp.fn) + ctx, cancel := ctxWithCancel(t) + defer cancel() + + require.NoError(t, b.Start(ctx)) + t.Cleanup(func() { b.cancel(); b.wg.Wait() }) + + te := TriggerEvent{ + TriggerType: "trigB", + ID: "e2", + Payload: &anypb.Any{TypeUrl: "type.googleapis.com/thing", Value: []byte("x")}, + } + require.NoError(t, b.deliverEvent(ctx, te, []string{"wf1", "wf2"})) + + // Persisted twice (two workflows) + recs, _ := store.List(ctx) + require.Len(t, recs, 2) + + // Sent twice + require.Eventually(t, func() bool { return probe.count() >= 2 }, 200*time.Millisecond, 5*time.Millisecond) +} + +func TestAckEvent_StopsRetransmit(t *testing.T) { + store := newMemStore() + probe := &sendProbe{} + lostp := &lostProbe{} + b := newBase(t, store, probe.fn, lostp.fn) + ctx, cancel := ctxWithCancel(t) + defer cancel() + + require.NoError(t, b.Start(ctx)) + t.Cleanup(func() { b.cancel(); b.wg.Wait() }) + + te := TriggerEvent{ + TriggerType: "trigC", + ID: "e3", + Payload: &anypb.Any{TypeUrl: "type.googleapis.com/thing", Value: []byte("x")}, + } + require.NoError(t, b.deliverEvent(ctx, te, []string{"wf1"})) + require.Eventually(t, func() bool { return probe.count() >= 1 }, 200*time.Millisecond, 5*time.Millisecond) + + // Ack and ensure no more sends occur after a couple of retransmit periods + require.NoError(t, b.AckEvent(ctx, "trigC", "wf1", "e3")) + sentBefore := probe.count() + time.Sleep(3 * b.tRetransmit) + require.Equal(t, sentBefore, probe.count(), "no further retransmits after ACK") +} + +func TestRetryThenLost_AfterTmax(t *testing.T) { + store := newMemStore() + probe := &sendProbe{failFirstN: 1000} // always fail + lostp := &lostProbe{} + b := newBase(t, store, probe.fn, lostp.fn) + + // tighten timers for the test + b.tRetransmit = 20 * time.Millisecond + b.tMax = 80 * time.Millisecond + + ctx, cancel := ctxWithCancel(t) + defer cancel() + + require.NoError(t, b.Start(ctx)) + t.Cleanup(func() { b.cancel(); b.wg.Wait() }) + + te := TriggerEvent{ + TriggerType: "trigD", + ID: "e4", + Payload: &anypb.Any{TypeUrl: "type.googleapis.com/thing", Value: []byte("x")}, + } + require.NoError(t, b.deliverEvent(ctx, te, []string{"wf1"})) + + // Should attempt several sends, then mark lost and delete from store + require.Eventually(t, func() bool { return lostp.count() >= 1 }, 500*time.Millisecond, 5*time.Millisecond) + + // Ensure the record is gone from the store after lost + recs, _ := store.List(ctx) + require.Len(t, recs, 
0) +} + +func TestTrySendErrorIsIgnoredByCallSites(t *testing.T) { + store := newMemStore() + probe := &sendProbe{failFirstN: 1} // first send fails + lostp := &lostProbe{} + b := newBase(t, store, probe.fn, lostp.fn) + ctx, cancel := ctxWithCancel(t) + defer cancel() + + require.NoError(t, b.Start(ctx)) + t.Cleanup(func() { b.cancel(); b.wg.Wait() }) + + te := TriggerEvent{ + TriggerType: "trigE", + ID: "e5", + Payload: &anypb.Any{TypeUrl: "type.googleapis.com/thing", Value: []byte("x")}, + } + // deliverEvent should not return an error even if the first send fails; + // retransmitLoop will retry later. + require.NoError(t, b.deliverEvent(ctx, te, []string{"wf1"})) + + // Eventually should succeed on a subsequent attempt (after the first forced failure) + require.Eventually(t, func() bool { return probe.count() >= 1 }, 300*time.Millisecond, 5*time.Millisecond) +} diff --git a/pkg/capabilities/capabilities.go b/pkg/capabilities/capabilities.go index ce5074678d..3ec8afdced 100644 --- a/pkg/capabilities/capabilities.go +++ b/pkg/capabilities/capabilities.go @@ -338,6 +338,7 @@ type OCRAttributedOnchainSignature struct { type TriggerExecutable interface { RegisterTrigger(ctx context.Context, request TriggerRegistrationRequest) (<-chan TriggerResponse, error) UnregisterTrigger(ctx context.Context, request TriggerRegistrationRequest) error + AckEvent(ctx context.Context, eventId string) error } // TriggerCapability interface needs to be implemented by all trigger capabilities. @@ -551,6 +552,7 @@ func MustNewRemoteCapabilityInfo( const ( DefaultRegistrationRefresh = 30 * time.Second DefaultRegistrationExpiry = 2 * time.Minute + DefaultEventTimeout = 2 * time.Minute // TODO: determine best value DefaultMessageExpiry = 2 * time.Minute DefaultBatchSize = 100 DefaultBatchCollectionPeriod = 100 * time.Millisecond @@ -561,6 +563,7 @@ const ( type RemoteTriggerConfig struct { RegistrationRefresh time.Duration RegistrationExpiry time.Duration + EventTimeout time.Duration MinResponsesToAggregate uint32 MessageExpiry time.Duration MaxBatchSize uint32 @@ -594,6 +597,9 @@ func (c *RemoteTriggerConfig) ApplyDefaults() { if c.RegistrationExpiry == 0 { c.RegistrationExpiry = DefaultRegistrationExpiry } + if c.EventTimeout == 0 { + c.EventTimeout = DefaultEventTimeout + } if c.MessageExpiry == 0 { c.MessageExpiry = DefaultMessageExpiry } diff --git a/pkg/capabilities/event_store.go b/pkg/capabilities/event_store.go new file mode 100644 index 0000000000..91d11325bf --- /dev/null +++ b/pkg/capabilities/event_store.go @@ -0,0 +1,108 @@ +package capabilities + +import ( + "context" + "database/sql" + "fmt" + "time" +) + +type pgEventStore struct { + db *sql.DB + tableName string +} + +// NewPostgresEventStore creates the table (if needed) and returns an EventStore backed by Postgres. 
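+//
+// Minimal usage sketch (hedged; the driver name and DSN are placeholders — use whichever
+// Postgres driver the host application already registers):
+//
+//	db, err := sql.Open("pgx", dsn)
+//	if err != nil {
+//		return err
+//	}
+//	store, err := NewPostgresEventStore(ctx, db, "base_trigger_pending_events")
+//	if err != nil {
+//		return err
+//	}
+//	// store satisfies EventStore and can be passed straight to NewBaseTriggerCapability.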
+func NewPostgresEventStore(ctx context.Context, db *sql.DB, tableName string) (EventStore, error) { + s := &pgEventStore{db: db, tableName: tableName} + if err := s.ensureSchema(ctx); err != nil { + return nil, err + } + return s, nil +} + +func (s *pgEventStore) ensureSchema(ctx context.Context) error { + ddl := fmt.Sprintf(` +CREATE TABLE IF NOT EXISTS %s ( + trigger_id TEXT NOT NULL, + workflow_id TEXT NOT NULL, + event_id TEXT NOT NULL, + any_type_url TEXT NOT NULL, + payload BYTEA NOT NULL, + first_at TIMESTAMPTZ NOT NULL, + last_sent_at TIMESTAMPTZ, + attempts INT NOT NULL DEFAULT 0, + PRIMARY KEY (trigger_id, workflow_id, event_id) +); +CREATE INDEX IF NOT EXISTS %s_firstat_idx ON %s (first_at); +`, s.tableName, s.tableName, s.tableName) + + // Exec can run multiple statements on Postgres. + _, err := s.db.ExecContext(ctx, ddl) + return err +} + +func (s *pgEventStore) Insert(ctx context.Context, rec PendingEvent) error { + q := fmt.Sprintf(` +INSERT INTO %s (trigger_id, workflow_id, event_id, any_type_url, payload, first_at, last_sent_at, attempts) +VALUES ($1,$2,$3,$4,$5,$6,$7,$8) +ON CONFLICT (trigger_id, workflow_id, event_id) +DO UPDATE SET + any_type_url = EXCLUDED.any_type_url, + payload = EXCLUDED.payload, + first_at = LEAST(%s.first_at, EXCLUDED.first_at), + last_sent_at = EXCLUDED.last_sent_at, + attempts = EXCLUDED.attempts;`, s.tableName, s.tableName) + + _, err := s.db.ExecContext( + ctx, q, + rec.TriggerId, rec.WorkflowId, rec.EventId, + rec.AnyTypeURL, rec.Payload, + rec.FirstAt, nullTime(rec.LastSentAt), rec.Attempts, + ) + return err +} + +func (s *pgEventStore) Delete(ctx context.Context, triggerId, workflowId, eventId string) error { + q := fmt.Sprintf(`DELETE FROM %s WHERE trigger_id=$1 AND workflow_id=$2 AND event_id=$3;`, s.tableName) + _, err := s.db.ExecContext(ctx, q, triggerId, workflowId, eventId) + return err +} + +func (s *pgEventStore) List(ctx context.Context) ([]PendingEvent, error) { + q := fmt.Sprintf(` +SELECT trigger_id, workflow_id, event_id, any_type_url, payload, first_at, last_sent_at, attempts +FROM %s +ORDER BY first_at ASC;`, s.tableName) + + rows, err := s.db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer rows.Close() + + var out []PendingEvent + for rows.Next() { + var rec PendingEvent + var lastSent sql.NullTime + if err := rows.Scan( + &rec.TriggerId, &rec.WorkflowId, &rec.EventId, + &rec.AnyTypeURL, &rec.Payload, + &rec.FirstAt, &lastSent, &rec.Attempts, + ); err != nil { + return nil, err + } + if lastSent.Valid { + rec.LastSentAt = lastSent.Time + } + out = append(out, rec) + } + return out, rows.Err() +} + +func nullTime(t time.Time) sql.NullTime { + if t.IsZero() { + return sql.NullTime{Valid: false} + } + return sql.NullTime{Time: t, Valid: true} +} diff --git a/pkg/capabilities/pb/registry.pb.go b/pkg/capabilities/pb/registry.pb.go index d6b2eef92c..b75df4c8d1 100644 --- a/pkg/capabilities/pb/registry.pb.go +++ b/pkg/capabilities/pb/registry.pb.go @@ -165,10 +165,11 @@ type RemoteTriggerConfig struct { state protoimpl.MessageState `protogen:"open.v1"` RegistrationRefresh *durationpb.Duration `protobuf:"bytes,1,opt,name=registrationRefresh,proto3" json:"registrationRefresh,omitempty"` RegistrationExpiry *durationpb.Duration `protobuf:"bytes,2,opt,name=registrationExpiry,proto3" json:"registrationExpiry,omitempty"` - MinResponsesToAggregate uint32 `protobuf:"varint,3,opt,name=minResponsesToAggregate,proto3" json:"minResponsesToAggregate,omitempty"` - MessageExpiry *durationpb.Duration 
`protobuf:"bytes,4,opt,name=messageExpiry,proto3" json:"messageExpiry,omitempty"` - MaxBatchSize uint32 `protobuf:"varint,5,opt,name=maxBatchSize,proto3" json:"maxBatchSize,omitempty"` - BatchCollectionPeriod *durationpb.Duration `protobuf:"bytes,6,opt,name=batchCollectionPeriod,proto3" json:"batchCollectionPeriod,omitempty"` + EventTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=eventTimeout,proto3" json:"eventTimeout,omitempty"` + MinResponsesToAggregate uint32 `protobuf:"varint,4,opt,name=minResponsesToAggregate,proto3" json:"minResponsesToAggregate,omitempty"` + MessageExpiry *durationpb.Duration `protobuf:"bytes,5,opt,name=messageExpiry,proto3" json:"messageExpiry,omitempty"` + MaxBatchSize uint32 `protobuf:"varint,6,opt,name=maxBatchSize,proto3" json:"maxBatchSize,omitempty"` + BatchCollectionPeriod *durationpb.Duration `protobuf:"bytes,7,opt,name=batchCollectionPeriod,proto3" json:"batchCollectionPeriod,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -217,6 +218,13 @@ func (x *RemoteTriggerConfig) GetRegistrationExpiry() *durationpb.Duration { return nil } +func (x *RemoteTriggerConfig) GetEventTimeout() *durationpb.Duration { + if x != nil { + return x.EventTimeout + } + return nil +} + func (x *RemoteTriggerConfig) GetMinResponsesToAggregate() uint32 { if x != nil { return x.MinResponsesToAggregate @@ -658,14 +666,15 @@ var File_registry_proto protoreflect.FileDescriptor const file_registry_proto_rawDesc = "" + "\n" + - "\x0eregistry.proto\x12\x04loop\x1a\x16values/v1/values.proto\x1a\x1egoogle/protobuf/duration.proto\"\x9d\x03\n" + + "\x0eregistry.proto\x12\x04loop\x1a\x16values/v1/values.proto\x1a\x1egoogle/protobuf/duration.proto\"\xdc\x03\n" + "\x13RemoteTriggerConfig\x12K\n" + "\x13registrationRefresh\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\x13registrationRefresh\x12I\n" + - "\x12registrationExpiry\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x12registrationExpiry\x128\n" + - "\x17minResponsesToAggregate\x18\x03 \x01(\rR\x17minResponsesToAggregate\x12?\n" + - "\rmessageExpiry\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\rmessageExpiry\x12\"\n" + - "\fmaxBatchSize\x18\x05 \x01(\rR\fmaxBatchSize\x12O\n" + - "\x15batchCollectionPeriod\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x15batchCollectionPeriod\"Z\n" + + "\x12registrationExpiry\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x12registrationExpiry\x12=\n" + + "\feventTimeout\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\feventTimeout\x128\n" + + "\x17minResponsesToAggregate\x18\x04 \x01(\rR\x17minResponsesToAggregate\x12?\n" + + "\rmessageExpiry\x18\x05 \x01(\v2\x19.google.protobuf.DurationR\rmessageExpiry\x12\"\n" + + "\fmaxBatchSize\x18\x06 \x01(\rR\fmaxBatchSize\x12O\n" + + "\x15batchCollectionPeriod\x18\a \x01(\v2\x19.google.protobuf.DurationR\x15batchCollectionPeriod\"Z\n" + "\x12RemoteTargetConfig\x12D\n" + "\x1drequestHashExcludedAttributes\x18\x01 \x03(\tR\x1drequestHashExcludedAttributes\"\xc5\x03\n" + "\x16RemoteExecutableConfig\x12D\n" + @@ -740,28 +749,29 @@ var file_registry_proto_goTypes = []any{ var file_registry_proto_depIdxs = []int32{ 10, // 0: loop.RemoteTriggerConfig.registrationRefresh:type_name -> google.protobuf.Duration 10, // 1: loop.RemoteTriggerConfig.registrationExpiry:type_name -> google.protobuf.Duration - 10, // 2: loop.RemoteTriggerConfig.messageExpiry:type_name -> google.protobuf.Duration - 10, // 3: loop.RemoteTriggerConfig.batchCollectionPeriod:type_name -> google.protobuf.Duration - 0, // 4: 
loop.RemoteExecutableConfig.transmission_schedule:type_name -> loop.TransmissionSchedule - 10, // 5: loop.RemoteExecutableConfig.delta_stage:type_name -> google.protobuf.Duration - 10, // 6: loop.RemoteExecutableConfig.request_timeout:type_name -> google.protobuf.Duration - 1, // 7: loop.RemoteExecutableConfig.request_hasher_type:type_name -> loop.RequestHasherType - 2, // 8: loop.AggregatorConfig.aggregator_type:type_name -> loop.AggregatorType - 3, // 9: loop.CapabilityMethodConfig.remote_trigger_config:type_name -> loop.RemoteTriggerConfig - 5, // 10: loop.CapabilityMethodConfig.remote_executable_config:type_name -> loop.RemoteExecutableConfig - 6, // 11: loop.CapabilityMethodConfig.aggregator_config:type_name -> loop.AggregatorConfig - 11, // 12: loop.CapabilityConfig.default_config:type_name -> values.v1.Map - 3, // 13: loop.CapabilityConfig.remote_trigger_config:type_name -> loop.RemoteTriggerConfig - 4, // 14: loop.CapabilityConfig.remote_target_config:type_name -> loop.RemoteTargetConfig - 5, // 15: loop.CapabilityConfig.remote_executable_config:type_name -> loop.RemoteExecutableConfig - 11, // 16: loop.CapabilityConfig.restricted_config:type_name -> values.v1.Map - 9, // 17: loop.CapabilityConfig.method_configs:type_name -> loop.CapabilityConfig.MethodConfigsEntry - 7, // 18: loop.CapabilityConfig.MethodConfigsEntry.value:type_name -> loop.CapabilityMethodConfig - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 2: loop.RemoteTriggerConfig.eventTimeout:type_name -> google.protobuf.Duration + 10, // 3: loop.RemoteTriggerConfig.messageExpiry:type_name -> google.protobuf.Duration + 10, // 4: loop.RemoteTriggerConfig.batchCollectionPeriod:type_name -> google.protobuf.Duration + 0, // 5: loop.RemoteExecutableConfig.transmission_schedule:type_name -> loop.TransmissionSchedule + 10, // 6: loop.RemoteExecutableConfig.delta_stage:type_name -> google.protobuf.Duration + 10, // 7: loop.RemoteExecutableConfig.request_timeout:type_name -> google.protobuf.Duration + 1, // 8: loop.RemoteExecutableConfig.request_hasher_type:type_name -> loop.RequestHasherType + 2, // 9: loop.AggregatorConfig.aggregator_type:type_name -> loop.AggregatorType + 3, // 10: loop.CapabilityMethodConfig.remote_trigger_config:type_name -> loop.RemoteTriggerConfig + 5, // 11: loop.CapabilityMethodConfig.remote_executable_config:type_name -> loop.RemoteExecutableConfig + 6, // 12: loop.CapabilityMethodConfig.aggregator_config:type_name -> loop.AggregatorConfig + 11, // 13: loop.CapabilityConfig.default_config:type_name -> values.v1.Map + 3, // 14: loop.CapabilityConfig.remote_trigger_config:type_name -> loop.RemoteTriggerConfig + 4, // 15: loop.CapabilityConfig.remote_target_config:type_name -> loop.RemoteTargetConfig + 5, // 16: loop.CapabilityConfig.remote_executable_config:type_name -> loop.RemoteExecutableConfig + 11, // 17: loop.CapabilityConfig.restricted_config:type_name -> values.v1.Map + 9, // 18: loop.CapabilityConfig.method_configs:type_name -> loop.CapabilityConfig.MethodConfigsEntry + 7, // 19: loop.CapabilityConfig.MethodConfigsEntry.value:type_name -> loop.CapabilityMethodConfig + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for 
extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_registry_proto_init() } diff --git a/pkg/capabilities/pb/registry.proto b/pkg/capabilities/pb/registry.proto index d4adbd2c39..3f46be5908 100644 --- a/pkg/capabilities/pb/registry.proto +++ b/pkg/capabilities/pb/registry.proto @@ -10,10 +10,11 @@ import "google/protobuf/duration.proto"; message RemoteTriggerConfig { google.protobuf.Duration registrationRefresh = 1; google.protobuf.Duration registrationExpiry = 2; - uint32 minResponsesToAggregate = 3; - google.protobuf.Duration messageExpiry = 4; - uint32 maxBatchSize = 5; - google.protobuf.Duration batchCollectionPeriod = 6; + google.protobuf.Duration eventTimeout =3; + uint32 minResponsesToAggregate = 4; + google.protobuf.Duration messageExpiry = 5; + uint32 maxBatchSize = 6; + google.protobuf.Duration batchCollectionPeriod = 7; } // deprecated - v1 only diff --git a/pkg/capabilities/registry/base.go b/pkg/capabilities/registry/base.go index 39b864186b..e344957940 100644 --- a/pkg/capabilities/registry/base.go +++ b/pkg/capabilities/registry/base.go @@ -244,6 +244,14 @@ func (a *atomicTriggerCapability) GetState() connectivity.State { return connectivity.State(-1) // unknown } +func (a *atomicTriggerCapability) AckEvent(ctx context.Context, eventId string) error { + c := a.Load() + if c == nil { + return errors.New("capability unavailable") + } + return (*c).AckEvent(ctx, eventId) +} + func (a *atomicTriggerCapability) RegisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { c := a.Load() if c == nil { @@ -360,6 +368,14 @@ func (a *atomicExecuteAndTriggerCapability) GetState() connectivity.State { return connectivity.State(-1) // unknown } +func (a *atomicExecuteAndTriggerCapability) AckEvent(ctx context.Context, eventId string) error { + c := a.Load() + if c == nil { + return errors.New("capability unavailable") + } + return (*c).AckEvent(ctx, eventId) +} + func (a *atomicExecuteAndTriggerCapability) RegisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { c := a.Load() if c == nil { diff --git a/pkg/capabilities/triggers/mercury_trigger.go b/pkg/capabilities/triggers/mercury_trigger.go index 0853482bd8..68643c3f53 100644 --- a/pkg/capabilities/triggers/mercury_trigger.go +++ b/pkg/capabilities/triggers/mercury_trigger.go @@ -91,6 +91,11 @@ func (o *MercuryTriggerService) ProcessReport(reports []datastreams.FeedReport) return nil } +func (o *MercuryTriggerService) AckEvent(ctx context.Context, eventId string) error { + // TODO Implement? + return nil +} + func (o *MercuryTriggerService) RegisterTrigger(ctx context.Context, req capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { wid := req.Metadata.WorkflowID diff --git a/pkg/capabilities/triggers/on_demand_trigger.go b/pkg/capabilities/triggers/on_demand_trigger.go index b3ffd04b95..3e91bd6976 100644 --- a/pkg/capabilities/triggers/on_demand_trigger.go +++ b/pkg/capabilities/triggers/on_demand_trigger.go @@ -72,6 +72,11 @@ func (o *OnDemand) SendEvent(ctx context.Context, wid string, event capabilities return nil } +func (o *OnDemand) AckEvent(ctx context.Context, eventId string) error { + //TODO Implement? 
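+	// One possible shape (hedged — OnDemand does not currently embed a BaseTriggerCapability;
+	// o.base, triggerID and the per-workflow registration lookup below are hypothetical):
+	//
+	//	for _, wid := range o.registeredWorkflowIDs() {
+	//		if err := o.base.AckEvent(ctx, triggerID, wid, eventId); err != nil {
+	//			return err
+	//		}
+	//	}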
+ return nil +} + func (o *OnDemand) RegisterTrigger(ctx context.Context, req capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { wid := req.Metadata.WorkflowID o.mu.Lock() diff --git a/pkg/capabilities/v2/actions/confidentialhttp/server/client_server_gen.go b/pkg/capabilities/v2/actions/confidentialhttp/server/client_server_gen.go index be42c71b12..51e6db9507 100644 --- a/pkg/capabilities/v2/actions/confidentialhttp/server/client_server_gen.go +++ b/pkg/capabilities/v2/actions/confidentialhttp/server/client_server_gen.go @@ -107,6 +107,10 @@ func (c *clientCapability) UnregisterTrigger(ctx context.Context, request capabi return fmt.Errorf("trigger %s not found", request.Method) } +func (c *clientCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *clientCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/actions/http/server/client_server_gen.go b/pkg/capabilities/v2/actions/http/server/client_server_gen.go index 5eca8955ec..16d17c8bbb 100644 --- a/pkg/capabilities/v2/actions/http/server/client_server_gen.go +++ b/pkg/capabilities/v2/actions/http/server/client_server_gen.go @@ -107,6 +107,10 @@ func (c *clientCapability) UnregisterTrigger(ctx context.Context, request capabi return fmt.Errorf("trigger %s not found", request.Method) } +func (c *clientCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *clientCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/chain-capabilities/evm/server/client_server_gen.go b/pkg/capabilities/v2/chain-capabilities/evm/server/client_server_gen.go index c44e1c707b..06a974ef18 100644 --- a/pkg/capabilities/v2/chain-capabilities/evm/server/client_server_gen.go +++ b/pkg/capabilities/v2/chain-capabilities/evm/server/client_server_gen.go @@ -36,6 +36,7 @@ type ClientCapability interface { RegisterLogTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *evm.FilterLogTriggerRequest) (<-chan capabilities.TriggerAndId[*evm.Log], error) UnregisterLogTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *evm.FilterLogTriggerRequest) error + AckEvent(ctx context.Context, triggerId string, eventId string) error WriteReport(ctx context.Context, metadata capabilities.RequestMetadata, input *evm.WriteReportRequest) (*capabilities.ResponseAndMetadata[*evm.WriteReportReply], caperrors.Error) @@ -143,6 +144,10 @@ func (c *clientCapability) UnregisterTrigger(ctx context.Context, request capabi } } +func (c *clientCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.ClientCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *clientCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/chain-capabilities/solana/server/client_server_gen.go b/pkg/capabilities/v2/chain-capabilities/solana/server/client_server_gen.go index 086ef5058e..920992a5cd 100644 --- a/pkg/capabilities/v2/chain-capabilities/solana/server/client_server_gen.go +++ b/pkg/capabilities/v2/chain-capabilities/solana/server/client_server_gen.go @@ -38,6 +38,7 @@ type 
ClientCapability interface { RegisterLogTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *solana.FilterLogTriggerRequest) (<-chan capabilities.TriggerAndId[*solana.Log], error) UnregisterLogTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *solana.FilterLogTriggerRequest) error + AckEvent(ctx context.Context, triggerId string, eventId string) error WriteReport(ctx context.Context, metadata capabilities.RequestMetadata, input *solana.WriteReportRequest) (*capabilities.ResponseAndMetadata[*solana.WriteReportReply], caperrors.Error) @@ -145,6 +146,10 @@ func (c *clientCapability) UnregisterTrigger(ctx context.Context, request capabi } } +func (c *clientCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.ClientCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *clientCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/consensus/server/consensus_server_gen.go b/pkg/capabilities/v2/consensus/server/consensus_server_gen.go index 8fd928cf46..3c253ed1b3 100644 --- a/pkg/capabilities/v2/consensus/server/consensus_server_gen.go +++ b/pkg/capabilities/v2/consensus/server/consensus_server_gen.go @@ -110,6 +110,10 @@ func (c *consensusCapability) UnregisterTrigger(ctx context.Context, request cap return fmt.Errorf("trigger %s not found", request.Method) } +func (c *consensusCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *consensusCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/templates/server.go.tmpl b/pkg/capabilities/v2/protoc/pkg/templates/server.go.tmpl index 6b1c6ba9f1..0208950c5f 100644 --- a/pkg/capabilities/v2/protoc/pkg/templates/server.go.tmpl +++ b/pkg/capabilities/v2/protoc/pkg/templates/server.go.tmpl @@ -38,6 +38,7 @@ type {{.GoName}}Capability interface { {{ $hasTriggers = true }} Register{{.GoName}}(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *{{ImportAlias .Input.GoIdent.GoImportPath}}.{{.Input.GoIdent.GoName}}) (<- chan capabilities.TriggerAndId[*{{ImportAlias .Output.GoIdent.GoImportPath}}.{{.Output.GoIdent.GoName}}], error) Unregister{{.GoName}}(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *{{ImportAlias .Input.GoIdent.GoImportPath}}.{{.Input.GoIdent.GoName}}) error + AckEvent(ctx context.Context, triggerId string, eventId string) error {{- else }} {{ $hasActions = true }} {{.GoName}}(ctx context.Context, metadata capabilities.RequestMetadata, input *{{ImportAlias .Input.GoIdent.GoImportPath}}.{{.Input.GoIdent.GoName}} {{if ne "emptypb.Empty" (ConfigType $service)}}, {{(ConfigType $service)}}{{ end }}) (*capabilities.ResponseAndMetadata[*{{ImportAlias .Output.GoIdent.GoImportPath}}.{{.Output.GoIdent.GoName}}], caperrors.Error) @@ -169,6 +170,14 @@ func (c *{{.GoName|LowerFirst}}Capability) UnregisterTrigger(ctx context.Context {{- end }} } +func (c *{{.GoName|LowerFirst}}Capability) AckEvent(ctx context.Context, triggerId string, eventId string) error { +{{- if $hasTriggers }} + return c.{{.GoName}}Capability.AckEvent(ctx, triggerId, eventId) +{{- else }} + return fmt.Errorf("trigger %s not found", triggerId) +{{- end }} +} + func (c 
*{{.GoName|LowerFirst}}Capability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/actionandtrigger/server/action_and_trigger_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/actionandtrigger/server/action_and_trigger_server_gen.go index 7ea875038c..a17bb40127 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/actionandtrigger/server/action_and_trigger_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/actionandtrigger/server/action_and_trigger_server_gen.go @@ -23,6 +23,7 @@ type BasicCapability interface { RegisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *actionandtrigger.Config) (<-chan capabilities.TriggerAndId[*actionandtrigger.TriggerEvent], error) UnregisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *actionandtrigger.Config) error + AckEvent(ctx context.Context, triggerId string, eventId string) error Start(ctx context.Context) error Close() error @@ -126,6 +127,10 @@ func (c *basicCapability) UnregisterTrigger(ctx context.Context, request capabil } } +func (c *basicCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.BasicCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *basicCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/basicaction/server/basic_action_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/basicaction/server/basic_action_server_gen.go index 1984f13d5c..77f149d4d2 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/basicaction/server/basic_action_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/basicaction/server/basic_action_server_gen.go @@ -107,6 +107,10 @@ func (c *basicActionCapability) UnregisterTrigger(ctx context.Context, request c return fmt.Errorf("trigger %s not found", request.Method) } +func (c *basicActionCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *basicActionCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/basictrigger/server/basic_trigger_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/basictrigger/server/basic_trigger_server_gen.go index 74545198f5..a54bfd0006 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/basictrigger/server/basic_trigger_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/basictrigger/server/basic_trigger_server_gen.go @@ -20,6 +20,7 @@ var _ = emptypb.Empty{} type BasicCapability interface { RegisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *basictrigger.Config) (<-chan capabilities.TriggerAndId[*basictrigger.Outputs], error) UnregisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *basictrigger.Config) error + AckEvent(ctx context.Context, triggerId string, eventId string) error Start(ctx context.Context) error Close() error @@ -123,6 +124,10 @@ func (c *basicCapability) UnregisterTrigger(ctx context.Context, request capabil } } +func (c *basicCapability) 
AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.BasicCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *basicCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/consensus/server/consensus_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/consensus/server/consensus_server_gen.go index 8fd928cf46..3c253ed1b3 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/consensus/server/consensus_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/consensus/server/consensus_server_gen.go @@ -110,6 +110,10 @@ func (c *consensusCapability) UnregisterTrigger(ctx context.Context, request cap return fmt.Errorf("trigger %s not found", request.Method) } +func (c *consensusCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *consensusCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/importclash/server/clash_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/importclash/server/clash_server_gen.go index d1076bcc7f..b953cbe6b2 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/importclash/server/clash_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/importclash/server/clash_server_gen.go @@ -108,6 +108,10 @@ func (c *basicActionCapability) UnregisterTrigger(ctx context.Context, request c return fmt.Errorf("trigger %s not found", request.Method) } +func (c *basicActionCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *basicActionCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/protoc/pkg/test_capabilities/nodeaction/server/node_action_server_gen.go b/pkg/capabilities/v2/protoc/pkg/test_capabilities/nodeaction/server/node_action_server_gen.go index d90fcb8273..aafa70992d 100644 --- a/pkg/capabilities/v2/protoc/pkg/test_capabilities/nodeaction/server/node_action_server_gen.go +++ b/pkg/capabilities/v2/protoc/pkg/test_capabilities/nodeaction/server/node_action_server_gen.go @@ -107,6 +107,10 @@ func (c *basicActionCapability) UnregisterTrigger(ctx context.Context, request c return fmt.Errorf("trigger %s not found", request.Method) } +func (c *basicActionCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return fmt.Errorf("trigger %s not found", triggerId) +} + func (c *basicActionCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/triggers/cron/server/trigger_server_gen.go b/pkg/capabilities/v2/triggers/cron/server/trigger_server_gen.go index a74d49b8c2..5178f85804 100644 --- a/pkg/capabilities/v2/triggers/cron/server/trigger_server_gen.go +++ b/pkg/capabilities/v2/triggers/cron/server/trigger_server_gen.go @@ -20,9 +20,11 @@ var _ = emptypb.Empty{} type CronCapability interface { RegisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *cron.Config) (<-chan capabilities.TriggerAndId[*cron.Payload], error) UnregisterTrigger(ctx context.Context, 
triggerID string, metadata capabilities.RequestMetadata, input *cron.Config) error + AckEvent(ctx context.Context, triggerId string, eventId string) error RegisterLegacyTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *cron.Config) (<-chan capabilities.TriggerAndId[*cron.LegacyPayload], error) UnregisterLegacyTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *cron.Config) error + AckEvent(ctx context.Context, triggerId string, eventId string) error Start(ctx context.Context) error Close() error @@ -136,6 +138,10 @@ func (c *cronCapability) UnregisterTrigger(ctx context.Context, request capabili } } +func (c *cronCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.CronCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *cronCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/capabilities/v2/triggers/http/server/trigger_server_gen.go b/pkg/capabilities/v2/triggers/http/server/trigger_server_gen.go index 0d6fd2dd40..b59f21f006 100644 --- a/pkg/capabilities/v2/triggers/http/server/trigger_server_gen.go +++ b/pkg/capabilities/v2/triggers/http/server/trigger_server_gen.go @@ -20,6 +20,7 @@ var _ = emptypb.Empty{} type HTTPCapability interface { RegisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *http.Config) (<-chan capabilities.TriggerAndId[*http.Payload], error) UnregisterTrigger(ctx context.Context, triggerID string, metadata capabilities.RequestMetadata, input *http.Config) error + AckEvent(ctx context.Context, triggerId string, eventId string) error Start(ctx context.Context) error Close() error @@ -123,6 +124,10 @@ func (c *hTTPCapability) UnregisterTrigger(ctx context.Context, request capabili } } +func (c *hTTPCapability) AckEvent(ctx context.Context, triggerId string, eventId string) error { + return c.HTTPCapability.AckEvent(ctx, triggerId, eventId) +} + func (c *hTTPCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } diff --git a/pkg/loop/internal/core/services/capability/capabilities.go b/pkg/loop/internal/core/services/capability/capabilities.go index 92637e529f..8884a90567 100644 --- a/pkg/loop/internal/core/services/capability/capabilities.go +++ b/pkg/loop/internal/core/services/capability/capabilities.go @@ -287,6 +287,11 @@ type triggerExecutableClient struct { cancelFuncs map[string]func() } +func (t *triggerExecutableClient) AckEvent(ctx context.Context, eventId string) error { + //TODO implement + return nil +} + func (t *triggerExecutableClient) RegisterTrigger(ctx context.Context, req capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { ch, cancel, err := t.registerTrigger(ctx, req) if err != nil { diff --git a/pkg/loop/internal/core/services/capability/capabilities_registry.go b/pkg/loop/internal/core/services/capability/capabilities_registry.go index 8c9cee12ba..21164408f0 100644 --- a/pkg/loop/internal/core/services/capability/capabilities_registry.go +++ b/pkg/loop/internal/core/services/capability/capabilities_registry.go @@ -187,6 +187,7 @@ func decodeRemoteTriggerConfig(prtc *capabilitiespb.RemoteTriggerConfig) *capabi remoteTriggerConfig := &capabilities.RemoteTriggerConfig{} remoteTriggerConfig.RegistrationRefresh = prtc.RegistrationRefresh.AsDuration() remoteTriggerConfig.RegistrationExpiry = 
prtc.RegistrationExpiry.AsDuration() + remoteTriggerConfig.EventTimeout = prtc.EventTimeout.AsDuration() remoteTriggerConfig.MinResponsesToAggregate = prtc.MinResponsesToAggregate remoteTriggerConfig.MessageExpiry = prtc.MessageExpiry.AsDuration() remoteTriggerConfig.MaxBatchSize = prtc.MaxBatchSize