From 1a3a3158ee7f77bf5730473cc75f5144013ac65b Mon Sep 17 00:00:00 2001 From: Nikolay Petrov Date: Tue, 24 Feb 2026 08:51:44 -0500 Subject: [PATCH 1/4] endpoint expiry: keep endpoints alive to smooth out reconnects --- cmd/connet/control.go | 12 +++- cmd/connet/main.go | 10 +++ cmd/connet/server.go | 14 ++++ server/config.go | 16 ++++- server/control/clients.go | 139 +++++++++++++++++++------------------- server/control/server.go | 8 ++- server/control/store.go | 6 +- server/server.go | 5 +- 8 files changed, 131 insertions(+), 79 deletions(-) diff --git a/cmd/connet/control.go b/cmd/connet/control.go index 21f92261..a6ef3ad3 100644 --- a/cmd/connet/control.go +++ b/cmd/connet/control.go @@ -19,6 +19,9 @@ type ControlConfig struct { ClientsTokens []string `toml:"clients-tokens"` ClientsTokenRestrictions []TokenRestriction `toml:"clients-token-restriction"` + EndpointExpiryDisable bool `toml:"endpoint-expiry-disable"` + EndpointExpiryTimeout durationValue `toml:"endpoint-expiry-timeout"` + RelaysIngresses []ControlIngress `toml:"relays-ingress"` RelaysTokensFile string `toml:"relays-tokens-file"` RelaysTokens []string `toml:"relays-tokens"` @@ -72,6 +75,9 @@ func controlCmd() *cobra.Command { cmd.Flags().StringArrayVar(&clientIngress.AllowCIDRs, "clients-allow-cidr", nil, "list of allowed networks for client connections (CIDR format)") cmd.Flags().StringArrayVar(&clientIngress.DenyCIDRs, "clients-deny-cidr", nil, "list of denied networks for client connections (CIDR format)") + cmd.Flags().BoolVar(&flagsConfig.Control.EndpointExpiryDisable, "endpoint-expiry-disable", false, "disable keeping endpoint registrations alive after client disconnect (default false)") + cmd.Flags().Var(&flagsConfig.Control.EndpointExpiryTimeout, "endpoint-expiry-timeout", "how long to keep endpoint registrations after client disconnect (default '30s')") + cmd.Flags().StringVar(&flagsConfig.Control.RelaysTokensFile, "relays-tokens-file", "", "file containing a list of relay auth tokens (token 
per line)") cmd.Flags().StringArrayVar(&flagsConfig.Control.RelaysTokens, "relays-tokens", nil, "list of relay auth tokens (fallback when 'relay-tokens-file' is not specified)") @@ -118,7 +124,8 @@ func controlRun(ctx context.Context, cfg ControlConfig, logger *slog.Logger) err var err error controlCfg := control.Config{ - Logger: logger, + ClientsEndpointExpiry: resolveEndpointExpiry(cfg.EndpointExpiryDisable, cfg.EndpointExpiryTimeout), + Logger: logger, } var usedClientsDefault bool @@ -268,6 +275,9 @@ func (c *ControlConfig) merge(o ControlConfig) { } c.ClientsTokenRestrictions = mergeSlices(c.ClientsTokenRestrictions, o.ClientsTokenRestrictions) + c.EndpointExpiryDisable = c.EndpointExpiryDisable || o.EndpointExpiryDisable + c.EndpointExpiryTimeout = override(c.EndpointExpiryTimeout, o.EndpointExpiryTimeout) + c.RelaysIngresses = mergeSlices(c.RelaysIngresses, o.RelaysIngresses) if len(o.RelaysTokens) > 0 || o.RelaysTokensFile != "" { // new config completely overrides tokens c.RelaysTokens = o.RelaysTokens diff --git a/cmd/connet/main.go b/cmd/connet/main.go index a39de44e..28ddb5f7 100644 --- a/cmd/connet/main.go +++ b/cmd/connet/main.go @@ -307,3 +307,13 @@ func (d *durationValue) String() string { return (*time.Duration)(d).String() } func (d *durationValue) get() time.Duration { return time.Duration(*d) } + +func resolveEndpointExpiry(disabled bool, timeout durationValue) time.Duration { + if disabled { + return 0 + } + if d := timeout.get(); d > 0 { + return d + } + return 30 * time.Second +} diff --git a/cmd/connet/server.go b/cmd/connet/server.go index a92c5f63..c332706d 100644 --- a/cmd/connet/server.go +++ b/cmd/connet/server.go @@ -17,6 +17,9 @@ type ServerConfig struct { Tokens []string `toml:"tokens"` TokenRestrictions []TokenRestriction `toml:"token-restriction"` + EndpointExpiryDisable bool `toml:"endpoint-expiry-disable"` + EndpointExpiryTimeout durationValue `toml:"endpoint-expiry-timeout"` + RelayIngresses []RelayIngress 
`toml:"relay-ingress"` StatusAddr string `toml:"status-addr"` @@ -45,6 +48,9 @@ func serverCmd() *cobra.Command { cmd.Flags().StringArrayVar(&clientIngress.AllowCIDRs, "allow-cidr", nil, "list of allowed networks for client connections (CIDR format)") cmd.Flags().StringArrayVar(&clientIngress.DenyCIDRs, "deny-cidr", nil, "list of denied networks for client connections (CIDR format)") + cmd.Flags().BoolVar(&flagsConfig.Server.EndpointExpiryDisable, "endpoint-expiry-disable", false, "disable keeping endpoint registrations alive after client disconnect (default false)") + cmd.Flags().Var(&flagsConfig.Server.EndpointExpiryTimeout, "endpoint-expiry-timeout", "how long to keep endpoint registrations after client disconnect (default '30s')") + var relayIngress RelayIngress cmd.Flags().StringVar(&relayIngress.Addr, "relay-addr", "", "relay clients server address (UDP/QUIC, [host]:port) (defaults to ':19191')") cmd.Flags().StringArrayVar(&relayIngress.Hostports, "relay-hostport", nil, `list of host[:port]s advertised by the control server for clients to connect to this relay @@ -64,9 +70,11 @@ func serverCmd() *cobra.Command { if !clientIngress.isZero() { flagsConfig.Server.Ingresses = append(flagsConfig.Server.Ingresses, clientIngress) } + if !relayIngress.isZero() { flagsConfig.Server.RelayIngresses = append(flagsConfig.Server.RelayIngresses, relayIngress) } + cfg.merge(flagsConfig) logger, err := logger(cfg) @@ -116,6 +124,9 @@ func serverRun(ctx context.Context, cfg ServerConfig, logger *slog.Logger) error } opts = append(opts, server.ClientsAuthenticator(clientAuth)) + endpointExpiry := resolveEndpointExpiry(cfg.EndpointExpiryDisable, cfg.EndpointExpiryTimeout) + opts = append(opts, server.ClientsEndpointExpiry(endpointExpiry)) + var usedRelayDefault bool for ix, ingressCfg := range cfg.RelayIngresses { if ingressCfg.Addr == "" && !usedRelayDefault { @@ -159,6 +170,9 @@ func (c *ServerConfig) merge(o ServerConfig) { } c.TokenRestrictions = 
mergeSlices(c.TokenRestrictions, o.TokenRestrictions) + c.EndpointExpiryDisable = c.EndpointExpiryDisable || o.EndpointExpiryDisable + c.EndpointExpiryTimeout = override(c.EndpointExpiryTimeout, o.EndpointExpiryTimeout) + c.RelayIngresses = mergeSlices(c.RelayIngresses, o.RelayIngresses) c.StatusAddr = override(c.StatusAddr, o.StatusAddr) diff --git a/server/config.go b/server/config.go index 946f639d..d4a30e98 100644 --- a/server/config.go +++ b/server/config.go @@ -5,6 +5,7 @@ import ( "log/slog" "net" "os" + "time" "github.com/connet-dev/connet/model" "github.com/connet-dev/connet/server/control" @@ -13,8 +14,9 @@ import ( ) type serverConfig struct { - clientsIngresses []control.Ingress - clientsAuth control.ClientAuthenticator + clientsIngresses []control.Ingress + clientsAuth control.ClientAuthenticator + clientsEndpointExpiry time.Duration relayIngresses []relay.Ingress @@ -24,7 +26,8 @@ type serverConfig struct { func newServerConfig(opts []Option) (*serverConfig, error) { cfg := &serverConfig{ - logger: slog.Default(), + clientsEndpointExpiry: 30 * time.Second, + logger: slog.Default(), } for _, opt := range opts { if err := opt(cfg); err != nil { @@ -100,6 +103,13 @@ func ClientsAuthenticator(clientsAuth control.ClientAuthenticator) Option { } } +func ClientsEndpointExpiry(d time.Duration) Option { + return func(cfg *serverConfig) error { + cfg.clientsEndpointExpiry = d + return nil + } +} + func RelayIngress(icfg relay.Ingress) Option { return func(cfg *serverConfig) error { cfg.relayIngresses = append(cfg.relayIngresses, icfg) diff --git a/server/control/clients.go b/server/control/clients.go index 55c9f3a8..6b387fb0 100644 --- a/server/control/clients.go +++ b/server/control/clients.go @@ -52,6 +52,7 @@ func newClientServer( relays ClientRelays, config logc.KV[ConfigKey, ConfigValue], stores Stores, + endpointExpiry time.Duration, logger *slog.Logger, ) (*clientServer, error) { conns, err := stores.ClientConns() @@ -69,11 +70,11 @@ func 
newClientServer( return nil, fmt.Errorf("client snapshot: %w", err) } - reactivate := map[ClientID]reactivateValue{} + // delete stale conn entries for _, msg := range connsMsgs { - v := reactivate[msg.Key.ID] - v.conns = append(v.conns, msg.Key) - reactivate[msg.Key.ID] = v + if err := conns.Del(msg.Key); err != nil { + return nil, fmt.Errorf("delete stale conn: %w", err) + } } peersMsgs, peersOffset, err := peers.Snapshot() @@ -83,19 +84,20 @@ func newClientServer( peersCache := map[peerKey][]peerValue{} for _, msg := range peersMsgs { - if reactivePeers, ok := reactivate[msg.Key.ID]; ok { - key := peerKey{msg.Key.Endpoint, msg.Key.Role} - peersCache[key] = append(peersCache[key], peerValue{msg.Key.ConnID, &pbclient.RemotePeer{ - Id: msg.Key.ID.string, - Metadata: msg.Value.Metadata, - Peer: msg.Value.Peer, - }}) - reactivePeers.peers = append(reactivePeers.peers, msg.Key) - reactivate[msg.Key.ID] = reactivePeers - } else { - logger.Warn("peer without corresponding client, deleting", "endpoint", msg.Key.Endpoint, "role", msg.Key.Role, "id", msg.Key.ID) - if err := peers.Del(msg.Key); err != nil { - return nil, fmt.Errorf("delete unowned peer: %w", err) + // Add ALL peers to cache (they remain visible during grace period) + key := peerKey{msg.Key.Endpoint, msg.Key.Role} + peersCache[key] = append(peersCache[key], peerValue{msg.Key.ConnID, &pbclient.RemotePeer{ + Id: msg.Key.ID.string, + Metadata: msg.Value.Metadata, + Peer: msg.Value.Peer, + }}) + + // Mark as expired if not already + if msg.Value.ExpiredAt == nil { + now := time.Now() + msg.Value.ExpiredAt = &now + if err := peers.Put(msg.Key, msg.Value); err != nil { + return nil, fmt.Errorf("expire stale peer: %w", err) } } } @@ -140,7 +142,7 @@ func newClientServer( peersCache: peersCache, peersOffset: peersOffset, - reactivate: reactivate, + endpointExpiry: endpointExpiry, }, nil } @@ -161,8 +163,7 @@ type clientServer struct { peersOffset int64 peersMu sync.RWMutex - reactivate map[ClientID]reactivateValue - 
reactivateMu sync.RWMutex + endpointExpiry time.Duration } type peerKey struct { @@ -175,16 +176,7 @@ type peerValue struct { peer *pbclient.RemotePeer } -type reactivateValue struct { - conns []ClientConnKey - peers []ClientPeerKey -} - func (s *clientServer) connected(id ClientID, connID ConnID, auth ClientAuthentication, remote net.Addr, metadata string) error { - s.reactivateMu.Lock() - delete(s.reactivate, id) - s.reactivateMu.Unlock() - return s.conns.Put(ClientConnKey{id, connID}, ClientConnValue{auth, remote.String(), metadata}) } @@ -193,7 +185,12 @@ func (s *clientServer) disconnected(id ClientID, connID ConnID) error { } func (s *clientServer) announce(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID, metadata string, peer *pbclient.Peer) error { - return s.peers.Put(ClientPeerKey{endpoint, role, id, connID}, ClientPeerValue{peer, metadata}) + return s.peers.Put(ClientPeerKey{endpoint, role, id, connID}, ClientPeerValue{Peer: peer, Metadata: metadata}) +} + +func (s *clientServer) expire(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID, metadata string, peer *pbclient.Peer) error { + now := time.Now() + return s.peers.Put(ClientPeerKey{endpoint, role, id, connID}, ClientPeerValue{Peer: peer, Metadata: metadata, ExpiredAt: &now}) } func (s *clientServer) revoke(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID) error { @@ -272,7 +269,9 @@ func (s *clientServer) run(ctx context.Context) error { g.Go(reliable.Bind(ingress, s.runListener)) } g.Go(s.runPeerCache) - g.Go(s.runCleaner) + if s.endpointExpiry > 0 { + g.Go(s.runPeerExpiry) + } g.Go(logc.ScheduleCompact(s.conns)) g.Go(logc.ScheduleCompact(s.peers)) @@ -402,53 +401,49 @@ func (s *clientServer) runPeerCache(ctx context.Context) error { } } -func (s *clientServer) runCleaner(ctx context.Context) error { - switch inactive, err := s.waitToReactivate(ctx); { - case err != nil: - return err - case inactive == 0: - s.logger.Debug("all clients 
reactivated") - return nil +func (s *clientServer) runPeerExpiry(ctx context.Context) error { + // Process existing expired entries from the snapshot + msgs, offset, err := s.peers.Snapshot() + if err != nil { + return fmt.Errorf("expiry snapshot: %w", err) } - - s.reactivateMu.Lock() - defer s.reactivateMu.Unlock() - - for key, value := range s.reactivate { - s.logger.Warn("force disconnecting client", "id", key) - for _, conn := range value.conns { - if err := s.disconnected(conn.ID, conn.ConnID); err != nil { - return err + for _, msg := range msgs { + if expiredAt := msg.Value.ExpiredAt; expiredAt != nil { + if err := s.waitAndRevoke(ctx, msg.Key, *expiredAt); err != nil { + return fmt.Errorf("expiry wait and revoke: %w", err) } } - for _, peer := range value.peers { - if err := s.revoke(peer.Endpoint, peer.Role, peer.ID, peer.ConnID); err != nil { - return err + } + + // Watch for new expired entries + for { + msgs, nextOffset, err := s.peers.Consume(ctx, offset) + if err != nil { + return fmt.Errorf("expiry consume: %w", err) + } + for _, msg := range msgs { + if expiredAt := msg.Value.ExpiredAt; !msg.Delete && expiredAt != nil { + if err := s.waitAndRevoke(ctx, msg.Key, *expiredAt); err != nil { + return fmt.Errorf("expiry wait and revoke: %w", err) + } } } + offset = nextOffset } - - return nil } -func (s *clientServer) waitToReactivate(ctx context.Context) (int, error) { - s.reactivateMu.RLock() - waitToReactivate := len(s.reactivate) - s.reactivateMu.RUnlock() - - if waitToReactivate == 0 { - return 0, nil +func (s *clientServer) waitAndRevoke(ctx context.Context, key ClientPeerKey, expiredAt time.Time) error { + remaining := s.endpointExpiry - time.Since(expiredAt) + if remaining > 0 { + s.logger.Debug("waiting to expire endpoint", "endpoint", key.Endpoint, "role", key.Role, "wait", remaining) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(remaining): + } } - s.logger.Debug("waiting for clients to reactivate", "count", 
waitToReactivate) - select { - case <-ctx.Done(): - return 0, ctx.Err() - case <-time.After(time.Minute): - s.reactivateMu.RLock() - defer s.reactivateMu.RUnlock() - return len(s.reactivate), nil - } + return s.revoke(key.Endpoint, key.Role, key.ID, key.ConnID) } type clientConn struct { @@ -660,6 +655,14 @@ func (s *clientStream) announce(ctx context.Context, req *pbclient.Request_Annou return err } defer func() { + if s.conn.server.endpointExpiry > 0 && s.conn.conn.Context().Err() != nil { + // Connection dead — mark as expired, consumer will delete after timeout + if err := s.conn.server.expire(endpoint, role, s.conn.id, s.conn.connID, s.conn.metadata, req.Peer); err != nil { + s.conn.logger.Warn("failed to expire peer", "id", s.conn.id, "err", err) + } + return + } + // Connection alive or feature disabled — revoke immediately if err := s.conn.server.revoke(endpoint, role, s.conn.id, s.conn.connID); err != nil { s.conn.logger.Warn("failed to revoke client", "id", s.conn.id, "err", err) } diff --git a/server/control/server.go b/server/control/server.go index b7dbffd8..1d6e4a15 100644 --- a/server/control/server.go +++ b/server/control/server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log/slog" + "time" "github.com/connet-dev/connet/model" "github.com/connet-dev/connet/pkg/iterc" @@ -13,8 +14,9 @@ import ( ) type Config struct { - ClientsIngress []Ingress - ClientsAuth ClientAuthenticator + ClientsIngress []Ingress + ClientsAuth ClientAuthenticator + ClientsEndpointExpiry time.Duration RelaysIngress []Ingress RelaysAuth RelayAuthenticator @@ -39,7 +41,7 @@ func NewServer(cfg Config) (*Server, error) { return nil, fmt.Errorf("create relay server: %w", err) } - clients, err := newClientServer(cfg.ClientsIngress, cfg.ClientsAuth, relays, configStore, cfg.Stores, cfg.Logger) + clients, err := newClientServer(cfg.ClientsIngress, cfg.ClientsAuth, relays, configStore, cfg.Stores, cfg.ClientsEndpointExpiry, cfg.Logger) if err != nil { return nil, fmt.Errorf("create 
client server: %w", err) } diff --git a/server/control/store.go b/server/control/store.go index 74848109..29f7b207 100644 --- a/server/control/store.go +++ b/server/control/store.go @@ -6,6 +6,7 @@ import ( "errors" "os" "path/filepath" + "time" "github.com/connet-dev/connet/model" "github.com/connet-dev/connet/pkg/logc" @@ -92,8 +93,9 @@ type ClientPeerKey struct { } type ClientPeerValue struct { - Peer *pbclient.Peer `json:"peer"` - Metadata string `json:"metadata"` + Peer *pbclient.Peer `json:"peer"` + Metadata string `json:"metadata"` + ExpiredAt *time.Time `json:"expired_at,omitempty"` } type RelayConnKey struct { diff --git a/server/server.go b/server/server.go index ff008014..a14ffa66 100644 --- a/server/server.go +++ b/server/server.go @@ -57,8 +57,9 @@ func New(opts ...Option) (*Server, error) { } control, err := control.NewServer(control.Config{ - ClientsIngress: cfg.clientsIngresses, - ClientsAuth: cfg.clientsAuth, + ClientsIngress: cfg.clientsIngresses, + ClientsAuth: cfg.clientsAuth, + ClientsEndpointExpiry: cfg.clientsEndpointExpiry, RelaysIngress: []control.Ingress{{ Addr: relaysAddr, From 8d0e30a03b78247fee8199725b98f8525526888a Mon Sep 17 00:00:00 2001 From: Nikolay Petrov Date: Wed, 25 Feb 2026 17:31:36 -0500 Subject: [PATCH 2/4] delete immediately; post expiry based on latest change --- server/control/clients.go | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/server/control/clients.go b/server/control/clients.go index 6b387fb0..4bf573a1 100644 --- a/server/control/clients.go +++ b/server/control/clients.go @@ -84,6 +84,14 @@ func newClientServer( peersCache := map[peerKey][]peerValue{} for _, msg := range peersMsgs { + if endpointExpiry == 0 { + // Expiry disabled — delete stale peers immediately + if err := peers.Del(msg.Key); err != nil { + return nil, fmt.Errorf("delete stale peer: %w", err) + } + continue + } + // Add ALL peers to cache (they remain visible during grace period) key := 
peerKey{msg.Key.Endpoint, msg.Key.Role} peersCache[key] = append(peersCache[key], peerValue{msg.Key.ConnID, &pbclient.RemotePeer{ @@ -188,9 +196,15 @@ func (s *clientServer) announce(endpoint model.Endpoint, role model.Role, id Cli return s.peers.Put(ClientPeerKey{endpoint, role, id, connID}, ClientPeerValue{Peer: peer, Metadata: metadata}) } -func (s *clientServer) expire(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID, metadata string, peer *pbclient.Peer) error { +func (s *clientServer) expire(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID) error { + key := ClientPeerKey{endpoint, role, id, connID} + val, err := s.peers.Get(key) + if err != nil { + return fmt.Errorf("get peer for expire: %w", err) + } now := time.Now() - return s.peers.Put(ClientPeerKey{endpoint, role, id, connID}, ClientPeerValue{Peer: peer, Metadata: metadata, ExpiredAt: &now}) + val.ExpiredAt = &now + return s.peers.Put(key, val) } func (s *clientServer) revoke(endpoint model.Endpoint, role model.Role, id ClientID, connID ConnID) error { @@ -657,7 +671,7 @@ func (s *clientStream) announce(ctx context.Context, req *pbclient.Request_Annou defer func() { if s.conn.server.endpointExpiry > 0 && s.conn.conn.Context().Err() != nil { // Connection dead — mark as expired, consumer will delete after timeout - if err := s.conn.server.expire(endpoint, role, s.conn.id, s.conn.connID, s.conn.metadata, req.Peer); err != nil { + if err := s.conn.server.expire(endpoint, role, s.conn.id, s.conn.connID); err != nil { s.conn.logger.Warn("failed to expire peer", "id", s.conn.id, "err", err) } return From baa7713522891a19553ae6ee4a4c3bad8ff855cd Mon Sep 17 00:00:00 2001 From: Nikolay Petrov Date: Wed, 25 Feb 2026 18:05:54 -0500 Subject: [PATCH 3/4] cleanup --- cmd/connet/control.go | 5 +++-- server/control/clients.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/connet/control.go b/cmd/connet/control.go index a6ef3ad3..15447414 100644 --- 
a/cmd/connet/control.go +++ b/cmd/connet/control.go @@ -124,8 +124,7 @@ func controlRun(ctx context.Context, cfg ControlConfig, logger *slog.Logger) err var err error controlCfg := control.Config{ - ClientsEndpointExpiry: resolveEndpointExpiry(cfg.EndpointExpiryDisable, cfg.EndpointExpiryTimeout), - Logger: logger, + Logger: logger, } var usedClientsDefault bool @@ -153,6 +152,8 @@ func controlRun(ctx context.Context, cfg ControlConfig, logger *slog.Logger) err return err } + controlCfg.ClientsEndpointExpiry = resolveEndpointExpiry(cfg.EndpointExpiryDisable, cfg.EndpointExpiryTimeout) + var usedRelaysDefault bool for ix, ingressCfg := range cfg.RelaysIngresses { if ingressCfg.Addr == "" && !usedRelaysDefault { diff --git a/server/control/clients.go b/server/control/clients.go index 4bf573a1..fd46fa1a 100644 --- a/server/control/clients.go +++ b/server/control/clients.go @@ -678,7 +678,7 @@ func (s *clientStream) announce(ctx context.Context, req *pbclient.Request_Annou } // Connection alive or feature disabled — revoke immediately if err := s.conn.server.revoke(endpoint, role, s.conn.id, s.conn.connID); err != nil { - s.conn.logger.Warn("failed to revoke client", "id", s.conn.id, "err", err) + s.conn.logger.Warn("failed to revoke peer", "id", s.conn.id, "err", err) } }() From 25a7643747d50686d632611376ca473ca0cffa1d Mon Sep 17 00:00:00 2001 From: Nikolay Petrov Date: Fri, 27 Feb 2026 06:58:52 -0500 Subject: [PATCH 4/4] update readme --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 18e39c49..e0d0afdb 100644 --- a/README.md +++ b/README.md @@ -213,6 +213,9 @@ tokens-file = "path/to/client/tokens" # file that contains a list of client auth tokens = ["client-token-1", "client-token-n"] # set of recognized client auth tokens # one of tokens or tokens-file is required +endpoint-expiry-disable = false # disable keeping endpoint registrations alive after client disconnect (default false) 
+endpoint-expiry-timeout = "30s" # how long to keep endpoint registrations after client disconnect (default '30s') + status-addr = "127.0.0.1:19180" # address to listen for incoming status connections (TCP/HTTP, [host]:port) (disabled by default) store-dir = "path/to/server-store" # directory for this server to persist runtime information, see Storage section for more info @@ -247,6 +250,9 @@ clients-tokens-file = "path/to/client/tokens" # file containing a list of client clients-tokens = ["client-token-1", "client-token-n"] # list of recognized client auth tokens # one of client-tokens-file or client-tokens is required +endpoint-expiry-disable = false # disable keeping endpoint registrations alive after client disconnect (default false) +endpoint-expiry-timeout = "30s" # how long to keep endpoint registrations after client disconnect (default '30s') + relays-tokens-file = "path/to/relay/token" # file containing a list of relay auth tokens, one token per line relays-tokens = ["relay-token-1", "relay-token-n"] # list of recognized relay auth tokens # one of relay-tokens or relay-tokens-file is required if connecting relays @@ -587,7 +593,6 @@ by adding account management and it is one of the easiest ways to start. ## Planlog ### Next - - [ ] do not discard client/peer/endpoint info immediately - [ ] controlled server shutdown - [ ] peer identity and support for options in p2p - [ ] raw endpoint protocols @@ -607,6 +612,9 @@ by adding account management and it is one of the easiest ways to start. ## Changelog +### v0.15.0 + - [x] endpoint expiry: keep endpoint registrations alive for a grace period after client disconnect to smooth out reconnects + ### v0.14.0 - [x] rewrite relay to not depend on control connection - [x] complete migration to relay v2