From a114059759ec1d70ce04acfce028da54428689a9 Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 18:48:03 +0000 Subject: [PATCH 01/46] Add RootPair() and serialization routines to userns idmap Signed-off-by: Henry Wang --- internal/cri/server/container_start_linux.go | 5 +- internal/userns/idmap.go | 79 +++++- internal/userns/idmap_test.go | 240 +++++++++++++++++++ 3 files changed, 321 insertions(+), 3 deletions(-) diff --git a/internal/cri/server/container_start_linux.go b/internal/cri/server/container_start_linux.go index 0f0a49cf0ce1..048b61e759b4 100644 --- a/internal/cri/server/container_start_linux.go +++ b/internal/cri/server/container_start_linux.go @@ -48,10 +48,11 @@ func updateContainerIOOwner(ctx context.Context, cntr containerd.Container, conf return nil, fmt.Errorf("invalid linux platform oci runtime spec") } - hostID, err := userns.IDMap{ + idMap := userns.IDMap{ UidMap: spec.Linux.UIDMappings, GidMap: spec.Linux.GIDMappings, - }.ToHost(userns.User{ + } + hostID, err := idMap.ToHost(userns.User{ Uid: spec.Process.User.UID, Gid: spec.Process.User.GID, }) diff --git a/internal/userns/idmap.go b/internal/userns/idmap.go index e547419a89d3..71d3bd89d50d 100644 --- a/internal/userns/idmap.go +++ b/internal/userns/idmap.go @@ -24,6 +24,7 @@ package userns import ( "errors" "fmt" + "strings" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -48,8 +49,21 @@ type IDMap struct { GidMap []specs.LinuxIDMapping `json:"GidMap"` } +// RootPair returns the ID pair for the root user +func (i *IDMap) RootPair() (User, error) { + uid, err := toHost(0, i.UidMap) + if err != nil { + return invalidUser, err + } + gid, err := toHost(0, i.GidMap) + if err != nil { + return invalidUser, err + } + return User{Uid: uid, Gid: gid}, nil +} + // ToHost returns the host user ID pair for the container ID pair. -func (i IDMap) ToHost(pair User) (User, error) { +func (i *IDMap) ToHost(pair User) (User, error) { var ( target User err error @@ -65,6 +79,45 @@ func (i IDMap) ToHost(pair User) (User, error) { return target, nil } +// Marshal serializes the IDMap object into two strings: +// one uidmap list and another one for gidmap list +func (i *IDMap) Marshal() (string, string) { + marshal := func(mappings []specs.LinuxIDMapping) string { + var arr []string + for _, m := range mappings { + arr = append(arr, serializeLinuxIDMapping(m)) + } + return strings.Join(arr, ",") + } + return marshal(i.UidMap), marshal(i.GidMap) +} + +// Unmarshal deserialize the passed uidmap and gidmap strings +// into a IDMap object. Error is returned in case of failure +func (i *IDMap) Unmarshal(uidMap, gidMap string) error { + unmarshal := func(str string, fn func(m specs.LinuxIDMapping)) error { + if len(str) == 0 { + return nil + } + for _, mapping := range strings.Split(str, ",") { + m, err := deserializeLinuxIDMapping(mapping) + if err != nil { + return err + } + fn(m) + } + return nil + } + if err := unmarshal(uidMap, func(m specs.LinuxIDMapping) { + i.UidMap = append(i.UidMap, m) + }); err != nil { + return err + } + return unmarshal(gidMap, func(m specs.LinuxIDMapping) { + i.GidMap = append(i.GidMap, m) + }) +} + // toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # @@ -96,3 +149,27 @@ func safeSum(x, y uint32) (uint32, error) { } return z, nil } + +// serializeLinuxIDMapping marshals a LinuxIDMapping object to string +func serializeLinuxIDMapping(m specs.LinuxIDMapping) string { + return fmt.Sprintf("%d:%d:%d", m.ContainerID, m.HostID, m.Size) +} + +// deserializeLinuxIDMapping unmarshals a string to a LinuxIDMapping object +func deserializeLinuxIDMapping(str string) (specs.LinuxIDMapping, error) { + var ( + hostID, ctrID, length int64 + ) + _, err := fmt.Sscanf(str, "%d:%d:%d", &ctrID, &hostID, &length) + if err != nil { + return specs.LinuxIDMapping{}, fmt.Errorf("input value %s unparsable: %w", str, err) + } + if ctrID < 0 || ctrID >= invalidID || hostID < 0 || hostID >= invalidID || length < 0 || length >= invalidID { + return specs.LinuxIDMapping{}, fmt.Errorf("invalid mapping \"%s\"", str) + } + return specs.LinuxIDMapping{ + ContainerID: uint32(ctrID), + HostID: uint32(hostID), + Size: uint32(length), + }, nil +} diff --git a/internal/userns/idmap_test.go b/internal/userns/idmap_test.go index 30375ad65eab..54de2b89d7f6 100644 --- a/internal/userns/idmap_test.go +++ b/internal/userns/idmap_test.go @@ -23,6 +23,56 @@ import ( "github.com/stretchr/testify/assert" ) +func TestRootPair(t *testing.T) { + for _, test := range []struct { + idmap IDMap + expUID uint32 + expGID uint32 + expErr bool + }{ + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 100, + Size: 1, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 200, + Size: 1, + }, + }, + }, + expUID: 100, + expGID: 200, + expErr: false, + }, + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 1, + HostID: 100, + Size: 1, + }, + }, + }, + expUID: invalidID, + expGID: invalidID, + expErr: true, + }, + } { + user, err := test.idmap.RootPair() + assert.Equal(t, test.expErr, err != nil) + assert.Equal(t, test.expUID, user.Uid) + assert.Equal(t, test.expGID, user.Gid) + } +} + func TestToHost(t *testing.T) { idmap := IDMap{ UidMap: []specs.LinuxIDMapping{ @@ -250,3 +300,193 @@ func TestToHostOverflow(t *testing.T) { assert.Equal(t, r, invalidUser) } } + +func TestMarshal(t *testing.T) { + for _, test := range []struct { + idmap IDMap + expUID string + expGID string + }{ + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 10000, + }, + { + ContainerID: 1000, + HostID: 20000, + Size: 10000, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 1000, + Size: 10000, + }, + }, + }, + expUID: "0:1000:10000,1000:20000:10000", + expGID: "0:1000:1000,1000:1000:10000", + }, + { + idmap: IDMap{ + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 10000, + }, + }, + }, + expUID: "", + expGID: "0:1000:10000", + }, + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 10000, + }, + }, + }, + expUID: "0:1000:10000", + expGID: "", + }, + { + idmap: IDMap{}, + expUID: "", + expGID: "", + }, + } { + uid, gid := test.idmap.Marshal() + assert.Equal(t, test.expUID, uid) + assert.Equal(t, test.expGID, gid) + } +} + +func TestUnmarshal(t *testing.T) { + for _, test := range []struct { + idmap IDMap + uid string + gid string + expErr bool + }{ + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 
10000, + }, + { + ContainerID: 1000, + HostID: 20000, + Size: 10000, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 1000, + Size: 10000, + }, + }, + }, + uid: "0:1000:10000,1000:20000:10000", + gid: "0:1000:1000,1000:1000:10000", + expErr: false, + }, + { + idmap: IDMap{ + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 10000, + }, + }, + }, + uid: "", + gid: "0:1000:10000", + expErr: false, + }, + { + idmap: IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 1000, + Size: 10000, + }, + }, + }, + uid: "0:1000:10000", + gid: "", + expErr: false, + }, + { + idmap: IDMap{}, + uid: "0-0-0", + gid: "0-0-0", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "-1:1000:10000", + gid: "", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "0:1000:", + gid: "", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "0:1000", + gid: "", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "1:4294967295:10000", + gid: "", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "", + gid: "1:1:4294967295", + expErr: true, + }, + { + idmap: IDMap{}, + uid: "", + gid: "", + expErr: false, + }, + } { + var idmap IDMap + err := idmap.Unmarshal(test.uid, test.gid) + assert.Equal(t, test.expErr, err != nil) + assert.Equal(t, test.idmap, idmap) + } +} From 168ec21dbd6254088a47257d1a44812155d6d54c Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 18:51:30 +0000 Subject: [PATCH 02/46] Update idmapped mount to support multiple uid/gid mappings Signed-off-by: Henry Wang --- core/mount/mount_idmapped_linux.go | 52 ++++++++++++++++--------- core/mount/mount_idmapped_linux_test.go | 10 +++++ 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/core/mount/mount_idmapped_linux.go b/core/mount/mount_idmapped_linux.go index d929157d76f8..e152f7ab721b 100644 --- a/core/mount/mount_idmapped_linux.go +++ b/core/mount/mount_idmapped_linux.go @@ -26,39 +26,55 @@ import ( "golang.org/x/sys/unix" ) -// TODO: Support multiple mappings in future -func parseIDMapping(mapping string) ([]syscall.SysProcIDMap, error) { +func parseIDMapping(mapping string) (syscall.SysProcIDMap, error) { + var retval syscall.SysProcIDMap + parts := strings.Split(mapping, ":") if len(parts) != 3 { - return nil, fmt.Errorf("user namespace mappings require the format `container-id:host-id:size`") + return retval, fmt.Errorf("user namespace mappings require the format `container-id:host-id:size`") } cID, err := strconv.Atoi(parts[0]) if err != nil { - return nil, fmt.Errorf("invalid container id for user namespace remapping, %w", err) + return retval, fmt.Errorf("invalid container id for user namespace remapping, %w", err) } hID, err := strconv.Atoi(parts[1]) if err != nil { - return nil, fmt.Errorf("invalid host id for user namespace remapping, %w", err) + return retval, fmt.Errorf("invalid host id for user namespace remapping, %w", err) } size, err := strconv.Atoi(parts[2]) if err != nil { - return nil, fmt.Errorf("invalid size for user namespace remapping, %w", err) + return retval, fmt.Errorf("invalid size for user namespace remapping, %w", err) } if cID < 0 || hID < 0 || size < 0 { - return nil, fmt.Errorf("invalid mapping %s, all IDs and size must be positive integers", mapping) + return retval, fmt.Errorf("invalid mapping %s, all IDs and size must be positive integers", mapping) + } + + retval = syscall.SysProcIDMap{ + ContainerID: cID, + HostID: hID, + Size: size, } - return []syscall.SysProcIDMap{ 
- { - ContainerID: cID, - HostID: hID, - Size: size, - }, - }, nil + return retval, nil +} + +func parseIDMappingList(mappings string) ([]syscall.SysProcIDMap, error) { + var ( + res []syscall.SysProcIDMap + maplist = strings.Split(mappings, ",") + ) + for _, m := range maplist { + r, err := parseIDMapping(m) + if err != nil { + return nil, err + } + res = append(res, r) + } + return res, nil } // IDMapMount applies GID/UID shift according to gidmap/uidmap for target path @@ -88,15 +104,15 @@ func IDMapMount(source, target string, usernsFd int) (err error) { return nil } -// GetUsernsFD forks the current process and creates a user namespace using -// the specified mappings. +// GetUsernsFD forks the current process and creates a user namespace using the specified mappings. +// Expected syntax of ID mapping parameter is "%d:%d:%d[,%d:%d:%d,...]" func GetUsernsFD(uidmap, gidmap string) (_usernsFD *os.File, _ error) { - uidMaps, err := parseIDMapping(uidmap) + uidMaps, err := parseIDMappingList(uidmap) if err != nil { return nil, err } - gidMaps, err := parseIDMapping(gidmap) + gidMaps, err := parseIDMappingList(gidmap) if err != nil { return nil, err } diff --git a/core/mount/mount_idmapped_linux_test.go b/core/mount/mount_idmapped_linux_test.go index 90e3a61e82ab..5b8b295daa87 100644 --- a/core/mount/mount_idmapped_linux_test.go +++ b/core/mount/mount_idmapped_linux_test.go @@ -97,6 +97,11 @@ func testGetUsernsFD(t *testing.T) { gidMaps: "0:1000:100", hasErr: false, }, + { + uidMaps: "0:1000:100,100:2000:200", + gidMaps: "0:1000:100,100:2000:200", + hasErr: false, + }, { uidMaps: "100:1000:100", gidMaps: "0:-1:100", @@ -112,6 +117,11 @@ func testGetUsernsFD(t *testing.T) { gidMaps: "0:1000:-1", hasErr: true, }, + { + uidMaps: "100:1000:100", + gidMaps: "0:1000:-1,100:1000:100", + hasErr: true, + }, } { t.Run(fmt.Sprintf("#%v", idx), func(t *testing.T) { _, err := GetUsernsFD(tc.uidMaps, tc.gidMaps) From 8a030d6537e42194cca894ebf89556af09dfade8 Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 19:27:49 +0000 Subject: [PATCH 03/46] Update overlay snapshotter to support multiple uid/gid mappings Signed-off-by: Henry Wang --- plugins/snapshots/overlay/overlay.go | 63 +-- plugins/snapshots/overlay/overlay_test.go | 443 ++++++++++++++-------- 2 files changed, 314 insertions(+), 192 deletions(-) diff --git a/plugins/snapshots/overlay/overlay.go b/plugins/snapshots/overlay/overlay.go index a0ea673ae24a..80535ba823ad 100644 --- a/plugins/snapshots/overlay/overlay.go +++ b/plugins/snapshots/overlay/overlay.go @@ -29,6 +29,7 @@ import ( "github.com/containerd/containerd/v2/core/mount" "github.com/containerd/containerd/v2/core/snapshots" "github.com/containerd/containerd/v2/core/snapshots/storage" + "github.com/containerd/containerd/v2/internal/userns" "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils" "github.com/containerd/continuity/fs" "github.com/containerd/log" @@ -424,41 +425,6 @@ func (o *snapshotter) getCleanupDirectories(ctx context.Context) ([]string, erro return cleanup, nil } -func validateIDMapping(mapping string) error { - var ( - hostID int - ctrID int - length int - ) - - if _, err := fmt.Sscanf(mapping, "%d:%d:%d", &ctrID, &hostID, &length); err != nil { - return err - } - // Almost impossible, but snapshots.WithLabels doesn't check it - if ctrID < 0 || hostID < 0 || length < 0 { - return fmt.Errorf("invalid mapping \"%d:%d:%d\"", ctrID, hostID, length) - } - if ctrID != 0 { - return fmt.Errorf("container mapping of 0 is only supported") - } - 
return nil -} - -func hostID(mapping string) (int, error) { - var ( - hostID int - ctrID int - length int - ) - if err := validateIDMapping(mapping); err != nil { - return -1, fmt.Errorf("invalid mapping: %w", err) - } - if _, err := fmt.Sscanf(mapping, "%d:%d:%d", &ctrID, &hostID, &length); err != nil { - return -1, err - } - return hostID, nil -} - func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { var ( s storage.Snapshot @@ -499,22 +465,35 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return fmt.Errorf("failed to get snapshot info: %w", err) } - mappedUID := -1 - mappedGID := -1 + var ( + mappedUID, mappedGID = -1, -1 + uidmapLabel, gidmapLabel string + needsRemap = false + ) // NOTE: if idmapped mounts' supported by hosted kernel there may be // no parents at all, so overlayfs will not work and snapshotter // will use bind mount. To be able to create file objects inside the // rootfs -- just chown this only bound directory according to provided // {uid,gid}map. In case of one/multiple parents -- chown upperdir. if v, ok := info.Labels[snapshots.LabelSnapshotUIDMapping]; ok { - if mappedUID, err = hostID(v); err != nil { - return fmt.Errorf("failed to parse UID mapping: %w", err) - } + uidmapLabel = v + needsRemap = true } if v, ok := info.Labels[snapshots.LabelSnapshotGIDMapping]; ok { - if mappedGID, err = hostID(v); err != nil { - return fmt.Errorf("failed to parse GID mapping: %w", err) + gidmapLabel = v + needsRemap = true + } + + if needsRemap { + var idMap userns.IDMap + if err = idMap.Unmarshal(uidmapLabel, gidmapLabel); err != nil { + return fmt.Errorf("failed to unmarshal snapshot ID mapped labels: %w", err) + } + root, err := idMap.RootPair() + if err != nil { + return fmt.Errorf("failed to find root pair: %w", err) } + mappedUID, mappedGID = int(root.Uid), int(root.Gid) } if mappedUID == -1 || mappedGID == -1 { diff --git a/plugins/snapshots/overlay/overlay_test.go b/plugins/snapshots/overlay/overlay_test.go index 5cb6bc30a49a..08a8c83d7c51 100644 --- a/plugins/snapshots/overlay/overlay_test.go +++ b/plugins/snapshots/overlay/overlay_test.go @@ -31,6 +31,7 @@ import ( "github.com/containerd/containerd/v2/core/snapshots" "github.com/containerd/containerd/v2/core/snapshots/storage" "github.com/containerd/containerd/v2/core/snapshots/testsuite" + "github.com/containerd/containerd/v2/internal/userns" "github.com/containerd/containerd/v2/pkg/testutil" "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils" "github.com/opencontainers/runtime-spec/specs-go" @@ -199,186 +200,292 @@ func testOverlayOverlayMount(t *testing.T, newSnapshotter testsuite.SnapshotterF } func testOverlayRemappedBind(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { - var ( - opts []snapshots.Opt - mounts []mount.Mount - ) + for _, test := range []struct { + idMap userns.IDMap + snapOpt func(idMap userns.IDMap) snapshots.Opt + expUID uint32 + expGID uint32 + expUIDMntOpt string + expGIDMntOpt string + }{ + { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 65536, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 65536, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithRemapperLabels( + idMap.UidMap[0].ContainerID, idMap.UidMap[0].HostID, + idMap.GidMap[0].ContainerID, idMap.GidMap[0].HostID, + 
idMap.UidMap[0].Size, + ) + }, + expUID: 666, + expGID: 666, + expUIDMntOpt: "uidmap=0:666:65536", + expGIDMntOpt: "gidmap=0:666:65536", + }, + { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 6666, + Size: 64536, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 888, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 8888, + Size: 64536, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithUserNSRemapperLabels(idMap.UidMap, idMap.GidMap) + }, + expUID: 666, + expGID: 888, + expUIDMntOpt: "uidmap=0:666:1000,1000:6666:64536", + expGIDMntOpt: "gidmap=0:888:1000,1000:8888:64536", + }, + } { + var ( + opts []snapshots.Opt + mounts []mount.Mount + ) + + ctx := context.TODO() + root := t.TempDir() + o, _, err := newSnapshotter(ctx, root) + if err != nil { + t.Fatal(err) + } - ctx := context.TODO() - root := t.TempDir() - o, _, err := newSnapshotter(ctx, root) - if err != nil { - t.Fatal(err) - } + if sn, ok := o.(*snapshotter); !ok || !sn.remapIDs { + t.Skip("overlayfs doesn't support idmapped mounts") + } - if sn, ok := o.(*snapshotter); !ok || !sn.remapIDs { - t.Skip("overlayfs doesn't support idmapped mounts") - } + opts = append(opts, test.snapOpt(test.idMap)) - hostID := uint32(666) - contID := uint32(0) - length := uint32(65536) + key := "/tmp/test" + if mounts, err = o.Prepare(ctx, key, "", opts...); err != nil { + t.Fatal(err) + } - uidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - gidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - opts = append(opts, containerd.WithRemapperLabels( - uidMap.ContainerID, uidMap.HostID, - gidMap.ContainerID, gidMap.HostID, - length), - ) + bp := getBasePath(ctx, o, root, key) + expected := []string{test.expUIDMntOpt, test.expGIDMntOpt, "rw", "rbind"} - key := "/tmp/test" - if mounts, err = o.Prepare(ctx, key, "", opts...); err != nil { - t.Fatal(err) - } + checkMountOpts := func() { + if len(mounts) != 1 { + t.Errorf("should only have 1 mount but received %d", len(mounts)) + } - bp := getBasePath(ctx, o, root, key) - expected := []string{ - fmt.Sprintf("uidmap=%d:%d:%d", uidMap.ContainerID, uidMap.HostID, uidMap.Size), - fmt.Sprintf("gidmap=%d:%d:%d", gidMap.ContainerID, gidMap.HostID, gidMap.Size), - "rw", - "rbind", - } + if len(mounts[0].Options) != len(expected) { + t.Errorf("expected %d options, but received %d", len(expected), len(mounts[0].Options)) + } - checkMountOpts := func() { - if len(mounts) != 1 { - t.Errorf("should only have 1 mount but received %d", len(mounts)) - } + m := mounts[0] + for i, v := range expected { + if m.Options[i] != v { + t.Errorf("mount option %q is not valid, expected %q", m.Options[i], v) + } + } - if len(mounts[0].Options) != len(expected) { - t.Errorf("expected %d options, but received %d", len(expected), len(mounts[0].Options)) - } + st, err := os.Stat(filepath.Join(bp, "fs")) + if err != nil { + t.Errorf("failed to stat %s", filepath.Join(bp, "fs")) + } - m := mounts[0] - for i, v := range expected { - if m.Options[i] != v { - t.Errorf("mount option %q is not valid, expected %q", m.Options[i], v) + if stat, ok := st.Sys().(*syscall.Stat_t); !ok { + t.Errorf("incompatible types after stat call: *syscall.Stat_t expected") + } else if stat.Uid != test.expUID || stat.Gid != test.expGID { + t.Errorf("bad mapping: expected {uid: %d, gid: %d}; real {uid: %d, gid: %d}", 
test.expUID, test.expGID, int(stat.Uid), int(stat.Gid)) } } + checkMountOpts() - st, err := os.Stat(filepath.Join(bp, "fs")) - if err != nil { - t.Errorf("failed to stat %s", filepath.Join(bp, "fs")) + expected[2] = "ro" + if err = o.Commit(ctx, "base", key, opts...); err != nil { + t.Fatal(err) } - - if stat, ok := st.Sys().(*syscall.Stat_t); !ok { - t.Errorf("incompatible types after stat call: *syscall.Stat_t expected") - } else if stat.Uid != uidMap.HostID || stat.Gid != gidMap.HostID { - t.Errorf("bad mapping: expected {uid: %d, gid: %d}; real {uid: %d, gid: %d}", uidMap.HostID, gidMap.HostID, int(stat.Uid), int(stat.Gid)) + if mounts, err = o.View(ctx, key, "base", opts...); err != nil { + t.Fatal(err) } - } - checkMountOpts() + bp = getBasePath(ctx, o, root, key) + checkMountOpts() - expected[2] = "ro" - if err = o.Commit(ctx, "base", key, opts...); err != nil { - t.Fatal(err) - } - if mounts, err = o.View(ctx, key, "base", opts...); err != nil { - t.Fatal(err) - } - bp = getBasePath(ctx, o, root, key) - checkMountOpts() - - key = "/tmp/test1" - if mounts, err = o.Prepare(ctx, key, ""); err != nil { - t.Fatal(err) - } + key = "/tmp/test1" + if mounts, err = o.Prepare(ctx, key, ""); err != nil { + t.Fatal(err) + } - bp = getBasePath(ctx, o, root, key) + bp = getBasePath(ctx, o, root, key) - expected = expected[2:] - expected[0] = "rw" + expected = expected[2:] + expected[0] = "rw" - uidMap.HostID = 0 - gidMap.HostID = 0 + test.expUID = 0 + test.expGID = 0 - checkMountOpts() + checkMountOpts() + } } func testOverlayRemappedActive(t *testing.T, newSnapshotter testsuite.SnapshotterFunc) { - var ( - opts []snapshots.Opt - mounts []mount.Mount - ) - - ctx := context.TODO() - root := t.TempDir() - o, _, err := newSnapshotter(ctx, root) - if err != nil { - t.Fatal(err) - } - - if sn, ok := o.(*snapshotter); !ok || !sn.remapIDs { - t.Skip("overlayfs doesn't support idmapped mounts") - } + for _, test := range []struct { + idMap userns.IDMap + snapOpt func(idMap userns.IDMap) snapshots.Opt + expUID uint32 + expGID uint32 + expUIDMntOpt string + expGIDMntOpt string + }{ + { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 65536, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 65536, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithRemapperLabels( + idMap.UidMap[0].ContainerID, idMap.UidMap[0].HostID, + idMap.GidMap[0].ContainerID, idMap.GidMap[0].HostID, + idMap.UidMap[0].Size, + ) + }, + expUID: 666, + expGID: 666, + expUIDMntOpt: "uidmap=0:666:65536", + expGIDMntOpt: "gidmap=0:666:65536", + }, + { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 666, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 6666, + Size: 64536, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 888, + Size: 1000, + }, + { + ContainerID: 1000, + HostID: 8888, + Size: 64536, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithUserNSRemapperLabels(idMap.UidMap, idMap.GidMap) + }, + expUID: 666, + expGID: 888, + expUIDMntOpt: "uidmap=0:666:1000,1000:6666:64536", + expGIDMntOpt: "gidmap=0:888:1000,1000:8888:64536", + }, + } { + var ( + opts []snapshots.Opt + mounts []mount.Mount + ) + + ctx := context.TODO() + root := t.TempDir() + o, _, err := newSnapshotter(ctx, root) + if err != nil { + t.Fatal(err) + } - hostID := uint32(666) - contID := uint32(0) - length := uint32(65536) + if 
sn, ok := o.(*snapshotter); !ok || !sn.remapIDs { + t.Skip("overlayfs doesn't support idmapped mounts") + } - uidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - gidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - opts = append(opts, containerd.WithRemapperLabels( - uidMap.ContainerID, uidMap.HostID, - gidMap.ContainerID, gidMap.HostID, - length), - ) + opts = append(opts, test.snapOpt(test.idMap)) - key := "/tmp/test" - if _, err = o.Prepare(ctx, key, "", opts...); err != nil { - t.Fatal(err) - } - if err = o.Commit(ctx, "base", key, opts...); err != nil { - t.Fatal(err) - } - if mounts, err = o.Prepare(ctx, key, "base", opts...); err != nil { - t.Fatal(err) - } + key := "/tmp/test" + if _, err = o.Prepare(ctx, key, "", opts...); err != nil { + t.Fatal(err) + } + if err = o.Commit(ctx, "base", key, opts...); err != nil { + t.Fatal(err) + } + if mounts, err = o.Prepare(ctx, key, "base", opts...); err != nil { + t.Fatal(err) + } - if len(mounts) != 1 { - t.Errorf("should only have 1 mount but received %d", len(mounts)) - } + if len(mounts) != 1 { + t.Errorf("should only have 1 mount but received %d", len(mounts)) + } - bp := getBasePath(ctx, o, root, key) - expected := []string{ - fmt.Sprintf("uidmap=%d:%d:%d", uidMap.ContainerID, uidMap.HostID, uidMap.Size), - fmt.Sprintf("gidmap=%d:%d:%d", gidMap.ContainerID, gidMap.HostID, gidMap.Size), - fmt.Sprintf("workdir=%s", filepath.Join(bp, "work")), - fmt.Sprintf("upperdir=%s", filepath.Join(bp, "fs")), - fmt.Sprintf("lowerdir=%s", getParents(ctx, o, root, key)[0]), - } + bp := getBasePath(ctx, o, root, key) + expected := []string{ + test.expUIDMntOpt, test.expGIDMntOpt, + fmt.Sprintf("workdir=%s", filepath.Join(bp, "work")), + fmt.Sprintf("upperdir=%s", filepath.Join(bp, "fs")), + fmt.Sprintf("lowerdir=%s", getParents(ctx, o, root, key)[0]), + } - m := mounts[0] - for i, v := range expected { - if m.Options[i] != v { - t.Errorf("mount option %q is invalid, expected %q", m.Options[i], v) + m := mounts[0] + for i, v := range expected { + if m.Options[i] != v { + t.Errorf("mount option %q is invalid, expected %q", m.Options[i], v) + } } - } - st, err := os.Stat(filepath.Join(bp, "fs")) - if err != nil { - t.Errorf("failed to stat %s", filepath.Join(bp, "fs")) - } - if stat, ok := st.Sys().(*syscall.Stat_t); !ok { - t.Errorf("incompatible types after stat call: *syscall.Stat_t expected") - } else if stat.Uid != uidMap.HostID || stat.Gid != gidMap.HostID { - t.Errorf("bad mapping: expected {uid: %d, gid: %d}; received {uid: %d, gid: %d}", uidMap.HostID, gidMap.HostID, int(stat.Uid), int(stat.Gid)) + st, err := os.Stat(filepath.Join(bp, "fs")) + if err != nil { + t.Errorf("failed to stat %s", filepath.Join(bp, "fs")) + } + if stat, ok := st.Sys().(*syscall.Stat_t); !ok { + t.Errorf("incompatible types after stat call: *syscall.Stat_t expected") + } else if stat.Uid != test.expUID || stat.Gid != test.expGID { + t.Errorf("bad mapping: expected {uid: %d, gid: %d}; received {uid: %d, gid: %d}", test.expUID, test.expGID, int(stat.Uid), int(stat.Gid)) + } } } @@ -414,6 +521,24 @@ func testOverlayRemappedInvalidMapping(t *testing.T, newSnapshotter testsuite.Sn snapshots.LabelSnapshotGIDMapping: "-666:-666:-666", }), }, + "WithLabels: negative UID in multiple mappings must fail": { + snapshots.WithLabels(map[string]string{ + snapshots.LabelSnapshotUIDMapping: "1:1:1,-1:-1:-2", + snapshots.LabelSnapshotGIDMapping: "0:0:66666", + }), + }, + "WithLabels: negative GID in 
multiple mappings must fail": { + snapshots.WithLabels(map[string]string{ + snapshots.LabelSnapshotUIDMapping: "0:0:66666", + snapshots.LabelSnapshotGIDMapping: "-1:-1:-2,6:6:6", + }), + }, + "WithLabels: negative GID/UID in multiple mappings must fail": { + snapshots.WithLabels(map[string]string{ + snapshots.LabelSnapshotUIDMapping: "-666:-666:-666,1:1:1", + snapshots.LabelSnapshotGIDMapping: "-666:-666:-666,2:2:2", + }), + }, "WithRemapperLabels: container ID (GID/UID) other than 0 must fail": { containerd.WithRemapperLabels(666, 666, 666, 666, 666), }, @@ -423,6 +548,24 @@ func testOverlayRemappedInvalidMapping(t *testing.T, newSnapshotter testsuite.Sn "WithRemapperLabels: container ID (GID) other than 0 must fail": { containerd.WithRemapperLabels(0, 0, 666, 0, 4294967295), }, + "WithUserNSRemapperLabels: container ID (GID/UID) other than 0 must fail": { + containerd.WithUserNSRemapperLabels( + []specs.LinuxIDMapping{{ContainerID: 666, HostID: 666, Size: 666}}, + []specs.LinuxIDMapping{{ContainerID: 666, HostID: 666, Size: 666}}, + ), + }, + "WithUserNSRemapperLabels: container ID (UID) other than 0 must fail": { + containerd.WithUserNSRemapperLabels( + []specs.LinuxIDMapping{{ContainerID: 666, HostID: 0, Size: 65536}}, + []specs.LinuxIDMapping{{ContainerID: 0, HostID: 0, Size: 65536}}, + ), + }, + "WithUserNSRemapperLabels: container ID (GID) other than 0 must fail": { + containerd.WithUserNSRemapperLabels( + []specs.LinuxIDMapping{{ContainerID: 0, HostID: 0, Size: 4294967295}}, + []specs.LinuxIDMapping{{ContainerID: 666, HostID: 0, Size: 4294967295}}, + ), + }, } { t.Log(desc) if _, err = o.Prepare(ctx, key, "", opts...); err == nil { From 8bbfb65289f3a32fd5358bf7419f8b860a08fbed Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 19:30:21 +0000 Subject: [PATCH 04/46] Update snapshotter opts to support multiple uid/gid mapping entries Signed-off-by: Henry Wang --- client/snapshotter_opts_unix.go | 41 ++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/client/snapshotter_opts_unix.go b/client/snapshotter_opts_unix.go index 5984b2176dbd..4e7cca6e858f 100644 --- a/client/snapshotter_opts_unix.go +++ b/client/snapshotter_opts_unix.go @@ -39,9 +39,24 @@ const ( // to shift the filesystem ownership (user namespace mapping) automatically; currently // supported by the fuse-overlayfs and overlay snapshotters func WithRemapperLabels(ctrUID, hostUID, ctrGID, hostGID, length uint32) snapshots.Opt { + uidMap := []specs.LinuxIDMapping{{ContainerID: ctrUID, HostID: hostUID, Size: length}} + gidMap := []specs.LinuxIDMapping{{ContainerID: ctrGID, HostID: hostGID, Size: length}} + return WithUserNSRemapperLabels(uidMap, gidMap) +} + +// WithUserNSRemapperLabels creates the labels used by any supporting snapshotter +// to shift the filesystem ownership (user namespace mapping) automatically; currently +// supported by the fuse-overlayfs and overlay snapshotters +func WithUserNSRemapperLabels(uidmaps, gidmaps []specs.LinuxIDMapping) snapshots.Opt { + idMap := userns.IDMap{ + UidMap: uidmaps, + GidMap: gidmaps, + } + uidmapLabel, gidmapLabel := idMap.Marshal() return snapshots.WithLabels(map[string]string{ - snapshots.LabelSnapshotUIDMapping: fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length), - snapshots.LabelSnapshotGIDMapping: fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length)}) + snapshots.LabelSnapshotUIDMapping: uidmapLabel, + snapshots.LabelSnapshotGIDMapping: gidmapLabel, + }) } func resolveSnapshotOptions(ctx context.Context, client *Client, 
snapshotterName string, snapshotter snapshots.Snapshotter, parent string, opts ...snapshots.Opt) (string, error) { @@ -89,27 +104,15 @@ func resolveSnapshotOptions(ctx context.Context, client *Client, snapshotterName return "", fmt.Errorf("snapshotter %q doesn't support idmap mounts on this host, configure `slow_chown` to allow a slower and expensive fallback", snapshotterName) } - var uidMap, gidMap specs.LinuxIDMapping - _, err = fmt.Sscanf(uidMapLabel, "%d:%d:%d", &uidMap.ContainerID, &uidMap.HostID, &uidMap.Size) - if err != nil { - return "", fmt.Errorf("uidMapLabel unparsable: %w", err) - } - _, err = fmt.Sscanf(gidMapLabel, "%d:%d:%d", &gidMap.ContainerID, &gidMap.HostID, &gidMap.Size) - if err != nil { - return "", fmt.Errorf("gidMapLabel unparsable: %w", err) + rsn := remappedSnapshot{Parent: parent} + if err = rsn.IDMap.Unmarshal(uidMapLabel, gidMapLabel); err != nil { + return "", fmt.Errorf("failed to unmarshal uid/gid map snapshotter labels: %w", err) } - if uidMap.ContainerID != 0 || gidMap.ContainerID != 0 { - return "", fmt.Errorf("Container UID/GID of 0 only supported currently (%d/%d)", uidMap.ContainerID, gidMap.ContainerID) + if _, err := rsn.IDMap.RootPair(); err != nil { + return "", fmt.Errorf("container UID/GID mapping entries of 0 are required but not found") } - rsn := remappedSnapshot{ - Parent: parent, - IDMap: userns.IDMap{ - UidMap: []specs.LinuxIDMapping{uidMap}, - GidMap: []specs.LinuxIDMapping{gidMap}, - }, - } usernsID, err := rsn.ID() if err != nil { return "", fmt.Errorf("failed to remap snapshot: %w", err) From ec231cdcf27b4bfad8fd51dbe4a3a328158aeb86 Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 19:31:58 +0000 Subject: [PATCH 05/46] Update ctr to support remapper labels with multiple uid/gid mapping entries Signed-off-by: Henry Wang --- cmd/ctr/commands/run/run_unix.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cmd/ctr/commands/run/run_unix.go b/cmd/ctr/commands/run/run_unix.go index af58ed1c360a..492c19d733e4 100644 --- a/cmd/ctr/commands/run/run_unix.go +++ b/cmd/ctr/commands/run/run_unix.go @@ -175,11 +175,7 @@ func NewContainer(ctx context.Context, client *containerd.Client, cliContext *cl // fuse-overlayfs - https://github.com/containerd/fuse-overlayfs-snapshotter // overlay - in case of idmapped mount points are supported by host kernel (Linux kernel 5.19) if cliContext.Bool("remap-labels") { - // TODO: the optimization code path on id mapped mounts only supports single mapping entry today. 
- if len(uidSpec) > 1 || len(gidSpec) > 1 { - return nil, errors.New("'remap-labels' option does not support multiple mappings") - } - cOpts = append(cOpts, containerd.WithNewSnapshot(id, image, containerd.WithRemapperLabels(0, uidSpec[0].HostID, 0, gidSpec[0].HostID, uidSpec[0].Size))) + cOpts = append(cOpts, containerd.WithNewSnapshot(id, image, containerd.WithUserNSRemapperLabels(uidSpec, gidSpec))) } else { cOpts = append(cOpts, containerd.WithUserNSRemappedSnapshot(id, image, uidSpec, gidSpec)) } From ff0d99e02873ac04b4f73054d92d22683a501b7d Mon Sep 17 00:00:00 2001 From: Henry Wang Date: Wed, 13 Nov 2024 19:33:03 +0000 Subject: [PATCH 06/46] Add multiple uid/gid mapping test cases to integration tests Signed-off-by: Henry Wang --- .../client/container_idmapped_linux_test.go | 228 +++++++++++------- 1 file changed, 142 insertions(+), 86 deletions(-) diff --git a/integration/client/container_idmapped_linux_test.go b/integration/client/container_idmapped_linux_test.go index b7aa8a7efeae..f1f63468a446 100644 --- a/integration/client/container_idmapped_linux_test.go +++ b/integration/client/container_idmapped_linux_test.go @@ -23,101 +23,157 @@ import ( "testing" containerd "github.com/containerd/containerd/v2/client" + "github.com/containerd/containerd/v2/core/snapshots" + "github.com/containerd/containerd/v2/internal/userns" "github.com/containerd/containerd/v2/pkg/oci" "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils" "github.com/opencontainers/runtime-spec/specs-go" ) func TestIDMappedOverlay(t *testing.T) { - var ( - upperPath string - lowerPaths []string - snapshotter = "overlayfs" - ctx, cancel = testContext(t) - id = t.Name() - ) - defer cancel() - if ok, err := overlayutils.SupportsIDMappedMounts(); err != nil || !ok { t.Skip("overlayfs doesn't support idmapped mounts") } - client, err := newClient(t, address) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - image, err := client.Pull(ctx, testMultiLayeredImage, containerd.WithPullUnpack) - if err != nil { - t.Fatal(err) - } - t.Logf("image %s pulled!", testMultiLayeredImage) - - hostID := uint32(33) - contID := uint32(0) - length := uint32(65536) - - uidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - gidMap := specs.LinuxIDMapping{ - ContainerID: contID, - HostID: hostID, - Size: length, - } - - container, err := client.NewContainer(ctx, id, - containerd.WithImage(image), - containerd.WithImageConfigLabels(image), - containerd.WithSnapshotter(snapshotter), - containerd.WithNewSnapshot(id, image, containerd.WithRemapperLabels(uidMap.ContainerID, uidMap.HostID, gidMap.ContainerID, gidMap.HostID, length)), - containerd.WithNewSpec(oci.WithImageConfig(image), - oci.WithUserNamespace([]specs.LinuxIDMapping{uidMap}, []specs.LinuxIDMapping{gidMap}), - longCommand)) - if err != nil { - t.Fatal(err) - } - defer container.Delete(ctx, containerd.WithSnapshotCleanup) - - t.Logf("container %s created!", id) - o := client.SnapshotService(snapshotter) - mounts, err := o.Mounts(ctx, id) - if err != nil { - t.Fatal(err) - } - - m := mounts[0] - if m.Type != "overlay" { - t.Fatalf("invalid mount -- %s; expected %s", m.Type, snapshotter) - } - - for _, o := range m.Options { - if strings.HasPrefix(o, "upperdir=") { - upperPath = strings.TrimPrefix(o, "upperdir=") - } else if strings.HasPrefix(o, "lowerdir=") { - lowerPaths = strings.Split(strings.TrimPrefix(o, "lowerdir="), ",") - } - } - - t.Log("check lowerdirs") - for _, l := range lowerPaths { - if _, err := 
os.Stat(l); err == nil { - t.Fatalf("lowerdir=%s should not exist", l) - } - } - - t.Logf("check stats of uppedir=%s", upperPath) - st, err := os.Stat(upperPath) - if err != nil { - t.Fatalf("failed to stat %s", upperPath) - } - - if stat, ok := st.Sys().(*syscall.Stat_t); !ok { - t.Fatalf("incompatible types after stat call: *syscall.Stat_t expected") - } else if stat.Uid != uidMap.HostID || stat.Gid != gidMap.HostID { - t.Fatalf("bad mapping: expected {uid: %d, gid: %d}; real {uid: %d, gid: %d}", uidMap.HostID, gidMap.HostID, int(stat.Uid), int(stat.Gid)) + for name, test := range map[string]struct { + idMap userns.IDMap + snapOpt func(idMap userns.IDMap) snapshots.Opt + expUID uint32 + expGID uint32 + }{ + "TestIDMappedOverlay-SingleMapping": { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 33, + Size: 65535, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 33, + Size: 65535, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithRemapperLabels( + idMap.UidMap[0].ContainerID, idMap.UidMap[0].HostID, + idMap.GidMap[0].ContainerID, idMap.GidMap[0].HostID, + idMap.UidMap[0].Size) + }, + expUID: 33, + expGID: 33, + }, + "TestIDMappedOverlay-MultiMapping": { + idMap: userns.IDMap{ + UidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 33, + Size: 100, + }, + { + ContainerID: 100, + HostID: 333, + Size: 65536, + }, + }, + GidMap: []specs.LinuxIDMapping{ + { + ContainerID: 0, + HostID: 66, + Size: 100, + }, + { + ContainerID: 100, + HostID: 666, + Size: 65536, + }, + }, + }, + snapOpt: func(idMap userns.IDMap) snapshots.Opt { + return containerd.WithUserNSRemapperLabels(idMap.UidMap, idMap.GidMap) + }, + expUID: 33, + expGID: 66, + }, + } { + t.Run(name, func(t *testing.T) { + var ( + upperPath string + lowerPaths []string + snapshotter = "overlayfs" + ctx, cancel = testContext(t) + id = name + ) + defer cancel() + + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + image, err := client.Pull(ctx, testMultiLayeredImage, containerd.WithPullUnpack) + if err != nil { + t.Fatal(err) + } + t.Logf("image %s pulled!", testMultiLayeredImage) + + container, err := client.NewContainer(ctx, id, + containerd.WithImage(image), + containerd.WithImageConfigLabels(image), + containerd.WithSnapshotter(snapshotter), + containerd.WithNewSnapshot(id, image, test.snapOpt(test.idMap)), + containerd.WithNewSpec(oci.WithImageConfig(image), + oci.WithUserNamespace(test.idMap.UidMap, test.idMap.GidMap), + longCommand)) + if err != nil { + t.Fatal(err) + } + defer container.Delete(ctx, containerd.WithSnapshotCleanup) + + t.Logf("container %s created!", id) + o := client.SnapshotService(snapshotter) + mounts, err := o.Mounts(ctx, id) + if err != nil { + t.Fatal(err) + } + + m := mounts[0] + if m.Type != "overlay" { + t.Fatalf("invalid mount -- %s; expected %s", m.Type, snapshotter) + } + + for _, o := range m.Options { + if strings.HasPrefix(o, "upperdir=") { + upperPath = strings.TrimPrefix(o, "upperdir=") + } else if strings.HasPrefix(o, "lowerdir=") { + lowerPaths = strings.Split(strings.TrimPrefix(o, "lowerdir="), ",") + } + } + + t.Log("check lowerdirs") + for _, l := range lowerPaths { + if _, err := os.Stat(l); err == nil { + t.Fatalf("lowerdir=%s should not exist", l) + } + } + + t.Logf("check stats of uppedir=%s", upperPath) + st, err := os.Stat(upperPath) + if err != nil { + t.Fatalf("failed to stat %s", upperPath) + } + + if stat, ok := 
st.Sys().(*syscall.Stat_t); !ok { + t.Fatalf("incompatible types after stat call: *syscall.Stat_t expected") + } else if stat.Uid != test.expUID || stat.Gid != test.expGID { + t.Fatalf("bad mapping: expected {uid: %d, gid: %d}; real {uid: %d, gid: %d}", test.expUID, test.expGID, int(stat.Uid), int(stat.Gid)) + } + }) } } From 47c4dba40935f8c887a7d43f6fbfca5fafadeb7f Mon Sep 17 00:00:00 2001 From: Jin Dong Date: Mon, 16 Dec 2024 02:18:24 +0000 Subject: [PATCH 07/46] Unify default transport in docker resolver The default transport are used in 3 places: 1. `ConfigureDefaultRegistries` (no `hosts_dir` is set) 2. `ConfigureHosts` (when `hosts_dir` is set) 3. in cri service 2 and 3 use/duplicate the same default transport, whereas 1 uses go's default Client/Transport This PR moves the default transport to a common funcion (can pass in tls config). Signed-off-by: Jin Dong --- core/remotes/docker/config/hosts.go | 15 +-------------- core/remotes/docker/registry.go | 22 +++++++++++++++++++++- internal/cri/server/images/image_pull.go | 20 +------------------- 3 files changed, 23 insertions(+), 34 deletions(-) diff --git a/core/remotes/docker/config/hosts.go b/core/remotes/docker/config/hosts.go index 86ea23238cb4..6c7d91edd2d1 100644 --- a/core/remotes/docker/config/hosts.go +++ b/core/remotes/docker/config/hosts.go @@ -28,7 +28,6 @@ import ( "path" "path/filepath" "strings" - "time" "github.com/containerd/errdefs" "github.com/containerd/log" @@ -144,19 +143,7 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos defaultTLSConfig = &tls.Config{} } - defaultTransport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - FallbackDelay: 300 * time.Millisecond, - }).DialContext, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: defaultTLSConfig, - ExpectContinueTimeout: 5 * time.Second, - } + defaultTransport := docker.DefaultHTTPTransport(defaultTLSConfig) client := &http.Client{ Transport: defaultTransport, diff --git a/core/remotes/docker/registry.go b/core/remotes/docker/registry.go index 98cafcd069e6..bbae768b15d8 100644 --- a/core/remotes/docker/registry.go +++ b/core/remotes/docker/registry.go @@ -17,9 +17,11 @@ package docker import ( + "crypto/tls" "errors" "net" "net/http" + "time" ) // HostCapabilities represent the capabilities of the registry @@ -170,7 +172,9 @@ func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { } if config.Client == nil { - config.Client = http.DefaultClient + config.Client = &http.Client{ + Transport: DefaultHTTPTransport(nil), + } } if opts.plainHTTP != nil { @@ -242,3 +246,19 @@ func MatchLocalhost(host string) (bool, error) { return ip.IsLoopback(), nil } + +func DefaultHTTPTransport(defaultTLSConfig *tls.Config) *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + FallbackDelay: 300 * time.Millisecond, + }).DialContext, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: defaultTLSConfig, + ExpectContinueTimeout: 5 * time.Second, + } +} diff --git a/internal/cri/server/images/image_pull.go b/internal/cri/server/images/image_pull.go index de8445932a11..408d4fa1d4d4 100644 --- a/internal/cri/server/images/image_pull.go +++ b/internal/cri/server/images/image_pull.go @@ -22,7 +22,6 @@ 
import ( "encoding/base64" "fmt" "io" - "net" "net/http" "net/url" "path/filepath" @@ -448,7 +447,7 @@ func (c *CRIImageService) registryHosts(ctx context.Context, credentials func(ho } var ( - transport = newTransport() + transport = docker.DefaultHTTPTransport(nil) // no tls config client = &http.Client{Transport: transport} config = c.config.Registry.Configs[u.Host] ) @@ -564,23 +563,6 @@ func (c *CRIImageService) registryEndpoints(host string) ([]string, error) { return append(endpoints, defaultScheme(defaultHost)+"://"+defaultHost), nil } -// newTransport returns a new HTTP transport used to pull image. -// TODO(random-liu): Create a library and share this code with `ctr`. -func newTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - FallbackDelay: 300 * time.Millisecond, - }).DialContext, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 5 * time.Second, - } -} - // encryptedImagesPullOpts returns the necessary list of pull options required // for decryption of encrypted images based on the cri decryption configuration. // Temporarily removed for v2 upgrade From 1e3d10dc29616f7e81b3fef3314d7a44d593c48c Mon Sep 17 00:00:00 2001 From: Mike Baynton Date: Wed, 30 Oct 2024 22:25:18 -0500 Subject: [PATCH 08/46] Make ovl idmap mounts read-only This is a planned follow-on from #10721 primarily at the request of @fuweid, exchanging MNT_DETACH at unmount time for MOUNT_ATTR_RDONLY at mount time. The effect is to increase risk of unmount failure due to EBUSY (as observed in the wild) but add an additional protection that the then-leaked bind mount does not act as a conduit for inadvertent modification of the underlying data, including our own efforts to clean up the mountpoint. Tests covering the lifecycle of the temporary idmap mounts and integrity of the underlying lower layer data is also included in the normal and failed-unmount case. Fixes #10704 Signed-off-by: Mike Baynton --- core/mount/mount_idmapped_linux.go | 13 +- core/mount/mount_idmapped_linux_test.go | 70 +++++++- core/mount/mount_linux.go | 29 ++-- core/mount/mount_linux_test.go | 152 ++++++++++++++++++ .../snapshots/overlay/overlayutils/check.go | 3 +- 5 files changed, 245 insertions(+), 22 deletions(-) diff --git a/core/mount/mount_idmapped_linux.go b/core/mount/mount_idmapped_linux.go index d929157d76f8..775db2b3a0e7 100644 --- a/core/mount/mount_idmapped_linux.go +++ b/core/mount/mount_idmapped_linux.go @@ -61,14 +61,19 @@ func parseIDMapping(mapping string) ([]syscall.SysProcIDMap, error) { }, nil } -// IDMapMount applies GID/UID shift according to gidmap/uidmap for target path +// IDMapMount clones the mount at source to target, applying GID/UID idmapping of the user namespace for target path func IDMapMount(source, target string, usernsFd int) (err error) { + return IDMapMountWithAttrs(source, target, usernsFd, 0, 0) +} + +// IDMapMountWithAttrs clones the mount at source to target with the provided mount options and idmapping of the user namespace. 
+func IDMapMountWithAttrs(source, target string, usernsFd int, attrSet uint64, attrClr uint64) (err error) { var ( attr unix.MountAttr ) - attr.Attr_set = unix.MOUNT_ATTR_IDMAP - attr.Attr_clr = 0 + attr.Attr_set = unix.MOUNT_ATTR_IDMAP | attrSet + attr.Attr_clr = attrClr attr.Propagation = 0 attr.Userns_fd = uint64(usernsFd) @@ -79,7 +84,7 @@ func IDMapMount(source, target string, usernsFd int) (err error) { defer unix.Close(dFd) if err = unix.MountSetattr(dFd, "", unix.AT_EMPTY_PATH, &attr); err != nil { - return fmt.Errorf("Unable to shift GID/UID for %s: %w", target, err) + return fmt.Errorf("Unable to shift GID/UID or set mount attrs for %s: %w", target, err) } if err = unix.MoveMount(dFd, "", -int(unix.EBADF), target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil { diff --git a/core/mount/mount_idmapped_linux_test.go b/core/mount/mount_idmapped_linux_test.go index 90e3a61e82ab..319e349cd58f 100644 --- a/core/mount/mount_idmapped_linux_test.go +++ b/core/mount/mount_idmapped_linux_test.go @@ -18,12 +18,15 @@ package mount import ( "fmt" + "io/fs" "os" "path/filepath" "sync" "syscall" "testing" + "golang.org/x/sys/unix" + kernel "github.com/containerd/containerd/v2/pkg/kernelversion" "github.com/containerd/continuity/testutil" "github.com/stretchr/testify/require" @@ -84,6 +87,8 @@ func TestIdmappedMount(t *testing.T) { t.Run("GetUsernsFD", testGetUsernsFD) t.Run("IDMapMount", testIDMapMount) + + t.Run("IDMapMountWithAttrs", testIDMapMountWithAttrs) } func testGetUsernsFD(t *testing.T) { @@ -129,19 +134,60 @@ func testIDMapMount(t *testing.T) { require.NoError(t, err) defer usernsFD.Close() - srcDir, checkFunc := initIDMappedChecker(t, testUIDMaps, testGIDMaps) + srcDir, checkFunc := initIDMappedChecker(t, testUIDMaps, testGIDMaps, true) destDir := t.TempDir() defer func() { require.NoError(t, UnmountAll(destDir, 0)) }() err = IDMapMount(srcDir, destDir, int(usernsFD.Fd())) - usernsFD.Close() require.NoError(t, err) checkFunc(destDir) } -func initIDMappedChecker(t *testing.T, uidMaps, gidMaps []syscall.SysProcIDMap) (_srcDir string, _verifyFunc func(destDir string)) { +func testIDMapMountWithAttrs(t *testing.T) { + usernsFD, err := getUsernsFD(testUIDMaps, testGIDMaps) + require.NoError(t, err) + defer usernsFD.Close() + + type testCase struct { + name string + srcDir string + setAttr uint64 + checkFunc func(destDir string) + } + + cases := make([]testCase, 0) + srcDir, checkFunc := initIDMappedChecker(t, testUIDMaps, testGIDMaps, true) + cases = append(cases, testCase{ + name: "Writable idmapped mount", + srcDir: srcDir, + setAttr: 0, + checkFunc: checkFunc, + }) + + srcDir, checkFunc = initIDMappedChecker(t, testUIDMaps, testGIDMaps, false) + cases = append(cases, testCase{ + name: "Readonly idmapped mount", + srcDir: srcDir, + setAttr: unix.MOUNT_ATTR_RDONLY, + checkFunc: checkFunc, + }) + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + destDir := t.TempDir() + defer func() { + require.NoError(t, UnmountAll(destDir, 0)) + }() + err := IDMapMountWithAttrs(tc.srcDir, destDir, int(usernsFD.Fd()), tc.setAttr, 0) + require.NoError(t, err) + tc.checkFunc(destDir) + }) + } +} + +func initIDMappedChecker(t *testing.T, uidMaps, gidMaps []syscall.SysProcIDMap, expectWritable bool) (_srcDir string, _verifyFunc func(destDir string)) { testutil.RequiresRoot(t) srcDir := t.TempDir() @@ -159,6 +205,11 @@ func initIDMappedChecker(t *testing.T, uidMaps, gidMaps []syscall.SysProcIDMap) require.NoError(t, err, fmt.Sprintf("chown %v:%v for file %s", uid, gid, file)) } + writableDir := 
filepath.Join(srcDir, "write-test") + require.NoError(t, os.Mkdir(writableDir, os.ModePerm)) + require.NoError(t, os.Chmod(writableDir, os.ModePerm)) + require.NoError(t, os.Chown(writableDir, uidMaps[0].ContainerID, gidMaps[0].ContainerID)) + return srcDir, func(destDir string) { for idx := range uidMaps { file := filepath.Join(destDir, fmt.Sprintf("%v", idx)) @@ -177,5 +228,18 @@ func initIDMappedChecker(t *testing.T, uidMaps, gidMaps []syscall.SysProcIDMap) require.Equal(t, uint32(gid), sysStat.Gid, fmt.Sprintf("check file %s gid", file)) t.Logf("IDMapped File %s uid=%v, gid=%v", file, uid, gid) } + + wf, err := os.Create(filepath.Join(destDir, "write-test", "1")) + if err == nil { + defer wf.Close() + } + if expectWritable { + require.NoError(t, err, "create write-test file") + } else { + require.Error(t, err) + pathErr, isPathErr := err.(*fs.PathError) + require.True(t, isPathErr, "Expecting path error") + require.Equal(t, unix.EROFS, pathErr.Err, "Expecting read-only filesystem error") + } } } diff --git a/core/mount/mount_linux.go b/core/mount/mount_linux.go index a14d545fd0cf..6d63bfad14e6 100644 --- a/core/mount/mount_linux.go +++ b/core/mount/mount_linux.go @@ -66,7 +66,12 @@ func prepareIDMappedOverlay(usernsFd int, options []string) ([]string, func(), e return options, nil, fmt.Errorf("failed to parse overlay lowerdir's from given options") } - tmpLowerdirs, idMapCleanUp, err := doPrepareIDMappedOverlay(lowerDirs, usernsFd) + tempRemountsLocation, err := os.MkdirTemp(tempMountLocation, "ovl-idmapped") + if err != nil { + return options, nil, fmt.Errorf("failed to create temporary overlay lowerdir mount location: %w", err) + } + + tmpLowerdirs, idMapCleanUp, err := doPrepareIDMappedOverlay(tempRemountsLocation, lowerDirs, usernsFd) if err != nil { return options, idMapCleanUp, fmt.Errorf("failed to create idmapped mount: %w", err) } @@ -244,39 +249,35 @@ func getUnprivilegedMountFlags(path string) (int, error) { return flags, nil } -func doPrepareIDMappedOverlay(lowerDirs []string, usernsFd int) (tmpLowerDirs []string, _ func(), _ error) { - td, err := os.MkdirTemp(tempMountLocation, "ovl-idmapped") - if err != nil { - return nil, nil, err - } +func doPrepareIDMappedOverlay(tempRemountsLocation string, lowerDirs []string, usernsFd int) ([]string, func(), error) { + tmpLowerDirs := make([]string, 0, len(lowerDirs)) + cleanUp := func() { for _, lowerDir := range tmpLowerDirs { - // Do a detached unmount so even if the resource is busy, the mount will be - // gone (eventually) and we can safely delete the directory too. - if err := unix.Unmount(lowerDir, unix.MNT_DETACH); err != nil { + if err := unix.Unmount(lowerDir, 0); err != nil { log.L.WithError(err).Warnf("failed to unmount temp lowerdir %s", lowerDir) continue } // Using os.Remove() so if it's not empty, we don't delete files in the // rootfs. if err := os.Remove(lowerDir); err != nil { - log.L.WithError(err).Warnf("failed to remove temporary overlay lowerdir's") + log.L.WithError(err).Warnf("failed to remove temporary overlay lowerdir") } } // This dir should be empty now. Otherwise, we don't do anything. 
- if err := os.Remove(filepath.Join(tmpLowerDirs[0], "..")); err != nil { + if err := os.Remove(tempRemountsLocation); err != nil { log.L.WithError(err).Infof("failed to remove temporary overlay dir") } } for i, lowerDir := range lowerDirs { - tmpLowerDir := filepath.Join(td, strconv.Itoa(i)) + tmpLowerDir := filepath.Join(tempRemountsLocation, strconv.Itoa(i)) tmpLowerDirs = append(tmpLowerDirs, tmpLowerDir) - if err = os.MkdirAll(tmpLowerDir, 0700); err != nil { + if err := os.MkdirAll(tmpLowerDir, 0700); err != nil { return nil, cleanUp, fmt.Errorf("failed to create temporary dir: %w", err) } - if err = IDMapMount(lowerDir, tmpLowerDir, usernsFd); err != nil { + if err := IDMapMountWithAttrs(lowerDir, tmpLowerDir, usernsFd, unix.MOUNT_ATTR_RDONLY, 0); err != nil { return nil, cleanUp, err } } diff --git a/core/mount/mount_linux_test.go b/core/mount/mount_linux_test.go index ec340e831663..988d6be66e97 100644 --- a/core/mount/mount_linux_test.go +++ b/core/mount/mount_linux_test.go @@ -18,12 +18,18 @@ package mount import ( "fmt" + "io/fs" "os" "os/exec" "path/filepath" "reflect" + "syscall" "testing" + kernel "github.com/containerd/containerd/v2/pkg/kernelversion" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/containerd/continuity/testutil" "golang.org/x/sys/unix" ) @@ -198,6 +204,109 @@ func TestUnmountRecursive(t *testing.T) { } } +func TestDoPrepareIDMappedOverlay(t *testing.T) { + testutil.RequiresRoot(t) + + k512 := kernel.KernelVersion{Kernel: 5, Major: 12} + ok, err := kernel.GreaterEqualThan(k512) + require.NoError(t, err) + if !ok { + t.Skip("GetUsernsFD requires kernel >= 5.12") + } + + usernsFD, err := getUsernsFD(testUIDMaps, testGIDMaps) + require.NoError(t, err) + defer usernsFD.Close() + + type testCase struct { + name string + injectUmountFault bool + } + + tcases := []testCase{ + { + name: "normal", + injectUmountFault: false, + }, + { + name: "umount-fault", + injectUmountFault: true, + }, + } + + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { + fakeLowerDirsDir := t.TempDir() + if !supportsIDMap(fakeLowerDirsDir) { + t.Skip("IDmapped mounts not supported on filesystem selected by t.TempDir()") + } + + lowerDirs := []string{filepath.Join(fakeLowerDirsDir, "lower1"), filepath.Join(fakeLowerDirsDir, "lower2")} + for _, dir := range lowerDirs { + require.NoError(t, os.Mkdir(dir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(dir, filepath.Base(dir)), []byte("foo"), 0644)) + } + + remountsLocation := t.TempDir() + + tmpLowerDirs, cleanup, err := doPrepareIDMappedOverlay(remountsLocation, lowerDirs, int(usernsFD.Fd())) + require.NoError(t, err) + require.Len(t, tmpLowerDirs, len(lowerDirs)) + + lowerContents := make([][]byte, len(lowerDirs)) + + for i, dir := range lowerDirs { + correspondingRemount := tmpLowerDirs[i] + filename := filepath.Base(dir) + + expectedFile, err := os.ReadFile(filepath.Join(dir, filename)) + require.NoError(t, err, "reading comparison test fixture file") + lowerContents[i] = expectedFile + + actualFile, err := os.ReadFile(filepath.Join(correspondingRemount, filename)) + require.NoError(t, err, "reading file in temporary remount") + + assert.Equal(t, expectedFile, actualFile, "file content in temporary remount") + } + + var busyDh *os.File + if tc.injectUmountFault { + busyDh, err = os.Open(tmpLowerDirs[0]) + require.NoError(t, err) + defer busyDh.Close() + } + + cleanup() + + _, err = os.Stat(remountsLocation) + + if tc.injectUmountFault { + // We should have failed to remove 
the remounts location if the unmount failed. + assert.NoError(t, err, "expected remounts location to still exist after unmount failure") + } else { + pathErr, isPathErr := err.(*fs.PathError) + require.True(t, isPathErr, "expected a PathError") + assert.Equal(t, unix.ENOENT, pathErr.Err, "temporary remounts should be cleaned up") + } + + // Original lowerdirs should be unaffected. + for i, dir := range lowerDirs { + filename := filepath.Base(dir) + + actualFile, err := os.ReadFile(filepath.Join(dir, filename)) + require.NoError(t, err, "reading file in original lowerdir") + assert.Equal(t, lowerContents[i], actualFile, "file content in original lowerdir") + } + + // If we blocked cleanup, allow it now so the test stays tidy. + if tc.injectUmountFault { + require.NoError(t, busyDh.Close()) + cleanup() + } + }) + } +} + func setupMounts(t *testing.T) (target string, mounts []Mount) { dir1 := t.TempDir() dir2 := t.TempDir() @@ -243,3 +352,46 @@ func setupMounts(t *testing.T) (target string, mounts []Mount) { return target, mounts } + +func supportsIDMap(path string) bool { + treeFD, err := unix.OpenTree(-1, path, uint(unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC)) + if err != nil { + return false + } + defer unix.Close(treeFD) + + // We want to test if idmap mounts are supported. + // So we use just some random mapping, it doesn't really matter which one. + // For the helper command, we just need something that is alive while we + // test this, a sleep 5 will do it. + cmd := exec.Command("sleep", "5") + cmd.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: syscall.CLONE_NEWUSER, + UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: 65536, Size: 65536}}, + GidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: 65536, Size: 65536}}, + } + if err := cmd.Start(); err != nil { + return false + } + defer func() { + _ = cmd.Process.Kill() + _ = cmd.Wait() + }() + + usernsFD := fmt.Sprintf("/proc/%d/ns/user", cmd.Process.Pid) + var usernsFile *os.File + if usernsFile, err = os.Open(usernsFD); err != nil { + return false + } + defer usernsFile.Close() + + attr := unix.MountAttr{ + Attr_set: unix.MOUNT_ATTR_IDMAP, + Userns_fd: uint64(usernsFile.Fd()), + } + if err := unix.MountSetattr(treeFD, "", unix.AT_EMPTY_PATH, &attr); err != nil { + return false + } + + return true +} diff --git a/plugins/snapshots/overlay/overlayutils/check.go b/plugins/snapshots/overlay/overlayutils/check.go index fd943777d282..2d9a6b9ea3fc 100644 --- a/plugins/snapshots/overlay/overlayutils/check.go +++ b/plugins/snapshots/overlay/overlayutils/check.go @@ -261,7 +261,8 @@ func SupportsIDMappedMounts() (bool, error) { } defer usernsFd.Close() - if err = mount.IDMapMount(lowerDir, lowerDir, int(usernsFd.Fd())); err != nil { + // MOUNT_ATTR_RDONLY to replicate our actual construction of overlayfs + if err = mount.IDMapMountWithAttrs(lowerDir, lowerDir, int(usernsFd.Fd()), unix.MOUNT_ATTR_RDONLY, 0); err != nil { return false, fmt.Errorf("failed to remap lowerdir %s: %w", lowerDir, err) } defer func() { From 9fc711a8a0f5ca61007c855d087c5a806d2273cc Mon Sep 17 00:00:00 2001 From: Austin Vazquez Date: Tue, 3 Dec 2024 21:59:36 -0700 Subject: [PATCH 09/46] Clarify Go client API guidance Signed-off-by: Austin Vazquez --- RELEASES.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index d469cc448ed0..2a8a486db79d 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -347,17 +347,14 @@ follow that format. 
 ### Go client API
-The Go client API, documented in
-[godoc](https://godoc.org/github.com/containerd/containerd/v2/client), is currently
-considered unstable. It is recommended to vendor the necessary components to
-stabilize your project build. Note that because the Go API interfaces with the
-GRPC API, clients written against a 1.0 Go API should remain compatible with
-future 1.x series releases.
-
-We intend to stabilize the API in a future release when more integrations have
-been carried out.
-
-Any changes to the API should be detectable at compile time, so upgrading will
+As of containerd 2.0, the Go client API documented in
+[godoc](https://godoc.org/github.com/containerd/containerd/v2/client) is stable.
+Note that because the Go client interfaces with the GRPC API, clients building on top
+of the Go client should remain compatible with future server releases implementing the
+same major GRPC API series. For backwards compatibility, and as a general rule of thumb,
+it is the client's responsibility to handle "not implemented" errors returned by the containerd daemon.
+
+Any changes to the Go client API should be detectable at compile time, so upgrading will
 be a matter of fixing compilation errors and moving from there.

 ### CRI GRPC API

From 58bd48ecff5418efbeacf27134d8adb3e58ab17d Mon Sep 17 00:00:00 2001
From: ningmingxiao
Date: Wed, 27 Mar 2024 10:06:13 +0800
Subject: [PATCH 10/46] add some doc for shim reap orphan process

Signed-off-by: ningmingxiao
---
 core/runtime/v2/README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/core/runtime/v2/README.md b/core/runtime/v2/README.md
index 8b15bd25150e..44927e903574 100644
--- a/core/runtime/v2/README.md
+++ b/core/runtime/v2/README.md
@@ -537,3 +537,10 @@ It works with standard protobufs and GRPC services as well as generating clients
 The only difference between grpc and ttrpc is the wire protocol. ttrpc removes the http stack in order to save memory and binary size to keep shims small.
 It is recommended to use ttrpc in your shim but grpc support is currently an experimental feature.
+
+#### containerd-shim-runc-v2 as sub-reaper
+The shim process acts as a sub-reaper and cleans up exited container or setns(2) processes.
+When a container runs in a new PID namespace, the container should clean up orphaned processes before it exits.
+If the container shares the shim process's PID namespace, its descendant processes will be reparented to the shim process, which reaps them when they exit.
+However, [\[PATCH\] exit: fix the setns() && PR_SET_CHILD_SUBREAPER interaction](https://lore.kernel.org/all/20170130181735.GA11285@redhat.com/#r) prevents any cross-namespace reparenting in the kernel. Assume the container is in PID namespace X and a process P in the root namespace calls setns(2) into X. P forks a child C, and C forks a grandchild G and then exits; G is reparented inside namespace X instead of to P's reaper.
+If the container's PID namespace differs from the shim process's, the container init process should clean up any orphaned reparented processes created by setns processes (exec operations).

From ffbe1b5738951aed8945bf58c23e634433e77eb1 Mon Sep 17 00:00:00 2001
From: Alfred Wingate
Date: Tue, 10 Dec 2024 21:34:57 +0200
Subject: [PATCH 11/46] Use an order-only prerequisite for mandir creation

Otherwise it's a matter of luck whether the man directory is created before
the man pages are generated.
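For context, an order-only prerequisite (everything after the `|` in a rule)
is only guaranteed to be built before the target; changes to it do not mark
the target itself as out of date. The man page rules below therefore take the
form (taken from the diff that follows):

```
man/%: docs/man/%.md FORCE | mandir
```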
Bug: https://bugs.gentoo.org/880057
Signed-off-by: Alfred Wingate
---
 Makefile | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Makefile b/Makefile
index 2b199266d2a7..0625e20ef46b 100644
--- a/Makefile
+++ b/Makefile
@@ -280,7 +280,7 @@ bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and om
 binaries: $(BINARIES) ## build binaries
 	@echo "$(WHALE) $@"

-man: mandir $(addprefix man/,$(MANPAGES))
+man: $(addprefix man/,$(MANPAGES))
 	@echo "$(WHALE) $@"

 mandir:
@@ -289,15 +289,15 @@ mandir:
 # Kept for backwards compatibility
 genman: man/containerd.8 man/ctr.8

-man/containerd.8: bin/gen-manpages FORCE
+man/containerd.8: bin/gen-manpages FORCE | mandir
 	@echo "$(WHALE) $@"
 	$< $(@F) $(@D)

-man/ctr.8: bin/gen-manpages FORCE
+man/ctr.8: bin/gen-manpages FORCE | mandir
 	@echo "$(WHALE) $@"
 	$< $(@F) $(@D)

-man/%: docs/man/%.md FORCE
+man/%: docs/man/%.md FORCE | mandir
 	@echo "$(WHALE) $@"
 	go-md2man -in "$<" -out "$@"

From b7a117b4648c981275e7e7ac944bfabec45fc56a Mon Sep 17 00:00:00 2001
From: Jin Dong
Date: Sat, 11 Jan 2025 01:08:22 +0000
Subject: [PATCH 12/46] Fix fuzz integration tests

Fuzz integration tests on GitHub Actions panic because containerd cannot be
found in $PATH:

```
failed to start daemon: failed to start daemon: exec: "containerd": executable file not found in $PATH: panic: fatal [recovered]
	panic: fatal
```

This is because in GitHub Actions the `OUT` env var (/github/workspace/build-out)
is different from the one used by oss-fuzz.

Signed-off-by: Jin Dong
---
 integration/client/container_fuzz_test.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/integration/client/container_fuzz_test.go b/integration/client/container_fuzz_test.go
index ecfe20bd5164..3a26a80ce6c1 100644
--- a/integration/client/container_fuzz_test.go
+++ b/integration/client/container_fuzz_test.go
@@ -223,10 +223,21 @@ func updatePathEnv() error {
 	oldPathEnv := os.Getenv("PATH")
 	newPathEnv := oldPathEnv + ":" + fuzzBinDir
+
+	if ghWorkspace := os.Getenv("GITHUB_WORKSPACE"); ghWorkspace != "" {
+		// In `oss_fuzz_build.sh`, we build and install containerd binaries to
+		// `$OUT/containerd-binaries`, where `OUT=/out` in the oss-fuzz environment.
+		// However, in GitHub Actions, oss-fuzz maps `OUT` to `$GITHUB_WORKSPACE/build-out`,
+		// so we also append that directory to `PATH` so the fuzz tests work in
+		// both environments.
+		newPathEnv = newPathEnv + ":" + filepath.Join(ghWorkspace, "build-out", "containerd-binaries")
+	}
+
 	err = os.Setenv("PATH", newPathEnv)
 	if err != nil {
 		return err
 	}
+	haveChangedOSSFuzzPATH = true
 	return nil
 }

From 6019bcdfbbed387b366e4e368c30475f5c31f054 Mon Sep 17 00:00:00 2001
From: Jin Dong
Date: Sat, 11 Jan 2025 17:40:28 +0000
Subject: [PATCH 13/46] move FuzzContainerdImport to go native fuzz

Signed-off-by: Jin Dong
---
 ...zzer.go => containerd_import_fuzz_test.go} | 51 ++++++++++---------
 contrib/fuzz/daemon.go | 2 -
 2 files changed, 28 insertions(+), 25 deletions(-)
 rename contrib/fuzz/{containerd_import_fuzzer.go => containerd_import_fuzz_test.go} (62%)

diff --git a/contrib/fuzz/containerd_import_fuzzer.go b/contrib/fuzz/containerd_import_fuzz_test.go
similarity index 62%
rename from contrib/fuzz/containerd_import_fuzzer.go
rename to contrib/fuzz/containerd_import_fuzz_test.go
index 33edbefbb0ae..abdd1496c946 100644
--- a/contrib/fuzz/containerd_import_fuzzer.go
+++ b/contrib/fuzz/containerd_import_fuzz_test.go
@@ -1,5 +1,3 @@
-//go:build gofuzz
-
 /*
    Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +16,8 @@ package fuzz import ( "bytes" "context" + "os" + "testing" fuzz "github.com/AdaLogics/go-fuzz-headers" @@ -31,30 +31,35 @@ func fuzzContext() (context.Context, context.CancelFunc) { return ctx, cancel } -func FuzzContainerdImport(data []byte) int { - initDaemon.Do(startDaemon) - - client, err := containerd.New(defaultAddress) - if err != nil { - return 0 +func FuzzContainerdImport(f *testing.F) { + if os.Getuid() != 0 { + f.Skip("skipping fuzz test that requires root") } - defer client.Close() - f := fuzz.NewConsumer(data) + f.Fuzz(func(t *testing.T, data []byte) { + initDaemon.Do(startDaemon) - noOfImports, err := f.GetInt() - if err != nil { - return 0 - } - maxImports := 20 - ctx, cancel := fuzzContext() - defer cancel() - for i := 0; i < noOfImports%maxImports; i++ { - tarBytes, err := f.GetBytes() + client, err := containerd.New(defaultAddress) if err != nil { - return 0 + return } - _, _ = client.Import(ctx, bytes.NewReader(tarBytes)) - } - return 1 + defer client.Close() + + f := fuzz.NewConsumer(data) + + noOfImports, err := f.GetInt() + if err != nil { + return + } + maxImports := 20 + ctx, cancel := fuzzContext() + defer cancel() + for i := 0; i < noOfImports%maxImports; i++ { + tarBytes, err := f.GetBytes() + if err != nil { + return + } + _, _ = client.Import(ctx, bytes.NewReader(tarBytes)) + } + }) } diff --git a/contrib/fuzz/daemon.go b/contrib/fuzz/daemon.go index 96f7e74ca0a8..59e0ee036044 100644 --- a/contrib/fuzz/daemon.go +++ b/contrib/fuzz/daemon.go @@ -1,5 +1,3 @@ -//go:build gofuzz - /* Copyright The containerd Authors. From fb44e37ff27325edda8e8ad178e1c057139cd4f2 Mon Sep 17 00:00:00 2001 From: Jin Dong Date: Thu, 9 Jan 2025 02:01:35 +0000 Subject: [PATCH 14/46] Remove confusing warning in cri runtime config migration Signed-off-by: Jin Dong --- plugins/cri/runtime/plugin.go | 90 ++++++++++++++++++++++++------ plugins/cri/runtime/plugin_test.go | 69 +++++++++++++++++++++++ 2 files changed, 142 insertions(+), 17 deletions(-) create mode 100644 plugins/cri/runtime/plugin_test.go diff --git a/plugins/cri/runtime/plugin.go b/plugins/cri/runtime/plugin.go index adc64d937f4b..6afd0ddb5f10 100644 --- a/plugins/cri/runtime/plugin.go +++ b/plugins/cri/runtime/plugin.go @@ -51,20 +51,8 @@ func init() { Requires: []plugin.Type{ plugins.WarningPlugin, }, - ConfigMigration: func(ctx context.Context, configVersion int, pluginConfigs map[string]interface{}) error { - if configVersion >= version.ConfigVersion { - return nil - } - c, ok := pluginConfigs[string(plugins.GRPCPlugin)+".cri"] - if !ok { - return nil - } - conf := c.(map[string]interface{}) - migrateConfig(conf) - pluginConfigs[string(plugins.CRIServicePlugin)+".runtime"] = conf - return nil - }, - InitFn: initCRIRuntime, + ConfigMigration: configMigration, + InitFn: initCRIRuntime, }) } @@ -198,12 +186,79 @@ func setGLogLevel() error { return nil } -func migrateConfig(conf map[string]interface{}) { - containerdConf, ok := conf["containerd"] +func configMigration(ctx context.Context, configVersion int, pluginConfigs map[string]interface{}) error { + if configVersion >= version.ConfigVersion { + return nil + } + src, ok := pluginConfigs[string(plugins.GRPCPlugin)+".cri"].(map[string]interface{}) + if !ok { + return nil + } + dst, ok := pluginConfigs[string(plugins.CRIServicePlugin)+".runtime"].(map[string]interface{}) + if !ok { + dst = make(map[string]interface{}) + } + migrateConfig(dst, src) + 
pluginConfigs[string(plugins.CRIServicePlugin)+".runtime"] = dst + return nil +} + +func migrateConfig(dst, src map[string]interface{}) { + for k, v := range src { + switch k { + case "containerd": + // skip (handled separately below) + continue + case + "sandbox_image", + "registry", + "image_decryption", + "max_concurrent_downloads", + "image_pull_progress_timeout", + "image_pull_with_sync_fs", + "stats_collect_period": + // skip (moved to cri image service plugin) + continue + case + "disable_tcp_service", + "stream_server_address", + "stream_server_port", + "stream_idle_timeout", + "enable_tls_streaming", + "x509_key_pair_streaming": + // skip (moved to cri ServerConfig) + continue + default: + if _, ok := dst[k]; !ok { + dst[k] = v + } + } + } + + // migrate cri containerd configs + containerdConf, ok := src["containerd"].(map[string]interface{}) if !ok { return } - runtimesConf, ok := containerdConf.(map[string]interface{})["runtimes"] + newContainerdConf, ok := dst["containerd"].(map[string]interface{}) + if !ok { + newContainerdConf = map[string]interface{}{} + } + for k, v := range containerdConf { + switch k { + case "snapshotter", "disable_snapshot_annotations", "discard_unpacked_layers": + // skip (moved to cri image service plugin) + continue + default: + if _, ok := newContainerdConf[k]; !ok { + newContainerdConf[k] = v + } + } + } + dst["containerd"] = newContainerdConf + + // migrate runtimes configs + runtimesConf, ok := newContainerdConf["runtimes"] if !ok { return } @@ -212,6 +267,7 @@ func migrateConfig(conf map[string]interface{}) { if sandboxMode, ok := runtimeConf["sandbox_mode"]; ok { if _, ok := runtimeConf["sandboxer"]; !ok { runtimeConf["sandboxer"] = sandboxMode + delete(runtimeConf, "sandbox_mode") } } } diff --git a/plugins/cri/runtime/plugin_test.go b/plugins/cri/runtime/plugin_test.go new file mode 100644 index 000000000000..711888bd3dd9 --- /dev/null +++ b/plugins/cri/runtime/plugin_test.go @@ -0,0 +1,69 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package runtime + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/containerd/containerd/v2/plugins" +) + +func TestCRIRuntimePluginConfigMigration(t *testing.T) { + runcSandboxer := "podsandbox" + + grpcCri := map[string]interface{}{ + "enable_selinux": true, + "max_container_log_line_size": 100, + "max_concurrent_downloads": 3, // removed since it's moved to cri image service + "disable_tcp_service": true, // removed since it's moved to cri grpc service + "containerd": map[string]interface{}{ + "runtimes": map[string]interface{}{ + "runc": map[string]interface{}{ + "sandbox_mode": runcSandboxer, + }, + }, + }, + } + + pluginConfigs := map[string]interface{}{ + string(plugins.GRPCPlugin) + ".cri": grpcCri, + } + configMigration(context.Background(), 2, pluginConfigs) + + runtimeConf, ok := pluginConfigs[string(plugins.CRIServicePlugin)+".runtime"].(map[string]interface{}) + require.True(t, ok) + require.NotNil(t, runtimeConf) + assert.Equal(t, grpcCri["enable_selinux"], runtimeConf["enable_selinux"]) + assert.Equal(t, grpcCri["max_container_log_line_size"], runtimeConf["max_container_log_line_size"]) + assert.NotContains(t, runtimeConf, "max_concurrent_downloads") + assert.NotContains(t, runtimeConf, "disable_tcp_service") + + ctd, ok := runtimeConf["containerd"].(map[string]interface{}) + require.True(t, ok) + require.NotNil(t, ctd) + + runtimes := ctd["runtimes"].(map[string]interface{}) + runc, ok := runtimes["runc"].(map[string]interface{}) + require.True(t, ok) + require.NotNil(t, runc) + assert.Equal(t, runcSandboxer, runc["sandboxer"]) + assert.NotContains(t, runc, "sandbox_mode") +} From c73c8e5d526aba6acf0eb75976bfc5a1037d64ac Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 3 Sep 2024 20:19:27 +0800 Subject: [PATCH 15/46] Introduce EROFS differ The EROFS differ only applies to EROFS layers which are marked by a special file `.erofslayer` generated by the EROFS snapshotter. Why it's needed? Since we'd like to parse []mount.Mount directly without actual mounting and convert OCI layers into EROFS blobs, `.erofslayer` gives a hint that the active snapshotter supports the output blob generated by the EROFS differ. I'd suggest it could be read together with the next commit. 
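For reference, putting the differ ahead of the walking differ is done through
the diff-service plugin configuration; a minimal sketch, mirroring the
`config.toml` snippet in the documentation patch later in this series:

```
[plugins."io.containerd.service.v1.diff-service"]
  default = ["erofs","walking"]
```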
Signed-off-by: cardy.tang Signed-off-by: Gao Xiang --- cmd/containerd/builtins/builtins_linux.go | 1 + plugins/diff/erofs/differ_linux.go | 189 ++++++++++++++++++++++ plugins/diff/erofs/plugin/plugin_linux.go | 63 ++++++++ 3 files changed, 253 insertions(+) create mode 100644 plugins/diff/erofs/differ_linux.go create mode 100644 plugins/diff/erofs/plugin/plugin_linux.go diff --git a/cmd/containerd/builtins/builtins_linux.go b/cmd/containerd/builtins/builtins_linux.go index deed083b843c..02577bc4e870 100644 --- a/cmd/containerd/builtins/builtins_linux.go +++ b/cmd/containerd/builtins/builtins_linux.go @@ -20,6 +20,7 @@ import ( _ "github.com/containerd/containerd/api/types/runc/options" _ "github.com/containerd/containerd/v2/core/metrics/cgroups" _ "github.com/containerd/containerd/v2/core/metrics/cgroups/v2" + _ "github.com/containerd/containerd/v2/plugins/diff/erofs/plugin" _ "github.com/containerd/containerd/v2/plugins/diff/walking/plugin" _ "github.com/containerd/containerd/v2/plugins/snapshots/blockfile/plugin" _ "github.com/containerd/containerd/v2/plugins/snapshots/native/plugin" diff --git a/plugins/diff/erofs/differ_linux.go b/plugins/diff/erofs/differ_linux.go new file mode 100644 index 000000000000..5686435f0e62 --- /dev/null +++ b/plugins/diff/erofs/differ_linux.go @@ -0,0 +1,189 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package erofs + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "time" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/diff" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/errdefs" + "github.com/containerd/log" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var emptyDesc = ocispec.Descriptor{} + +type differ interface { + diff.Applier + diff.Comparer +} + +// erofsDiff does erofs comparison and application +type erofsDiff struct { + store content.Store + mkfsExtraOpts []string +} + +func NewErofsDiffer(store content.Store, mkfsExtraOpts []string) differ { + return &erofsDiff{ + store: store, + mkfsExtraOpts: mkfsExtraOpts, + } +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s erofsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + return emptyDesc, fmt.Errorf("erofsDiff does not implement Compare method: %w", errdefs.ErrNotImplemented) +} + +func convertTarErofs(ctx context.Context, r io.Reader, layerPath string, mkfsExtraOpts []string) error { + args := append([]string{"--tar=f", "--aufs", "--quiet", "-Enoinline_data"}, mkfsExtraOpts...) + args = append(args, layerPath) + cmd := exec.CommandContext(ctx, "mkfs.erofs", args...) 
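+	// Feed the (already decompressed) layer tar to mkfs.erofs on stdin; the
+	// resulting EROFS blob is written to layerPath.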
+ cmd.Stdin = r + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("erofs apply failed: %s: %w", out, err) + } + log.G(ctx).Debugf("running %s %s %v", cmd.Path, cmd.Args, string(out)) + return nil +} + +// Get the snapshot layer directory in order to generate EROFS-formatted blobs; +// +// If mount[0].Type is `bind` or `erofs`, it just tries the source dir; Or if +// mount[0].Type is `overlayfs`, it tries the parent of the upperdir; +// +// The candidate will be checked with ".erofslayer" to make sure this active +// snapshot is really generated by the EROFS snapshotter instead of others. +func mountsToLayer(mounts []mount.Mount) (string, error) { + var layer string + mnt := mounts[0] + if mnt.Type == "bind" || mnt.Type == "erofs" { + layer = filepath.Dir(mnt.Source) + } else if mnt.Type == "overlay" { + layer = "" + for _, o := range mnt.Options { + if strings.HasPrefix(o, "upperdir=") { + layer = filepath.Dir(strings.TrimPrefix(o, "upperdir=")) + } + } + if layer == "" { + return "", fmt.Errorf("unsupported overlay layer for erofs differ: %w", errdefs.ErrNotImplemented) + } + } else { + return "", fmt.Errorf("invalid filesystem type for erofs differ: %w", errdefs.ErrNotImplemented) + } + // If the layer is not prepared by the EROFS snapshotter, fall back to the next differ + if _, err := os.Stat(filepath.Join(layer, ".erofslayer")); err != nil { + return "", fmt.Errorf("mount layer type must be erofs-layer: %w", errdefs.ErrNotImplemented) + } + return layer, nil +} + +func (s erofsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + t1 := time.Now() + defer func() { + if err == nil { + log.G(ctx).WithFields(log.Fields{ + "d": time.Since(t1), + "digest": desc.Digest, + "size": desc.Size, + "media": desc.MediaType, + }).Debugf("diff applied") + } + }() + + if _, err := images.DiffCompression(ctx, desc.MediaType); err != nil { + return emptyDesc, fmt.Errorf("currently unsupported media type: %s", desc.MediaType) + } + + var config diff.ApplyConfig + for _, o := range opts { + if err := o(ctx, desc, &config); err != nil { + return emptyDesc, fmt.Errorf("failed to apply config opt: %w", err) + } + } + + layer, err := mountsToLayer(mounts) + if err != nil { + return emptyDesc, err + } + + ra, err := s.store.ReaderAt(ctx, desc) + if err != nil { + return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) + } + defer ra.Close() + + processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) + for { + if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { + return emptyDesc, fmt.Errorf("failed to get stream processor for %s: %w", desc.MediaType, err) + } + if processor.MediaType() == ocispec.MediaTypeImageLayer { + break + } + } + defer processor.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(processor, digester.Hash()), + } + + layerBlobPath := path.Join(layer, "layer.erofs") + err = convertTarErofs(ctx, rc, layerBlobPath, s.mkfsExtraOpts) + if err != nil { + return emptyDesc, fmt.Errorf("failed to convert erofs: %w", err) + } + + // Read any trailing data + if _, err := io.Copy(io.Discard, rc); err != nil { + return emptyDesc, err + } + + return ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + }, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err 
error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} diff --git a/plugins/diff/erofs/plugin/plugin_linux.go b/plugins/diff/erofs/plugin/plugin_linux.go new file mode 100644 index 000000000000..735afd131ec6 --- /dev/null +++ b/plugins/diff/erofs/plugin/plugin_linux.go @@ -0,0 +1,63 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "fmt" + "os/exec" + + "github.com/containerd/containerd/v2/core/metadata" + "github.com/containerd/containerd/v2/plugins" + "github.com/containerd/containerd/v2/plugins/diff/erofs" + "github.com/containerd/platforms" + "github.com/containerd/plugin" + "github.com/containerd/plugin/registry" +) + +// Config represents configuration for the erofs plugin. +type Config struct { + // MkfsOptions are extra options used for the applier + MkfsOptions []string `toml:"mkfs_options"` +} + +func init() { + registry.Register(&plugin.Registration{ + Type: plugins.DiffPlugin, + ID: "erofs", + Requires: []plugin.Type{ + plugins.MetadataPlugin, + }, + Config: &Config{}, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + _, err := exec.LookPath("mkfs.erofs") + if err != nil { + return nil, fmt.Errorf("could not find mkfs.erofs: %v: %w", err, plugin.ErrSkipPlugin) + } + + md, err := ic.GetSingle(plugins.MetadataPlugin) + if err != nil { + return nil, err + } + + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + cs := md.(*metadata.DB).ContentStore() + config := ic.Config.(*Config) + + return erofs.NewErofsDiffer(cs, config.MkfsOptions), nil + }, + }) +} From 2486d542a5a96d71e3c8bb36517479e0a81f0131 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 18 Sep 2024 18:48:54 +0800 Subject: [PATCH 16/46] Introduce EROFS Snapshotter It allows us to mount each EROFS blob layer (generated by the EROFS differ) independently, or use the "unpacked" fs/ directories (if some other differ is used.) Currently, it's somewhat like the overlay snapshotter, but I tend to separate the new EROFS logic into a self-contained component, rather than keeping it tangled in the very beginning. Existing users who use the overlay snapshotter won't be impacted at all but they have a chance to use this new snapshotter to leverage the EROFS filesystem. 
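To make the resulting mounts concrete: for an active snapshot whose parents are
committed EROFS layers, the snapshotter hands back roughly the following shape
(paths abbreviated for illustration; see `mounts()` in erofs_linux.go below for
the real construction):

```
mount.Mount{
	Type:   "overlay",
	Source: "overlay",
	Options: []string{
		"workdir=<root>/snapshots/<id>/work",
		"upperdir=<root>/snapshots/<id>/fs",
		"lowerdir=<parent>/fs:<grandparent>/fs",
	},
}
```

Each lowerdir is an EROFS mount of that parent's committed layer.erofs blob.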
Signed-off-by: cardy.tang Signed-off-by: Gao Xiang --- cmd/containerd/builtins/builtins_linux.go | 1 + plugins/diff/erofs/differ_linux.go | 54 +- plugins/snapshots/erofs/erofs_linux.go | 538 ++++++++++++++++++ .../snapshots/erofs/erofsutils/mount_linux.go | 88 +++ .../snapshots/erofs/plugin/plugin_linux.go | 65 +++ 5 files changed, 695 insertions(+), 51 deletions(-) create mode 100644 plugins/snapshots/erofs/erofs_linux.go create mode 100644 plugins/snapshots/erofs/erofsutils/mount_linux.go create mode 100644 plugins/snapshots/erofs/plugin/plugin_linux.go diff --git a/cmd/containerd/builtins/builtins_linux.go b/cmd/containerd/builtins/builtins_linux.go index 02577bc4e870..19e762090bc6 100644 --- a/cmd/containerd/builtins/builtins_linux.go +++ b/cmd/containerd/builtins/builtins_linux.go @@ -23,6 +23,7 @@ import ( _ "github.com/containerd/containerd/v2/plugins/diff/erofs/plugin" _ "github.com/containerd/containerd/v2/plugins/diff/walking/plugin" _ "github.com/containerd/containerd/v2/plugins/snapshots/blockfile/plugin" + _ "github.com/containerd/containerd/v2/plugins/snapshots/erofs/plugin" _ "github.com/containerd/containerd/v2/plugins/snapshots/native/plugin" _ "github.com/containerd/containerd/v2/plugins/snapshots/overlay/plugin" ) diff --git a/plugins/diff/erofs/differ_linux.go b/plugins/diff/erofs/differ_linux.go index 5686435f0e62..52f9fa0bf247 100644 --- a/plugins/diff/erofs/differ_linux.go +++ b/plugins/diff/erofs/differ_linux.go @@ -20,17 +20,14 @@ import ( "context" "fmt" "io" - "os" - "os/exec" "path" - "path/filepath" - "strings" "time" "github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/core/diff" "github.com/containerd/containerd/v2/core/images" "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/containerd/v2/plugins/snapshots/erofs/erofsutils" "github.com/containerd/errdefs" "github.com/containerd/log" digest "github.com/opencontainers/go-digest" @@ -63,51 +60,6 @@ func (s erofsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts return emptyDesc, fmt.Errorf("erofsDiff does not implement Compare method: %w", errdefs.ErrNotImplemented) } -func convertTarErofs(ctx context.Context, r io.Reader, layerPath string, mkfsExtraOpts []string) error { - args := append([]string{"--tar=f", "--aufs", "--quiet", "-Enoinline_data"}, mkfsExtraOpts...) - args = append(args, layerPath) - cmd := exec.CommandContext(ctx, "mkfs.erofs", args...) - cmd.Stdin = r - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("erofs apply failed: %s: %w", out, err) - } - log.G(ctx).Debugf("running %s %s %v", cmd.Path, cmd.Args, string(out)) - return nil -} - -// Get the snapshot layer directory in order to generate EROFS-formatted blobs; -// -// If mount[0].Type is `bind` or `erofs`, it just tries the source dir; Or if -// mount[0].Type is `overlayfs`, it tries the parent of the upperdir; -// -// The candidate will be checked with ".erofslayer" to make sure this active -// snapshot is really generated by the EROFS snapshotter instead of others. 
-func mountsToLayer(mounts []mount.Mount) (string, error) { - var layer string - mnt := mounts[0] - if mnt.Type == "bind" || mnt.Type == "erofs" { - layer = filepath.Dir(mnt.Source) - } else if mnt.Type == "overlay" { - layer = "" - for _, o := range mnt.Options { - if strings.HasPrefix(o, "upperdir=") { - layer = filepath.Dir(strings.TrimPrefix(o, "upperdir=")) - } - } - if layer == "" { - return "", fmt.Errorf("unsupported overlay layer for erofs differ: %w", errdefs.ErrNotImplemented) - } - } else { - return "", fmt.Errorf("invalid filesystem type for erofs differ: %w", errdefs.ErrNotImplemented) - } - // If the layer is not prepared by the EROFS snapshotter, fall back to the next differ - if _, err := os.Stat(filepath.Join(layer, ".erofslayer")); err != nil { - return "", fmt.Errorf("mount layer type must be erofs-layer: %w", errdefs.ErrNotImplemented) - } - return layer, nil -} - func (s erofsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { t1 := time.Now() defer func() { @@ -132,7 +84,7 @@ func (s erofsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [] } } - layer, err := mountsToLayer(mounts) + layer, err := erofsutils.MountsToLayer(mounts) if err != nil { return emptyDesc, err } @@ -160,7 +112,7 @@ func (s erofsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [] } layerBlobPath := path.Join(layer, "layer.erofs") - err = convertTarErofs(ctx, rc, layerBlobPath, s.mkfsExtraOpts) + err = erofsutils.ConvertTarErofs(ctx, rc, layerBlobPath, s.mkfsExtraOpts) if err != nil { return emptyDesc, fmt.Errorf("failed to convert erofs: %w", err) } diff --git a/plugins/snapshots/erofs/erofs_linux.go b/plugins/snapshots/erofs/erofs_linux.go new file mode 100644 index 000000000000..224418e5951a --- /dev/null +++ b/plugins/snapshots/erofs/erofs_linux.go @@ -0,0 +1,538 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package erofs + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/containerd/v2/core/snapshots" + "github.com/containerd/containerd/v2/core/snapshots/storage" + "github.com/containerd/containerd/v2/plugins/snapshots/erofs/erofsutils" + "github.com/containerd/continuity/fs" + "github.com/containerd/log" + "github.com/containerd/plugin" + "golang.org/x/sys/unix" +) + +// SnapshotterConfig is used to configure the erofs snapshotter instance +type SnapshotterConfig struct { + // ovlOptions are the base options added to the overlayfs mount (defaults to [""]) + ovlOptions []string +} + +// Opt is an option to configure the erofs snapshotter +type Opt func(config *SnapshotterConfig) + +// WithOvlOptions defines the extra mount options for overlayfs +func WithOvlOptions(options []string) Opt { + return func(config *SnapshotterConfig) { + config.ovlOptions = options + } +} + +type MetaStore interface { + TransactionContext(ctx context.Context, writable bool) (context.Context, storage.Transactor, error) + WithTransaction(ctx context.Context, writable bool, fn storage.TransactionCallback) error + Close() error +} + +type snapshotter struct { + root string + ms *storage.MetaStore + ovlOptions []string +} + +// check if EROFS kernel filesystem is registered or not +func findErofs() bool { + fs, err := os.ReadFile("/proc/filesystems") + if err != nil { + return false + } + return bytes.Contains(fs, []byte("\terofs\n")) +} + +// we have to claim it as uint32, otherwise s390x CI will complain.. :( +const erofsSuperMagic = uint32(0xE0F5E1E2) + +// Check if a directory is actually an EROFS mount, which is used to setup or +// recover EROFS mounts for lowerdirs. +func isErofs(dir string) bool { + var st unix.Statfs_t + if err := unix.Statfs(dir, &st); err != nil { + return false + } + return uint32(st.Type) == erofsSuperMagic +} + +// NewSnapshotter returns a Snapshotter which uses EROFS+OverlayFS. The layers +// are stored under the provided root. A metadata file is stored under the root. +func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { + var config SnapshotterConfig + for _, opt := range opts { + opt(&config) + } + + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return nil, err + } + if !supportsDType { + return nil, fmt.Errorf("%s does not support d_type. 
If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + + if !findErofs() { + return nil, fmt.Errorf("EROFS unsupported, please `modprobe erofs`: %w", plugin.ErrSkipPlugin) + } + + ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + return &snapshotter{ + root: root, + ms: ms, + ovlOptions: config.ovlOptions, + }, nil +} + +// Close closes the snapshotter +func (s *snapshotter) Close() error { + return s.ms.Close() +} + +func (s *snapshotter) upperPath(id string) string { + return filepath.Join(s.root, "snapshots", id, "fs") +} + +func (s *snapshotter) workPath(id string) string { + return filepath.Join(s.root, "snapshots", id, "work") +} + +// A committed layer blob generated by the EROFS differ +func (s *snapshotter) layerBlobPath(id string) string { + return filepath.Join(s.root, "snapshots", id, "layer.erofs") +} + +func (s *snapshotter) lowerPath(id string) (mount.Mount, string, error) { + layerBlob := s.layerBlobPath(id) + if _, err := os.Stat(layerBlob); err != nil { + return mount.Mount{}, "", fmt.Errorf("failed to find valid erofs layer blob: %w", err) + } + + return mount.Mount{ + Source: layerBlob, + Type: "erofs", + Options: []string{"ro"}, + }, s.upperPath(id), nil +} + +func (s *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { + td, err := os.MkdirTemp(snapshotDir, "new-") + if err != nil { + return "", fmt.Errorf("failed to create temp dir: %w", err) + } + + if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { + return td, err + } + + if kind == snapshots.KindActive { + if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { + return td, err + } + } + // Create a special file for the EROFS differ to indicate it will be + // prepared as an EROFS layer by the EROFS snapshotter. + if err := os.WriteFile(filepath.Join(td, ".erofslayer"), []byte{}, 0644); err != nil { + return td, err + } + return td, nil +} + +func (s *snapshotter) mounts(snap storage.Snapshot, info snapshots.Info) ([]mount.Mount, error) { + var options []string + + if len(snap.ParentIDs) == 0 { + // If the EROFS layer blob is valid, only snapshots.KindView is allowed. + m, _, err := s.lowerPath(snap.ID) + if err == nil { + if snap.Kind != snapshots.KindView { + return nil, fmt.Errorf("only works for snapshots.KindView on a committed snapshot: %w", err) + } + // We have to force a loop device here since mount[] is static. 
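+			// (Unlike the parent lowerdirs handled below, there is no later chance
+			// to fall back to a loop device if a file-backed mount fails, so always
+			// request loop here.)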
+ m.Options = append(m.Options, "loop") + return []mount.Mount{m}, nil + } + // if we only have one layer/no parents then just return a bind mount as overlay + // will not work + roFlag := "rw" + if snap.Kind == snapshots.KindView { + roFlag = "ro" + } + return []mount.Mount{ + { + Source: s.upperPath(snap.ID), + Type: "bind", + Options: append(options, + roFlag, + "rbind", + ), + }, + }, nil + } + + if snap.Kind == snapshots.KindActive { + options = append(options, + fmt.Sprintf("workdir=%s", s.workPath(snap.ID)), + fmt.Sprintf("upperdir=%s", s.upperPath(snap.ID)), + ) + } else if len(snap.ParentIDs) == 1 { + m, _, err := s.lowerPath(snap.ParentIDs[0]) + if err != nil { + return nil, err + } + return []mount.Mount{m}, nil + } + + var lowerdirs []string + for i := range snap.ParentIDs { + m, mntpoint, err := s.lowerPath(snap.ParentIDs[i]) + if err != nil { + return nil, err + } + + // If the lowerdir is actually an EROFS committed layer but + // doesn't have an EROFS mount. Let's recover now. + if mntpoint != m.Source && !isErofs(mntpoint) { + err := m.Mount(mntpoint) + // Use loop if the current kernel (6.12+) doesn't support file-backed mount + if err == unix.ENOTBLK { + m.Options = append(m.Options, "loop") + err = m.Mount(mntpoint) + } + if err != nil { + return nil, err + } + } + lowerdirs = append(lowerdirs, mntpoint) + } + options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(lowerdirs, ":"))) + options = append(options, s.ovlOptions...) + + return []mount.Mount{{ + Type: "overlay", + Source: "overlay", + Options: options, + }}, nil +} + +func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { + var ( + snap storage.Snapshot + td, path string + info snapshots.Info + ) + + defer func() { + if err != nil { + if td != "" { + if err1 := os.RemoveAll(td); err1 != nil { + log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory") + } + } + if path != "" { + if err1 := os.RemoveAll(path); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal") + err = fmt.Errorf("failed to remove path: %v: %w", err1, err) + } + } + } + }() + + if err := s.ms.WithTransaction(ctx, true, func(ctx context.Context) (err error) { + snapshotDir := filepath.Join(s.root, "snapshots") + td, err = s.prepareDirectory(ctx, snapshotDir, kind) + if err != nil { + return fmt.Errorf("failed to create prepare snapshot dir: %w", err) + } + + snap, err = storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
+ if err != nil { + return fmt.Errorf("failed to create snapshot: %w", err) + } + + _, info, _, err = storage.GetInfo(ctx, key) + if err != nil { + return fmt.Errorf("failed to get snapshot info: %w", err) + } + + if len(snap.ParentIDs) > 0 { + st, err := os.Stat(s.upperPath(snap.ParentIDs[0])) + if err != nil { + return fmt.Errorf("failed to stat parent: %w", err) + } + + stat := st.Sys().(*syscall.Stat_t) + if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { + return fmt.Errorf("failed to chown: %w", err) + } + } + + path = filepath.Join(snapshotDir, snap.ID) + if err = os.Rename(td, path); err != nil { + return fmt.Errorf("failed to rename: %w", err) + } + td = "" + + return nil + }); err != nil { + return nil, err + } + return s.mounts(snap, info) +} + +func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return s.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) +} + +func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return s.createSnapshot(ctx, snapshots.KindView, key, parent, opts) +} + +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + var layerBlob, upperDir string + + // Apply the overlayfs upperdir (generated by non-EROFS differs) into a EROFS blob + // in a read transaction first since conversion could be slow. + err := s.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + id, _, _, err := storage.GetInfo(ctx, key) + if err != nil { + return err + } + + // If the layer blob doesn't exist, which means this layer wasn't applied by + // the EROFS differ (possibly the walking differ), convert the upperdir instead. + layerBlob = s.layerBlobPath(id) + if _, err := os.Stat(layerBlob); err != nil { + upperDir = s.upperPath(id) + err = erofsutils.ConvertErofs(ctx, layerBlob, upperDir, nil) + if err != nil { + return err + } + + // Remove all sub-directories in the overlayfs upperdir. Leave the + // overlayfs upperdir itself since it's used for Lchown. 
+ fd, err := os.Open(upperDir) + if err != nil { + return err + } + defer fd.Close() + + dirs, err := fd.Readdirnames(0) + if err != nil { + return err + } + + for _, d := range dirs { + dir := filepath.Join(upperDir, d) + if err := os.RemoveAll(dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + } + } + } + return nil + }) + + if err != nil { + return err + } + return s.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + if _, err := os.Stat(layerBlob); err != nil { + return fmt.Errorf("failed to get the converted erofs blob: %w", err) + } + + usage, err := fs.DiskUsage(ctx, layerBlob) + if err != nil { + return err + } + if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { + return fmt.Errorf("failed to commit snapshot %s: %w", key, err) + } + return nil + }) +} + +func (s *snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, err error) { + var snap storage.Snapshot + var info snapshots.Info + if err := s.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + snap, err = storage.GetSnapshot(ctx, key) + if err != nil { + return fmt.Errorf("failed to get active mount: %w", err) + } + + _, info, _, err = storage.GetInfo(ctx, key) + if err != nil { + return fmt.Errorf("failed to get snapshot info: %w", err) + } + return nil + }); err != nil { + return nil, err + } + return s.mounts(snap, info) +} + +func (s *snapshotter) getCleanupDirectories(ctx context.Context) ([]string, error) { + ids, err := storage.IDMap(ctx) + if err != nil { + return nil, err + } + + snapshotDir := filepath.Join(s.root, "snapshots") + fd, err := os.Open(snapshotDir) + if err != nil { + return nil, err + } + defer fd.Close() + + dirs, err := fd.Readdirnames(0) + if err != nil { + return nil, err + } + + cleanup := []string{} + for _, d := range dirs { + if _, ok := ids[d]; ok { + continue + } + cleanup = append(cleanup, filepath.Join(snapshotDir, d)) + } + + return cleanup, nil +} + +// Remove abandons the snapshot identified by key. The snapshot will +// immediately become unavailable and unrecoverable. Disk space will +// be freed up on the next call to `Cleanup`. +func (s *snapshotter) Remove(ctx context.Context, key string) (err error) { + var removals []string + var id string + // Remove directories after the transaction is closed, failures must not + // return error since the transaction is committed with the removal + // key no longer available. 
+ defer func() { + if err == nil { + if err := mount.UnmountAll(s.upperPath(id), 0); err != nil { + log.G(ctx).Warnf("failed to unmount EROFS mount for %v", id) + } + + for _, dir := range removals { + if err := os.RemoveAll(dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + } + } + } + }() + return s.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + id, _, err = storage.Remove(ctx, key) + if err != nil { + return fmt.Errorf("failed to remove snapshot %s: %w", key, err) + } + + removals, err = s.getCleanupDirectories(ctx) + if err != nil { + return fmt.Errorf("unable to get directories for removal: %w", err) + } + return nil + }) +} + +func (s *snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info, err error) { + err = s.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + _, info, _, err = storage.GetInfo(ctx, key) + return err + }) + if err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (_ snapshots.Info, err error) { + err = s.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + info, err = storage.UpdateInfo(ctx, info, fieldpaths...) + return err + }) + if err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (s *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + return s.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + return storage.WalkInfo(ctx, fn, fs...) + }) +} + +// Usage returns the resources taken by the snapshot identified by key. +// +// For active snapshots, this will scan the usage of the overlay "diff" (aka +// "upper") directory and may take some time. +// +// For committed snapshots, the value is returned from the metadata database. +func (s *snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, err error) { + var ( + usage snapshots.Usage + info snapshots.Info + id string + ) + if err := s.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + id, info, usage, err = storage.GetInfo(ctx, key) + return err + }); err != nil { + return usage, err + } + + if info.Kind == snapshots.KindActive { + upperPath := s.upperPath(id) + du, err := fs.DiskUsage(ctx, upperPath) + if err != nil { + // TODO(stevvooe): Consider not reporting an error in this case. + return snapshots.Usage{}, err + } + usage = snapshots.Usage(du) + } + return usage, nil +} diff --git a/plugins/snapshots/erofs/erofsutils/mount_linux.go b/plugins/snapshots/erofs/erofsutils/mount_linux.go new file mode 100644 index 000000000000..583c32a15f0a --- /dev/null +++ b/plugins/snapshots/erofs/erofsutils/mount_linux.go @@ -0,0 +1,88 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package erofsutils + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/errdefs" + "github.com/containerd/log" +) + +func ConvertTarErofs(ctx context.Context, r io.Reader, layerPath string, mkfsExtraOpts []string) error { + args := append([]string{"--tar=f", "--aufs", "--quiet", "-Enoinline_data"}, mkfsExtraOpts...) + args = append(args, layerPath) + cmd := exec.CommandContext(ctx, "mkfs.erofs", args...) + cmd.Stdin = r + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("erofs apply failed: %s: %w", out, err) + } + log.G(ctx).Infof("running %s %s %v", cmd.Path, cmd.Args, string(out)) + return nil +} + +func ConvertErofs(ctx context.Context, layerPath string, srcDir string, mkfsExtraOpts []string) error { + args := append([]string{"--quiet", "-Enoinline_data"}, mkfsExtraOpts...) + args = append(args, layerPath, srcDir) + cmd := exec.CommandContext(ctx, "mkfs.erofs", args...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("erofs apply failed: %s: %w", out, err) + } + log.G(ctx).Infof("running %s %s %v", cmd.Path, cmd.Args, string(out)) + return nil +} + +// Get the snapshot layer directory in order to generate EROFS-formatted blobs; +// +// If mount[0].Type is `bind` or `erofs`, it just tries the source dir; Or if +// mount[0].Type is `overlayfs`, it tries the parent of the upperdir; +// +// The candidate will be checked with ".erofslayer" to make sure this active +// snapshot is really generated by the EROFS snapshotter instead of others. +func MountsToLayer(mounts []mount.Mount) (string, error) { + var layer string + mnt := mounts[0] + if mnt.Type == "bind" || mnt.Type == "erofs" { + layer = filepath.Dir(mnt.Source) + } else if mnt.Type == "overlay" { + layer = "" + for _, o := range mnt.Options { + if strings.HasPrefix(o, "upperdir=") { + layer = filepath.Dir(strings.TrimPrefix(o, "upperdir=")) + } + } + if layer == "" { + return "", fmt.Errorf("unsupported overlay layer for erofs differ: %w", errdefs.ErrNotImplemented) + } + } else { + return "", fmt.Errorf("invalid filesystem type for erofs differ: %w", errdefs.ErrNotImplemented) + } + // If the layer is not prepared by the EROFS snapshotter, fall back to the next differ + if _, err := os.Stat(filepath.Join(layer, ".erofslayer")); err != nil { + return "", fmt.Errorf("mount layer type must be erofs-layer: %w", errdefs.ErrNotImplemented) + } + return layer, nil +} diff --git a/plugins/snapshots/erofs/plugin/plugin_linux.go b/plugins/snapshots/erofs/plugin/plugin_linux.go new file mode 100644 index 000000000000..8263d5dc1340 --- /dev/null +++ b/plugins/snapshots/erofs/plugin/plugin_linux.go @@ -0,0 +1,65 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "errors" + + "github.com/containerd/containerd/v2/plugins" + "github.com/containerd/containerd/v2/plugins/snapshots/erofs" + "github.com/containerd/platforms" + "github.com/containerd/plugin" + "github.com/containerd/plugin/registry" +) + +// Config represents configuration for the native plugin. +type Config struct { + // Root directory for the plugin + RootPath string `toml:"root_path"` + + // MountOptions are options used for the EROFS overlayfs mount + OvlOptions []string `toml:"ovl_mount_options"` +} + +func init() { + registry.Register(&plugin.Registration{ + Type: plugins.SnapshotPlugin, + ID: "erofs", + Config: &Config{}, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + + config, ok := ic.Config.(*Config) + if !ok { + return nil, errors.New("invalid erofs configuration") + } + + var opts []erofs.Opt + root := ic.Properties[plugins.PropertyRootDir] + if len(config.RootPath) != 0 { + root = config.RootPath + } + + if len(config.OvlOptions) > 0 { + opts = append(opts, erofs.WithOvlOptions(config.OvlOptions)) + } + + ic.Meta.Exports[plugins.SnapshotterRootDir] = root + return erofs.NewSnapshotter(root, opts...) + }, + }) +} From fd4caef7866306f9e654f54ba0209c7f4a554ad9 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 19 Sep 2024 10:55:10 +0800 Subject: [PATCH 17/46] Add EROFS snapshotter documentation Signed-off-by: Gao Xiang --- docs/snapshotters/README.md | 1 + docs/snapshotters/erofs.md | 143 ++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+) create mode 100644 docs/snapshotters/erofs.md diff --git a/docs/snapshotters/README.md b/docs/snapshotters/README.md index 96d143970f85..05bf326df9f4 100644 --- a/docs/snapshotters/README.md +++ b/docs/snapshotters/README.md @@ -17,6 +17,7 @@ Block-based: Filesystem-specific: - `btrfs`: btrfs. Needs the plugin root (`/var/lib/containerd/io.containerd.snapshotter.v1.btrfs`) to be mounted as btrfs. - `zfs`: ZFS. Needs the plugin root (`/var/lib/containerd/io.containerd.snapshotter.v1.zfs`) to be mounted as ZFS. See also https://github.com/containerd/zfs . +- `erofs`: EROFS. `OverlayFS` kernel module needs to be enabled for active snapshots. See also [`erofs.md`](./erofs.md). [Deprecated](https://github.com/containerd/containerd/blob/main/RELEASES.md#deprecated-features): - `aufs`: AUFS. Deprecated since containerd 1.5. Removed in containerd 2.0. See also https://github.com/containerd/aufs . diff --git a/docs/snapshotters/erofs.md b/docs/snapshotters/erofs.md new file mode 100644 index 000000000000..d6871bc13c7d --- /dev/null +++ b/docs/snapshotters/erofs.md @@ -0,0 +1,143 @@ +# EROFS Snapshotter + +The [EROFS](https://erofs.docs.kernel.org) snapshotter is an experimental +feature, which is able to leverage EROFS-formatted blobs for each committed +snapshot and prepares an EROFS + OverlayFS mount for each active snapshot. + +In order to leverage EROFS-formatted blobs, the EROFS differ is needed to be +used together to apply image layers. Otherwise, the EROFS snapshotter will +just behave as the existing OverlayFS snapshotter: the default applier will +unpack the image layer into the active EROFS snapshot, and commit it. + +Although it sounds somewhat similar to an enhanced OverlayFS snapshotter but +I believe there are clear differences if looking into `s.mount()` and it highly +tightens to the EROFS internals. 
+Currently, it is not obvious how to fold this into an enhanced OverlayFS
+snapshotter directly, and it is not urgent either: starting out as an
+independent snapshotter means existing overlayfs users are not impacted by the
+new behaviors, while users get a chance to try it and develop the related
+ecosystems (such as ComposeFS, confidential containers, Kata, gVisor, and more)
+together.
+
+## Use Cases
+
+The EROFS snapshotter can benefit several use cases:
+
+For runC containers, instead of unpacking individual files into a directory
+on the backing filesystem, it applies OCI layers as EROFS blobs, which brings:
+
+ - Improved image unpacking performance (~14% for the WordPress image with the
+   latest erofs-utils 1.8.2) due to reduced metadata overhead;
+
+ - Full data protection for each snapshot using the S_IMMUTABLE file attribute
+   or fsverity. Currently, fsverity can only protect blob data in the content
+   store;
+
+ - Parallel unpacking can be supported in a more reliable way (fsync) compared
+   to the overlayfs snapshotter (syncfs);
+
+ - Native EROFS layers can be pulled from registries without conversion.
+
+For VM containers, the EROFS snapshotter can efficiently pass through and share
+image layers, offering several advantages (e.g. better performance and smaller
+memory footprints) over [virtiofs](https://virtio-fs.gitlab.io) or
+[9p](https://www.kernel.org/doc/Documentation/filesystems/9p.txt). In addition,
+the popular application kernel [gVisor](https://gvisor.dev/) also supports
+[EROFS](https://github.com/google/gvisor/pull/9486) for efficient image
+pass-through.
+
+## Usage
+
+### Checking if the EROFS snapshotter and differ are available
+
+To check whether the EROFS snapshotter and differ are available, run the
+following command:
+
+```bash
+$ ctr plugins ls | grep erofs
+```
+
+The output should include lines like the following:
+```
+io.containerd.snapshotter.v1  erofs  linux/amd64  ok
+io.containerd.differ.v1       erofs  linux/amd64  ok
+```
+
+### Ensure that EROFS is available
+
+On newer Ubuntu/Debian systems, erofs-utils can be installed directly with apt,
+and on Fedora with dnf:
+
+```bash
+# Debian/Ubuntu
+$ apt install erofs-utils
+# Fedora
+$ dnf install erofs-utils
+```
+
+Make sure that the erofs-utils version is 1.7 or higher.
+
+Before using the EROFS snapshotter, also make sure the _EROFS kernel module_ is
+loaded; if needed, load it with `modprobe erofs`.
+
+### Configuration
+
+The following configuration can be used in your containerd `config.toml`. Don't
+forget to restart containerd after changing the configuration.
+
+```
+ [plugins."io.containerd.service.v1.diff-service"]
+   default = ["erofs","walking"]
+```
+
+### Running a container
+
+To run a container using the EROFS snapshotter, the snapshotter needs to be
+explicitly specified:
+
+```bash
+$ # ensure that the image we are using exists; it is a regular OCI image
+$ ctr image pull docker.io/library/busybox:latest
+$ # run the container with the specified snapshotter
+$ ctr run -rm -t --snapshotter erofs docker.io/library/busybox:latest hello sh
+```
+
+## How It Works
+
+For each layer, the EROFS snapshotter prepares a directory containing the
+following items:
+
+```
+  .erofslayer
+  fs
+  work
+```
+
+The `.erofslayer` file indicates that the layer was prepared by the EROFS
+snapshotter.
+
+If the EROFS differ is also enabled, the differ will check for the existence
+of `.erofslayer` and convert the image content blob (e.g., an OCI layer) into
+an EROFS layer blob.
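+
+Conceptually, the conversion is close to streaming the decompressed layer tar
+into `mkfs.erofs` (illustrative invocation only; the differ drives this
+programmatically, `layer.tar` stands for the decompressed OCI layer here, and
+any configured `mkfs_options` are appended):
+
+```bash
+$ mkfs.erofs --tar=f --aufs --quiet -Enoinline_data layer.erofs < layer.tar
+```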
+ +In this case, the snapshot layer directory will look like this: +``` + .erofslayer + fs + layer.erofs + work +``` + +Then the EROFS snapshotter will check for the existence of `layer.erofs`: it +will mount the EROFS layer blob to `fs/` and return a valid overlayfs mount +with all parent layers. + +If other differs (not the EROFS differ) are used, the EROFS snapshotter will +convert the flat directory into an EROFS layer blob on Commit instead. + +In other words, the EROFS differ can only be used with the EROFS snapshotter; +otherwise, it will skip to the next differ. The EROFS snapshotter can work +with or without the EROFS differ. + + +## TODO + +The EROFS Fsmerge feature is NOT supported in the current implementation +because it was somewhat unclean (relying on `containerd.io/snapshot.ref`). +It needs to be reconsidered later. From 2f15d6586b261d0f0bc68b847660dc2b691169db Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 17 Dec 2024 16:54:51 +0800 Subject: [PATCH 18/46] Add tests for EROFS snapshotter Some basic tests for now. Signed-off-by: Gao Xiang --- plugins/snapshots/erofs/erofs_linux_test.go | 53 +++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 plugins/snapshots/erofs/erofs_linux_test.go diff --git a/plugins/snapshots/erofs/erofs_linux_test.go b/plugins/snapshots/erofs/erofs_linux_test.go new file mode 100644 index 000000000000..5e66274b43d8 --- /dev/null +++ b/plugins/snapshots/erofs/erofs_linux_test.go @@ -0,0 +1,53 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package erofs + +import ( + "context" + "os/exec" + "testing" + + "github.com/containerd/containerd/v2/core/snapshots" + "github.com/containerd/containerd/v2/core/snapshots/testsuite" + "github.com/containerd/containerd/v2/pkg/testutil" +) + +func newSnapshotter(t *testing.T) func(ctx context.Context, root string) (snapshots.Snapshotter, func() error, error) { + _, err := exec.LookPath("mkfs.erofs") + if err != nil { + t.Skipf("could not find mkfs.erofs: %v", err) + } + + if !findErofs() { + t.Skip("check for erofs kernel support failed, skipping test") + } + return func(ctx context.Context, root string) (snapshots.Snapshotter, func() error, error) { + var opts []Opt + + snapshotter, err := NewSnapshotter(root, opts...) 
+ if err != nil { + return nil, nil, err + } + + return snapshotter, func() error { return snapshotter.Close() }, nil + } +} + +func TestErofs(t *testing.T) { + testutil.RequiresRoot(t) + testsuite.SnapshotterSuite(t, "erofs", newSnapshotter(t)) +} From 6148dbdd778942f7b1f5361d3e18859ada70f4d6 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 13 Jan 2025 08:18:55 -0800 Subject: [PATCH 19/46] Update platforms to latest rc Signed-off-by: Derek McGowan --- go.mod | 2 +- go.sum | 4 +- .../containerd/platforms/compare.go | 83 +++++++++--------- .../containerd/platforms/database.go | 19 +---- .../containerd/platforms/defaults_windows.go | 76 ----------------- ..._windows.go => platform_windows_compat.go} | 84 ++++++++++++++++++- .../containerd/platforms/platforms.go | 37 +++++++- .../containerd/platforms/platforms_other.go | 30 ------- .../containerd/platforms/platforms_windows.go | 34 -------- vendor/modules.txt | 2 +- 10 files changed, 163 insertions(+), 208 deletions(-) rename vendor/github.com/containerd/platforms/{platform_compat_windows.go => platform_windows_compat.go} (58%) delete mode 100644 vendor/github.com/containerd/platforms/platforms_other.go delete mode 100644 vendor/github.com/containerd/platforms/platforms_windows.go diff --git a/go.mod b/go.mod index fbe1a7551697..7beff6597362 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/containerd/log v0.1.0 github.com/containerd/nri v0.8.0 github.com/containerd/otelttrpc v0.1.0 - github.com/containerd/platforms v1.0.0-rc.0 + github.com/containerd/platforms v1.0.0-rc.1 github.com/containerd/plugin v1.0.0 github.com/containerd/ttrpc v1.2.7 github.com/containerd/typeurl/v2 v2.2.3 diff --git a/go.sum b/go.sum index c78c9f58cecf..4af444e11ac8 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,8 @@ github.com/containerd/nri v0.8.0 h1:n1S753B9lX8RFrHYeSgwVvS1yaUcHjxbB+f+xzEncRI= github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ= github.com/containerd/otelttrpc v0.1.0 h1:UOX68eVTE8H/T45JveIg+I22Ev2aFj4qPITCmXsskjw= github.com/containerd/otelttrpc v0.1.0/go.mod h1:XhoA2VvaGPW1clB2ULwrBZfXVuEWuyOd2NUD1IM0yTg= -github.com/containerd/platforms v1.0.0-rc.0 h1:GuHWSKgVVO3POn6nRBB4sH63uPOLa87yuuhsGLWaXAA= -github.com/containerd/platforms v1.0.0-rc.0/go.mod h1:T1XAzzOdYs3it7l073MNXyxRwQofJfqwi/8cRjufIk4= +github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= +github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= github.com/containerd/plugin v1.0.0/go.mod h1:hQfJe5nmWfImiqT1q8Si3jLv3ynMUIBB47bQ+KexvO8= github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go index 0abe54140f94..24403f3b3d45 100644 --- a/vendor/github.com/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/platforms/compare.go @@ -31,6 +31,34 @@ type MatchComparer interface { Less(specs.Platform, specs.Platform) bool } +type platformVersions struct { + major []int + minor []int +} + +var arm64variantToVersion = map[string]platformVersions{ + "v8": {[]int{8}, []int{0}}, + "v8.0": {[]int{8}, []int{0}}, + "v8.1": {[]int{8}, []int{1}}, + "v8.2": {[]int{8}, []int{2}}, + "v8.3": {[]int{8}, []int{3}}, + "v8.4": {[]int{8}, []int{4}}, + "v8.5": {[]int{8}, []int{5}}, + "v8.6": {[]int{8}, []int{6}}, + "v8.7": {[]int{8}, 
[]int{7}}, + "v8.8": {[]int{8}, []int{8}}, + "v8.9": {[]int{8}, []int{9}}, + "v9": {[]int{9, 8}, []int{0, 5}}, + "v9.0": {[]int{9, 8}, []int{0, 5}}, + "v9.1": {[]int{9, 8}, []int{1, 6}}, + "v9.2": {[]int{9, 8}, []int{2, 7}}, + "v9.3": {[]int{9, 8}, []int{3, 8}}, + "v9.4": {[]int{9, 8}, []int{4, 9}}, + "v9.5": {[]int{9, 8}, []int{5, 9}}, + "v9.6": {[]int{9, 8}, []int{6, 9}}, + "v9.7": {[]int{9, 8}, []int{7, 9}}, +} + // platformVector returns an (ordered) vector of appropriate specs.Platform // objects to try matching for the given platform object (see platforms.Only). func platformVector(platform specs.Platform) []specs.Platform { @@ -73,52 +101,19 @@ func platformVector(platform specs.Platform) []specs.Platform { variant = "v8" } - majorVariant, minorVariant, hasMinor := strings.Cut(variant, ".") - if armMajor, err := strconv.Atoi(strings.TrimPrefix(majorVariant, "v")); err == nil && armMajor >= 8 { - armMinor := 0 - if len(variant) == 4 { - if minor, err := strconv.Atoi(minorVariant); err == nil && hasMinor { - armMinor = minor - } - } - - if armMajor == 9 { - for minor := armMinor - 1; minor >= 0; minor-- { - arm64Variant := "v" + strconv.Itoa(armMajor) + "." + strconv.Itoa(minor) - if minor == 0 { - arm64Variant = "v" + strconv.Itoa(armMajor) - } - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: arm64Variant, - }) - } - - // v9.0 diverged from v8.5, meaning that v9.x is compatible with v8.{x+5} until v9.4/v8.9 - armMinor = armMinor + 5 - if armMinor > 9 { - armMinor = 9 - } - armMajor = 8 - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v8." + strconv.Itoa(armMinor), - }) - } - - for minor := armMinor - 1; minor >= 0; minor-- { - arm64Variant := "v" + strconv.Itoa(armMajor) + "." + strconv.Itoa(minor) + vector = []specs.Platform{} // Reset vector, the first variant will be added in loop. + arm64Versions, ok := arm64variantToVersion[variant] + if !ok { + break + } + for i, major := range arm64Versions.major { + for minor := arm64Versions.minor[i]; minor >= 0; minor-- { + arm64Variant := "v" + strconv.Itoa(major) + "." + strconv.Itoa(minor) if minor == 0 { - arm64Variant = "v" + strconv.Itoa(armMajor) + arm64Variant = "v" + strconv.Itoa(major) } vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, + Architecture: "arm64", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, @@ -129,7 +124,7 @@ func platformVector(platform specs.Platform) []specs.Platform { // All arm64/v8.x and arm64/v9.x are compatible with arm/v8 (32-bits) and below. // There's no arm64 v9 variant, so it's normalized to v8. 
- if strings.HasPrefix(variant, "v8.") || strings.HasPrefix(variant, "v9.") { + if strings.HasPrefix(variant, "v8") || strings.HasPrefix(variant, "v9") { variant = "v8" } vector = append(vector, platformVector(specs.Platform{ diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go index 4c7c669605a7..7a6f0d98cdda 100644 --- a/vendor/github.com/containerd/platforms/database.go +++ b/vendor/github.com/containerd/platforms/database.go @@ -86,22 +86,11 @@ func normalizeArch(arch, variant string) (string, string) { } case "aarch64", "arm64": arch = "arm64" - majorVariant, minorVariant, hasMinor := strings.Cut(variant, ".") - majorVariant = strings.TrimPrefix(majorVariant, "v") - if minorVariant == "0" { - minorVariant = "" - hasMinor = false - } - - if (majorVariant == "" || majorVariant == "8") && !hasMinor { - // normalize v8 to empty string + switch variant { + case "8", "v8", "v8.0": variant = "" - } else { - // otherwise to v8.x or v9 or v9.x - variant = "v" + majorVariant - if hasMinor { - variant = variant + "." + minorVariant - } + case "9", "9.0", "v9.0": + variant = "v9" } case "armhf": arch = "arm" diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go index 8bae4eb003bf..0165adea7e41 100644 --- a/vendor/github.com/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/platforms/defaults_windows.go @@ -19,8 +19,6 @@ package platforms import ( "fmt" "runtime" - "strconv" - "strings" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" @@ -38,80 +36,6 @@ func DefaultSpec() specs.Platform { } } -type windowsmatcher struct { - specs.Platform - osVersionPrefix string - defaultMatcher Matcher -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m windowsmatcher) Match(p specs.Platform) bool { - match := m.defaultMatcher.Match(p) - - if match && m.OS == "windows" { - // HPC containers do not have OS version filled - if m.OSVersion == "" || p.OSVersion == "" { - return true - } - - hostOsVersion := getOSVersion(m.osVersionPrefix) - ctrOsVersion := getOSVersion(p.OSVersion) - return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion) - } - - return match -} - -func getOSVersion(osVersionPrefix string) osVersion { - parts := strings.Split(osVersionPrefix, ".") - if len(parts) < 3 { - return osVersion{} - } - - majorVersion, _ := strconv.ParseUint(parts[0], 10, 8) - minorVersion, _ := strconv.ParseUint(parts[1], 10, 8) - buildNumber, _ := strconv.ParseUint(parts[2], 10, 16) - - return osVersion{ - MajorVersion: uint8(majorVersion), - MinorVersion: uint8(minorVersion), - Build: uint16(buildNumber), - } -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. 
-func (m windowsmatcher) Less(p1, p2 specs.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -func prefix(v string) string { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return v - } - return strings.Join(parts[0:3], ".") -} - // Default returns the current platform's default platform specification. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_windows_compat.go similarity index 58% rename from vendor/github.com/containerd/platforms/platform_compat_windows.go rename to vendor/github.com/containerd/platforms/platform_windows_compat.go index 89e66f0c0903..7f3d9966bcf0 100644 --- a/vendor/github.com/containerd/platforms/platform_compat_windows.go +++ b/vendor/github.com/containerd/platforms/platform_windows_compat.go @@ -16,9 +16,16 @@ package platforms -// osVersion is a wrapper for Windows version information +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// windowsOSVersion is a wrapper for Windows version information // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type osVersion struct { +type windowsOSVersion struct { Version uint32 MajorVersion uint8 MinorVersion uint8 @@ -55,7 +62,7 @@ var compatLTSCReleases = []uint16{ // Every release after WS 2022 will support the previous ltsc // container image. Stable ABI is in preview mode for windows 11 client. 
// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility -func checkHostAndContainerCompat(host, ctr osVersion) bool { +func checkWindowsHostAndContainerCompat(host, ctr windowsOSVersion) bool { // check major minor versions of host and guest if host.MajorVersion != ctr.MajorVersion || host.MinorVersion != ctr.MinorVersion { @@ -76,3 +83,74 @@ func checkHostAndContainerCompat(host, ctr osVersion) bool { } return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build } + +func getWindowsOSVersion(osVersionPrefix string) windowsOSVersion { + if strings.Count(osVersionPrefix, ".") < 2 { + return windowsOSVersion{} + } + + major, extra, _ := strings.Cut(osVersionPrefix, ".") + minor, extra, _ := strings.Cut(extra, ".") + build, _, _ := strings.Cut(extra, ".") + + majorVersion, err := strconv.ParseUint(major, 10, 8) + if err != nil { + return windowsOSVersion{} + } + + minorVersion, err := strconv.ParseUint(minor, 10, 8) + if err != nil { + return windowsOSVersion{} + } + buildNumber, err := strconv.ParseUint(build, 10, 16) + if err != nil { + return windowsOSVersion{} + } + + return windowsOSVersion{ + MajorVersion: uint8(majorVersion), + MinorVersion: uint8(minorVersion), + Build: uint16(buildNumber), + } +} + +func winRevision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +type windowsVersionMatcher struct { + windowsOSVersion +} + +func (m windowsVersionMatcher) Match(v string) bool { + if m.isEmpty() || v == "" { + return true + } + osv := getWindowsOSVersion(v) + return checkWindowsHostAndContainerCompat(m.windowsOSVersion, osv) +} + +func (m windowsVersionMatcher) isEmpty() bool { + return m.MajorVersion == 0 && m.MinorVersion == 0 && m.Build == 0 +} + +type windowsMatchComparer struct { + Matcher +} + +func (c *windowsMatchComparer) Less(p1, p2 specs.Platform) bool { + m1, m2 := c.Match(p1), c.Match(p2) + if m1 && m2 { + r1, r2 := winRevision(p1.OSVersion), winRevision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go index 7a84449c0cf3..14d65abd4f86 100644 --- a/vendor/github.com/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/platforms/platforms.go @@ -144,18 +144,51 @@ type Matcher interface { // // Applications should opt to use `Match` over directly parsing specifiers. func NewMatcher(platform specs.Platform) Matcher { - return newDefaultMatcher(platform) + m := &matcher{ + Platform: Normalize(platform), + } + + if platform.OS == "windows" { + m.osvM = &windowsVersionMatcher{ + windowsOSVersion: getWindowsOSVersion(platform.OSVersion), + } + // In prior versions, on windows, the returned matcher implements a + // MatchComprarer interface. + // This preserves that behavior for backwards compatibility. + // + // TODO: This isn't actually used in this package, except for a test case, + // which may have been an unintended side of some refactor. + // It was likely intended to be used in `Ordered` but it is not since + // `Less` that is implemented here ends up getting masked due to wrapping. 
+ if runtime.GOOS == "windows" { + return &windowsMatchComparer{m} + } + } + return m +} + +type osVerMatcher interface { + Match(string) bool } type matcher struct { specs.Platform + osvM osVerMatcher } func (m *matcher) Match(platform specs.Platform) bool { normalized := Normalize(platform) return m.OS == normalized.OS && m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant + m.Variant == normalized.Variant && + m.matchOSVersion(platform) +} + +func (m *matcher) matchOSVersion(platform specs.Platform) bool { + if m.osvM != nil { + return m.osvM.Match(platform.OSVersion) + } + return true } func (m *matcher) String() string { diff --git a/vendor/github.com/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go deleted file mode 100644 index 03f4dcd99814..000000000000 --- a/vendor/github.com/containerd/platforms/platforms_other.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// NewMatcher returns the default Matcher for containerd -func newDefaultMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} diff --git a/vendor/github.com/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go deleted file mode 100644 index 950e2a2ddbb5..000000000000 --- a/vendor/github.com/containerd/platforms/platforms_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package platforms - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// NewMatcher returns a Windows matcher that will match on osVersionPrefix if -// the platform is Windows otherwise use the default matcher -func newDefaultMatcher(platform specs.Platform) Matcher { - prefix := prefix(platform.OSVersion) - return windowsmatcher{ - Platform: platform, - osVersionPrefix: prefix, - defaultMatcher: &matcher{ - Platform: Normalize(platform), - }, - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 88a3059e3eae..60a9f71a1fc3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -182,7 +182,7 @@ github.com/containerd/nri/types/v1 ## explicit; go 1.21 github.com/containerd/otelttrpc github.com/containerd/otelttrpc/internal -# github.com/containerd/platforms v1.0.0-rc.0 +# github.com/containerd/platforms v1.0.0-rc.1 ## explicit; go 1.20 github.com/containerd/platforms # github.com/containerd/plugin v1.0.0 From b49df6af11dbf7e4fc715e972c8e816edcb02309 Mon Sep 17 00:00:00 2001 From: Jin Dong Date: Sat, 11 Jan 2025 17:41:43 +0000 Subject: [PATCH 20/46] move FuzzCRIServer to go native fuzz Signed-off-by: Jin Dong --- contrib/fuzz/containerd_import_fuzz_test.go | 2 +- ...{cri_fuzzer.go => cri_server_fuzz_test.go} | 251 ++++++++---------- contrib/fuzz/cri_server_fuzzer.go | 92 ------- internal/cri/server/fuzz.go | 33 --- 4 files changed, 107 insertions(+), 271 deletions(-) rename contrib/fuzz/{cri_fuzzer.go => cri_server_fuzz_test.go} (75%) delete mode 100644 contrib/fuzz/cri_server_fuzzer.go delete mode 100644 internal/cri/server/fuzz.go diff --git a/contrib/fuzz/containerd_import_fuzz_test.go b/contrib/fuzz/containerd_import_fuzz_test.go index abdd1496c946..a15221a14029 100644 --- a/contrib/fuzz/containerd_import_fuzz_test.go +++ b/contrib/fuzz/containerd_import_fuzz_test.go @@ -41,7 +41,7 @@ func FuzzContainerdImport(f *testing.F) { client, err := containerd.New(defaultAddress) if err != nil { - return + t.Fatal(err) } defer client.Close() diff --git a/contrib/fuzz/cri_fuzzer.go b/contrib/fuzz/cri_server_fuzz_test.go similarity index 75% rename from contrib/fuzz/cri_fuzzer.go rename to contrib/fuzz/cri_server_fuzz_test.go index ff67439b4d5a..8f9e0941f280 100644 --- a/contrib/fuzz/cri_fuzzer.go +++ b/contrib/fuzz/cri_server_fuzz_test.go @@ -1,5 +1,3 @@ -//go:build gofuzz - /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,46 +16,93 @@ package fuzz import ( "context" "fmt" + "os" golangruntime "runtime" "strings" + "testing" fuzz "github.com/AdaLogics/go-fuzz-headers" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1" - + containerd "github.com/containerd/containerd/v2/client" + criconfig "github.com/containerd/containerd/v2/internal/cri/config" + "github.com/containerd/containerd/v2/internal/cri/instrument" "github.com/containerd/containerd/v2/internal/cri/server" - sandboxstore "github.com/containerd/containerd/v2/internal/cri/store/sandbox" + "github.com/containerd/containerd/v2/internal/cri/server/images" + "github.com/containerd/containerd/v2/pkg/oci" + "github.com/containerd/errdefs" + "google.golang.org/grpc" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) -var ( - // The APIs the fuzzer can call: - ops = map[int]string{ - 0: "createContainer", - 1: "removeContainer", - 2: "addSandboxes", - 3: "listContainers", - 4: "startContainer", - 5: "containerStats", - 6: "listContainerStats", - 7: "containerStatus", - 8: "stopContainer", - 9: "updateContainerResources", - 10: "listImages", - 11: "removeImages", - 12: "imageStatus", - 13: "imageFsInfo", - 14: "listPodSandbox", - 15: "portForward", - 16: "removePodSandbox", - 17: "runPodSandbox", - 18: "podSandboxStatus", - 19: "stopPodSandbox", - 20: "status", - 21: "updateRuntimeConfig", +func FuzzCRIServer(f *testing.F) { + if os.Getuid() != 0 { + f.Skip("skipping fuzz test that requires root") } + + f.Fuzz(func(t *testing.T, data []byte) { + initDaemon.Do(startDaemon) + + f := fuzz.NewConsumer(data) + + client, err := containerd.New(defaultAddress) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + imageConfig := criconfig.ImageConfig{} + + imageService, err := images.NewService(imageConfig, &images.CRIImageServiceOptions{ + Client: client, + }) + if err != nil { + t.Fatal(err) + } + + c, rs, err := server.NewCRIService(&server.CRIServiceOptions{ + RuntimeService: &fakeRuntimeService{}, + ImageService: imageService, + Client: client, + }) + if err != nil { + t.Fatal(err) + } + + fuzzCRI(t, f, &service{ + CRIService: c, + RuntimeServiceServer: rs, + ImageServiceServer: imageService.GRPCService(), + }) + }) +} + +type fakeRuntimeService struct{} + +func (fakeRuntimeService) Config() criconfig.Config { + return criconfig.Config{} +} + +func (fakeRuntimeService) LoadOCISpec(string) (*oci.Spec, error) { + return nil, errdefs.ErrNotFound +} + +type service struct { + server.CRIService + runtime.RuntimeServiceServer + runtime.ImageServiceServer +} + +func (c *service) Register(s *grpc.Server) error { + instrumented := instrument.NewService(c) + runtime.RegisterRuntimeServiceServer(s, instrumented) + runtime.RegisterImageServiceServer(s, instrumented) + return nil +} + +var ( executionOrder []string ) -func printExecutions() { +func printExecutions(t *testing.T) { if r := recover(); r != nil { var err string switch res := r.(type) { @@ -70,11 +115,11 @@ func printExecutions() { default: err = "uknown error type" } - fmt.Println("Executions:") + t.Log("Executions:") for _, eo := range executionOrder { - fmt.Println(eo) + t.Log(eo) } - panic(err) + t.Fatal(err) } } @@ -84,68 +129,45 @@ type fuzzCRIService interface { runtime.ImageServiceServer } -func fuzzCRI(f *fuzz.ConsumeFuzzer, c fuzzCRIService) int { +func fuzzCRI(t *testing.T, f *fuzz.ConsumeFuzzer, c fuzzCRIService) int { + ops := []func(c fuzzCRIService, f *fuzz.ConsumeFuzzer) error{ + createContainerFuzz, + removeContainerFuzz, + 
listContainersFuzz, + startContainerFuzz, + containerStatsFuzz, + listContainerStatsFuzz, + containerStatusFuzz, + stopContainerFuzz, + updateContainerResourcesFuzz, + listImagesFuzz, + removeImagesFuzz, + imageStatusFuzz, + imageFsInfoFuzz, + listPodSandboxFuzz, + portForwardFuzz, + removePodSandboxFuzz, + runPodSandboxFuzz, + podSandboxStatusFuzz, + stopPodSandboxFuzz, + statusFuzz, + updateRuntimeConfigFuzz, + } + calls, err := f.GetInt() if err != nil { return 0 } executionOrder = make([]string, 0) - defer printExecutions() + defer printExecutions(t) for i := 0; i < calls%40; i++ { op, err := f.GetInt() if err != nil { return 0 } - opType := op % len(ops) - - switch ops[opType] { - case "createContainer": - createContainerFuzz(c, f) - case "removeContainer": - removeContainerFuzz(c, f) - case "addSandboxes": - addSandboxesFuzz(c, f) - case "listContainers": - listContainersFuzz(c, f) - case "startContainer": - startContainerFuzz(c, f) - case "containerStats": - containerStatsFuzz(c, f) - case "listContainerStats": - listContainerStatsFuzz(c, f) - case "containerStatus": - containerStatusFuzz(c, f) - case "stopContainer": - stopContainerFuzz(c, f) - case "updateContainerResources": - updateContainerResourcesFuzz(c, f) - case "listImages": - listImagesFuzz(c, f) - case "removeImages": - removeImagesFuzz(c, f) - case "imageStatus": - imageStatusFuzz(c, f) - case "imageFsInfo": - imageFsInfoFuzz(c, f) - case "listPodSandbox": - listPodSandboxFuzz(c, f) - case "portForward": - portForwardFuzz(c, f) - case "removePodSandbox": - removePodSandboxFuzz(c, f) - case "runPodSandbox": - runPodSandboxFuzz(c, f) - case "podSandboxStatus": - podSandboxStatusFuzz(c, f) - case "stopPodSandbox": - stopPodSandboxFuzz(c, f) - case "status": - statusFuzz(c, f) - case "updateRuntimeConfig": - updateRuntimeConfigFuzz(c, f) - } + ops[op%len(ops)](c, f) } return 1 } @@ -184,67 +206,6 @@ func removeContainerFuzz(c fuzzCRIService, f *fuzz.ConsumeFuzzer) error { return nil } -func sandboxStore(cs fuzzCRIService) (*sandboxstore.Store, error) { - var ( - ss *sandboxstore.Store - err error - ) - - ss, err = server.SandboxStore(cs) - if err != nil { - ss, err = server.SandboxStore(cs) - if err != nil { - return nil, err - } - return ss, nil - } - return ss, nil -} - -// addSandboxesFuzz creates a sandbox and adds it to the sandboxstore -func addSandboxesFuzz(c fuzzCRIService, f *fuzz.ConsumeFuzzer) error { - quantity, err := f.GetInt() - if err != nil { - return err - } - - ss, err := sandboxStore(c) - if err != nil { - return err - } - - for i := 0; i < quantity%20; i++ { - newSandbox, err := getSandboxFuzz(f) - if err != nil { - return err - } - err = ss.Add(newSandbox) - if err != nil { - return err - } - } - return nil -} - -// getSandboxFuzz creates a sandbox -func getSandboxFuzz(f *fuzz.ConsumeFuzzer) (sandboxstore.Sandbox, error) { - metadata := sandboxstore.Metadata{} - status := sandboxstore.Status{} - err := f.GenerateStruct(&metadata) - if err != nil { - return sandboxstore.Sandbox{}, err - } - err = f.GenerateStruct(&status) - if err != nil { - return sandboxstore.Sandbox{}, err - } - - reqString := fmt.Sprintf("metadata: %+v\nstatus: %+v\n", metadata, status) - logExecution("sandboxstore.NewSandbox", reqString) - - return sandboxstore.NewSandbox(metadata, status), nil -} - // listContainersFuzz creates a ListContainersRequest and passes // it to c.ListContainers func listContainersFuzz(c fuzzCRIService, f *fuzz.ConsumeFuzzer) error { diff --git a/contrib/fuzz/cri_server_fuzzer.go 
b/contrib/fuzz/cri_server_fuzzer.go deleted file mode 100644 index 3eaaa514b55a..000000000000 --- a/contrib/fuzz/cri_server_fuzzer.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fuzz - -import ( - fuzz "github.com/AdaLogics/go-fuzz-headers" - "google.golang.org/grpc" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1" - - containerd "github.com/containerd/containerd/v2/client" - criconfig "github.com/containerd/containerd/v2/internal/cri/config" - "github.com/containerd/containerd/v2/internal/cri/instrument" - "github.com/containerd/containerd/v2/internal/cri/server" - "github.com/containerd/containerd/v2/internal/cri/server/images" - "github.com/containerd/containerd/v2/pkg/oci" - "github.com/containerd/errdefs" -) - -func FuzzCRIServer(data []byte) int { - initDaemon.Do(startDaemon) - - f := fuzz.NewConsumer(data) - - client, err := containerd.New(defaultAddress) - if err != nil { - return 0 - } - defer client.Close() - - imageConfig := criconfig.ImageConfig{} - - imageService, err := images.NewService(imageConfig, &images.CRIImageServiceOptions{ - Client: client, - }) - if err != nil { - panic(err) - } - - c, rs, err := server.NewCRIService(&server.CRIServiceOptions{ - RuntimeService: &fakeRuntimeService{}, - ImageService: imageService, - Client: client, - }) - if err != nil { - panic(err) - } - - return fuzzCRI(f, &service{ - CRIService: c, - RuntimeServiceServer: rs, - ImageServiceServer: imageService.GRPCService(), - }) -} - -type fakeRuntimeService struct{} - -func (fakeRuntimeService) Config() criconfig.Config { - return criconfig.Config{} -} - -func (fakeRuntimeService) LoadOCISpec(string) (*oci.Spec, error) { - return nil, errdefs.ErrNotFound -} - -type service struct { - server.CRIService - runtime.RuntimeServiceServer - runtime.ImageServiceServer -} - -func (c *service) Register(s *grpc.Server) error { - instrumented := instrument.NewService(c) - runtime.RegisterRuntimeServiceServer(s, instrumented) - runtime.RegisterImageServiceServer(s, instrumented) - return nil -} diff --git a/internal/cri/server/fuzz.go b/internal/cri/server/fuzz.go deleted file mode 100644 index d217d0ec760a..000000000000 --- a/internal/cri/server/fuzz.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package server - -import ( - "fmt" - - "github.com/containerd/containerd/v2/internal/cri/store/sandbox" -) - -func SandboxStore(cs CRIService) (*sandbox.Store, error) { - s, ok := cs.(*criService) - if !ok { - return nil, fmt.Errorf("%+v is not sbserver.criService", cs) - } - return s.sandboxStore, nil -} From 740c5d4284de1704ffab91bf03967346ae7d29a9 Mon Sep 17 00:00:00 2001 From: luchenhan Date: Tue, 14 Jan 2025 11:36:35 +0800 Subject: [PATCH 21/46] docs: fix some function names in comment Signed-off-by: luchenhan --- cmd/ctr/commands/run/run.go | 2 +- contrib/apparmor/template.go | 2 +- contrib/fuzz/content_fuzz_test.go | 2 +- internal/cri/util/util.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/ctr/commands/run/run.go b/cmd/ctr/commands/run/run.go index e104c9577126..365f7774631e 100644 --- a/cmd/ctr/commands/run/run.go +++ b/cmd/ctr/commands/run/run.go @@ -269,7 +269,7 @@ var Command = &cli.Command{ }, } -// buildLabel builds the labels from command line labels and the image labels +// buildLabels builds the labels from command line labels and the image labels func buildLabels(cmdLabels, imageLabels map[string]string) map[string]string { labels := make(map[string]string) for k, v := range imageLabels { diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go index 2d8a9a68d37d..3701c90ede14 100644 --- a/contrib/apparmor/template.go +++ b/contrib/apparmor/template.go @@ -165,7 +165,7 @@ func load(path string) error { return nil } -// macrosExists checks if the passed macro exists. +// macroExists checks if the passed macro exists. func macroExists(m string) bool { _, err := os.Stat(path.Join(dir, m)) return err == nil diff --git a/contrib/fuzz/content_fuzz_test.go b/contrib/fuzz/content_fuzz_test.go index 3e18b8c6b66b..53e73e3ad9c0 100644 --- a/contrib/fuzz/content_fuzz_test.go +++ b/contrib/fuzz/content_fuzz_test.go @@ -68,7 +68,7 @@ func generateBlobs(f *fuzz.ConsumeFuzzer) (map[digest.Digest][]byte, error) { return blobs, nil } -// checkwrite is a wrapper around content.WriteBlob() +// checkWrite is a wrapper around content.WriteBlob() func checkWrite(ctx context.Context, cs content.Store, dgst digest.Digest, p []byte) (digest.Digest, error) { if err := content.WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p), ocispec.Descriptor{Size: int64(len(p)), Digest: dgst}); err != nil { diff --git a/internal/cri/util/util.go b/internal/cri/util/util.go index e5ca181b144b..08c413dd4237 100644 --- a/internal/cri/util/util.go +++ b/internal/cri/util/util.go @@ -67,7 +67,7 @@ func GetPassthroughAnnotations(podAnnotations map[string]string, return passthroughAnnotations } -// BuildLabel builds the labels from config to be passed to containerd +// BuildLabels builds the labels from config to be passed to containerd func BuildLabels(configLabels, imageConfigLabels map[string]string, containerType string) map[string]string { labels := make(map[string]string) From e20f7f4a2425c005d85855abfd4556d7b4ccbf87 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 13 Jan 2025 21:55:52 -0800 Subject: [PATCH 22/46] Move CDI device spec out of the OCI package The CDI device injection spec opt was mistakenly added to the OCI package which brought in an unintended dependency on CDI and its transitive dependencies. 
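
For downstream callers the migration is mechanical: import the new `pkg/cdi`
package and call its `WithCDIDevices` instead of the one in `pkg/oci`. A
minimal sketch (the helper name and the device ID handling below are
hypothetical; only the import path and the option constructor come from this
patch):

```go
package main

import (
	cdispec "github.com/containerd/containerd/v2/pkg/cdi"
	"github.com/containerd/containerd/v2/pkg/oci"
)

// buildSpecOpts shows the new call site; previously this code would have
// appended oci.WithCDIDevices(cdiDeviceIDs...) from the oci package.
func buildSpecOpts(cdiDeviceIDs []string) []oci.SpecOpts {
	var opts []oci.SpecOpts
	if len(cdiDeviceIDs) > 0 {
		// Injects the requested CDI devices into the generated OCI spec.
		opts = append(opts, cdispec.WithCDIDevices(cdiDeviceIDs...))
	}
	return opts
}

func main() {}
```

The old oci.WithCDIDevices is kept only as a stub that returns an error (and
is removed entirely in the following patch), so callers must switch to the
new package.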
Signed-off-by: Derek McGowan --- cmd/ctr/commands/run/run_unix.go | 8 +++-- internal/cri/opts/spec_linux.go | 6 ++-- pkg/cdi/oci_opt.go | 56 ++++++++++++++++++++++++++++++++ pkg/oci/spec_opts.go | 42 +++++++----------------- 4 files changed, 77 insertions(+), 35 deletions(-) create mode 100644 pkg/cdi/oci_opt.go diff --git a/cmd/ctr/commands/run/run_unix.go b/cmd/ctr/commands/run/run_unix.go index 5ab526d51eae..50d31a4aa37a 100644 --- a/cmd/ctr/commands/run/run_unix.go +++ b/cmd/ctr/commands/run/run_unix.go @@ -27,6 +27,9 @@ import ( "strconv" "strings" + "github.com/containerd/log" + "github.com/containerd/platforms" + containerd "github.com/containerd/containerd/v2/client" "github.com/containerd/containerd/v2/cmd/ctr/commands" "github.com/containerd/containerd/v2/contrib/apparmor" @@ -35,9 +38,8 @@ import ( "github.com/containerd/containerd/v2/core/containers" "github.com/containerd/containerd/v2/core/diff" "github.com/containerd/containerd/v2/core/snapshots" + cdispec "github.com/containerd/containerd/v2/pkg/cdi" "github.com/containerd/containerd/v2/pkg/oci" - "github.com/containerd/log" - "github.com/containerd/platforms" "github.com/intel/goresctrl/pkg/blockio" "github.com/opencontainers/runtime-spec/specs-go" @@ -359,7 +361,7 @@ func NewContainer(ctx context.Context, client *containerd.Client, cliContext *cl if len(cdiDeviceIDs) > 0 { opts = append(opts, withStaticCDIRegistry()) } - opts = append(opts, oci.WithCDIDevices(cdiDeviceIDs...)) + opts = append(opts, cdispec.WithCDIDevices(cdiDeviceIDs...)) rootfsPropagation := cliContext.String("rootfs-propagation") if rootfsPropagation != "" { diff --git a/internal/cri/opts/spec_linux.go b/internal/cri/opts/spec_linux.go index d78c99c309f8..712c080cffd3 100644 --- a/internal/cri/opts/spec_linux.go +++ b/internal/cri/opts/spec_linux.go @@ -31,9 +31,11 @@ import ( runtime "k8s.io/cri-api/pkg/apis/runtime/v1" "tags.cncf.io/container-device-interface/pkg/cdi" + "github.com/containerd/log" + "github.com/containerd/containerd/v2/core/containers" + cdispec "github.com/containerd/containerd/v2/pkg/cdi" "github.com/containerd/containerd/v2/pkg/oci" - "github.com/containerd/log" ) // Linux dependent OCI spec opts. @@ -172,6 +174,6 @@ func WithCDI(annotations map[string]string, CDIDevices []*runtime.CDIDevice) oci log.G(ctx).Debug("Passing CDI devices as annotations will be deprecated soon, please use CRI CDIDevices instead") } - return oci.WithCDIDevices(devices...)(ctx, client, c, s) + return cdispec.WithCDIDevices(devices...)(ctx, client, c, s) } } diff --git a/pkg/cdi/oci_opt.go b/pkg/cdi/oci_opt.go new file mode 100644 index 000000000000..0fd6f28e9346 --- /dev/null +++ b/pkg/cdi/oci_opt.go @@ -0,0 +1,56 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cdi + +import ( + "context" + "fmt" + + "github.com/containerd/log" + "tags.cncf.io/container-device-interface/pkg/cdi" + + "github.com/containerd/containerd/v2/core/containers" + "github.com/containerd/containerd/v2/pkg/oci" +) + +// WithCDIDevices injects the requested CDI devices into the OCI specification. +func WithCDIDevices(devices ...string) oci.SpecOpts { + return func(ctx context.Context, _ oci.Client, c *containers.Container, s *oci.Spec) error { + if len(devices) == 0 { + return nil + } + + if err := cdi.Refresh(); err != nil { + // We don't consider registry refresh failure a fatal error. + // For instance, a dynamically generated invalid CDI Spec file for + // any particular vendor shouldn't prevent injection of devices of + // different vendors. CDI itself knows better and it will fail the + // injection if necessary. + log.G(ctx).Warnf("CDI registry refresh failed: %v", err) + } + + if _, err := cdi.InjectDevices(s, devices...); err != nil { + return fmt.Errorf("CDI device injection failed: %w", err) + } + + // One crucial thing to keep in mind is that CDI device injection + // might add OCI Spec environment variables, hooks, and mounts as + // well. Therefore it is important that none of the corresponding + // OCI Spec fields are reset up in the call stack once we return. + return nil + } +} diff --git a/pkg/oci/spec_opts.go b/pkg/oci/spec_opts.go index 5101c63bb638..7ea08b71fad2 100644 --- a/pkg/oci/spec_opts.go +++ b/pkg/oci/spec_opts.go @@ -28,18 +28,17 @@ import ( "strconv" "strings" - "github.com/containerd/containerd/v2/core/containers" - "github.com/containerd/containerd/v2/core/content" - "github.com/containerd/containerd/v2/core/images" - "github.com/containerd/containerd/v2/core/mount" - "github.com/containerd/containerd/v2/pkg/namespaces" "github.com/containerd/continuity/fs" - "github.com/containerd/log" "github.com/containerd/platforms" "github.com/moby/sys/user" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" - "tags.cncf.io/container-device-interface/pkg/cdi" + + "github.com/containerd/containerd/v2/core/containers" + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/containerd/v2/pkg/namespaces" ) // SpecOpts sets spec specific information to a newly generated OCI spec @@ -1644,30 +1643,13 @@ func WithWindowsNetworkNamespace(ns string) SpecOpts { } } -// WithCDIDevices injects the requested CDI devices into the OCI specification. +// WithCDIDevices should be used from the cdi package. This version is used for +// compatibility to point to the non-deprecated version but will return an error if used. +// This function will be removed in 2.1. +// +// Deprecated: Use [github.com/containerd/containerd/v2/pkg/cdi.WithCDIDevices] instead. func WithCDIDevices(devices ...string) SpecOpts { return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - if len(devices) == 0 { - return nil - } - - if err := cdi.Refresh(); err != nil { - // We don't consider registry refresh failure a fatal error. - // For instance, a dynamically generated invalid CDI Spec file for - // any particular vendor shouldn't prevent injection of devices of - // different vendors. CDI itself knows better and it will fail the - // injection if necessary. 
- log.G(ctx).Warnf("CDI registry refresh failed: %v", err) - } - - if _, err := cdi.InjectDevices(s, devices...); err != nil { - return fmt.Errorf("CDI device injection failed: %w", err) - } - - // One crucial thing to keep in mind is that CDI device injection - // might add OCI Spec environment variables, hooks, and mounts as - // well. Therefore it is important that none of the corresponding - // OCI Spec fields are reset up in the call stack once we return. - return nil + return errors.New("must use cdi package for CDI device injection") } } From bdc847f1eb535a6728b6db3f2619d2a5ed0edbb9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 13 Jan 2025 22:01:10 -0800 Subject: [PATCH 23/46] Remove deprecated WithCDIDevices in oci spec opts This function has been moved to prevent an unintended dependency on CDI. Signed-off-by: Derek McGowan --- pkg/oci/spec_opts.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/pkg/oci/spec_opts.go b/pkg/oci/spec_opts.go index 7ea08b71fad2..3b85d764ae10 100644 --- a/pkg/oci/spec_opts.go +++ b/pkg/oci/spec_opts.go @@ -1642,14 +1642,3 @@ func WithWindowsNetworkNamespace(ns string) SpecOpts { return nil } } - -// WithCDIDevices should be used from the cdi package. This version is used for -// compatibility to point to the non-deprecated version but will return an error if used. -// This function will be removed in 2.1. -// -// Deprecated: Use [github.com/containerd/containerd/v2/pkg/cdi.WithCDIDevices] instead. -func WithCDIDevices(devices ...string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return errors.New("must use cdi package for CDI device injection") - } -} From 04f9e30db313908c1209b7f7d526d5d3eb8467ed Mon Sep 17 00:00:00 2001 From: zouyee Date: Wed, 15 Jan 2025 09:54:03 +0800 Subject: [PATCH 24/46] log: avoid using unsupported field by logrus Signed-off-by: zouyee --- core/runtime/v2/binary.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/runtime/v2/binary.go b/core/runtime/v2/binary.go index c77fb494bb58..47780301e879 100644 --- a/core/runtime/v2/binary.go +++ b/core/runtime/v2/binary.go @@ -195,7 +195,7 @@ func (b *binary) Delete(ctx context.Context) (*runtime.Exit, error) { cmd.Stdout = out cmd.Stderr = errb if err := cmd.Run(); err != nil { - log.G(ctx).WithField("cmd", cmd).WithError(err).Error("failed to delete") + log.G(ctx).WithField("cmd", cmd.String()).WithError(err).Error("failed to delete") return nil, fmt.Errorf("%s: %w", errb.String(), err) } s := errb.String() From 157faf65c55c5de56f636fe3466f59b43241abb3 Mon Sep 17 00:00:00 2001 From: Akhil Mohan Date: Sat, 18 Jan 2025 14:19:11 +0530 Subject: [PATCH 25/46] update to go1.23.5 / go1.22.11 - go1.23.5 (released 2025-01-16) includes security fixes to the crypto/x509 and net/http packages, as well as bug fixes to the compiler, the runtime, and the net package. See the Go 1.23.5 milestone on our issue tracker for details. - go1.22.11 (released 2025-01-16) includes security fixes to the crypto/x509 and net/http packages, as well as bug fixes to the runtime. See the Go 1.22.11 milestone on our issue tracker for details. 
Signed-off-by: Akhil Mohan --- .devcontainer/devcontainer.json | 2 +- .github/actions/install-go/action.yml | 2 +- .github/workflows/api-release.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/release.yml | 2 +- Vagrantfile | 2 +- contrib/Dockerfile.test | 2 +- contrib/fuzz/oss_fuzz_build.sh | 4 ++-- script/setup/prepare_env_windows.ps1 | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f445d4aed8b9..daaaaad75d97 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -12,7 +12,7 @@ "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/devcontainers/features/go:1": { - "version": "1.23.4" + "version": "1.23.5" } }, diff --git a/.github/actions/install-go/action.yml b/.github/actions/install-go/action.yml index 525d7f22ef2c..0ee936adbf6e 100644 --- a/.github/actions/install-go/action.yml +++ b/.github/actions/install-go/action.yml @@ -3,7 +3,7 @@ description: "Reusable action to install Go, so there is one place to bump Go ve inputs: go-version: required: true - default: "1.23.4" + default: "1.23.5" description: "Go version to install" runs: diff --git a/.github/workflows/api-release.yml b/.github/workflows/api-release.yml index 39fc2e8bfdd4..156295056a70 100644 --- a/.github/workflows/api-release.yml +++ b/.github/workflows/api-release.yml @@ -6,7 +6,7 @@ on: name: API Release env: - GO_VERSION: "1.23.4" + GO_VERSION: "1.23.5" permissions: # added using https://github.com/step-security/secure-workflows contents: read diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4c0f51e96ea..e1c4ea9af61f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -191,7 +191,7 @@ jobs: strategy: matrix: os: [ubuntu-22.04, ubuntu-24.04, arm64-8core-32gb, macos-13, windows-2019, windows-2022] - go-version: ["1.22.10", "1.23.4"] + go-version: ["1.22.11", "1.23.5"] exclude: - os: ${{ github.repository != 'containerd/containerd' && 'arm64-8core-32gb' }} steps: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 36e189c21b74..229ac6af127a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ on: name: Release env: - GO_VERSION: "1.23.4" + GO_VERSION: "1.23.5" permissions: # added using https://github.com/step-security/secure-workflows contents: read diff --git a/Vagrantfile b/Vagrantfile index bfd6684b9142..74c28b5433e4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -107,7 +107,7 @@ EOF config.vm.provision "install-golang", type: "shell", run: "once" do |sh| sh.upload_path = "/tmp/vagrant-install-golang" sh.env = { - 'GO_VERSION': ENV['GO_VERSION'] || "1.23.4", + 'GO_VERSION': ENV['GO_VERSION'] || "1.23.5", } sh.inline = <<~SHELL #!/usr/bin/env bash diff --git a/contrib/Dockerfile.test b/contrib/Dockerfile.test index d1465bdcee72..0b84875a99ca 100644 --- a/contrib/Dockerfile.test +++ b/contrib/Dockerfile.test @@ -34,7 +34,7 @@ # docker run --privileged --group-add keep-groups -v ./critest_exit_code.txt:/tmp/critest_exit_code.txt containerd-test # ------------------------------------------------------------------------------ -ARG GOLANG_VERSION=1.23.4 +ARG GOLANG_VERSION=1.23.5 ARG GOLANG_IMAGE=golang FROM ${GOLANG_IMAGE}:${GOLANG_VERSION} AS golang diff --git a/contrib/fuzz/oss_fuzz_build.sh b/contrib/fuzz/oss_fuzz_build.sh index fef371b7a8ef..97e6925e3cd4 100755 --- a/contrib/fuzz/oss_fuzz_build.sh +++ b/contrib/fuzz/oss_fuzz_build.sh @@ -43,11 
+43,11 @@ go run main.go --target_dir $SRC/containerd/images apt-get update && apt-get install -y wget cd $SRC -wget --quiet https://go.dev/dl/go1.23.4.linux-amd64.tar.gz +wget --quiet https://go.dev/dl/go1.23.5.linux-amd64.tar.gz mkdir temp-go rm -rf /root/.go/* -tar -C temp-go/ -xzf go1.23.4.linux-amd64.tar.gz +tar -C temp-go/ -xzf go1.23.5.linux-amd64.tar.gz mv temp-go/go/* /root/.go/ cd $SRC/containerd diff --git a/script/setup/prepare_env_windows.ps1 b/script/setup/prepare_env_windows.ps1 index a5211a0d3d94..efb955c912bc 100644 --- a/script/setup/prepare_env_windows.ps1 +++ b/script/setup/prepare_env_windows.ps1 @@ -5,7 +5,7 @@ # lived test environment. Set-MpPreference -DisableRealtimeMonitoring:$true -$PACKAGES= @{ mingw = "10.2.0"; git = ""; golang = "1.23.4"; make = ""; nssm = "" } +$PACKAGES= @{ mingw = "10.2.0"; git = ""; golang = "1.23.5"; make = ""; nssm = "" } Write-Host "Downloading chocolatey package" curl.exe -L "https://packages.chocolatey.org/chocolatey.0.10.15.nupkg" -o 'c:\choco.zip' From 460e5a2e2bec851ba357dc1b738e3023841d0f2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:33:04 +0000 Subject: [PATCH 26/46] build(deps): bump google.golang.org/protobuf from 1.36.1 to 1.36.3 Bumps google.golang.org/protobuf from 1.36.1 to 1.36.3. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../protobuf/encoding/protojson/decode.go | 2 +- .../protobuf/encoding/prototext/decode.go | 2 +- .../protobuf/internal/flags/flags.go | 5 ++ .../protobuf/internal/impl/codec_map.go | 14 ++--- .../protobuf/internal/impl/codec_map_go111.go | 38 ------------- .../protobuf/internal/impl/codec_map_go112.go | 12 ---- .../protobuf/internal/impl/codec_message.go | 4 +- .../internal/impl/codec_message_opaque.go | 6 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/message.go | 12 ++-- .../protobuf/internal/impl/message_opaque.go | 36 +++++++++--- .../internal/impl/message_reflect_field.go | 56 +++---------------- .../protobuf/internal/impl/pointer_unsafe.go | 2 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 +- .../reflect/protodesc/desc_validate.go | 2 +- .../protobuf/reflect/protodesc/editions.go | 31 ++++++---- .../protobuf/types/dynamicpb/types.go | 12 ++-- vendor/modules.txt | 2 +- 21 files changed, 95 insertions(+), 153 deletions(-) delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go diff --git a/go.mod b/go.mod index 7beff6597362..695405bd6036 100644 --- a/go.mod +++ b/go.mod @@ -76,7 +76,7 @@ require ( golang.org/x/sys v0.29.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 google.golang.org/grpc v1.69.2 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.3 k8s.io/apimachinery v0.31.3 k8s.io/client-go v0.31.3 k8s.io/component-base v0.31.3 diff --git a/go.sum b/go.sum index 4af444e11ac8..40ee21da7b55 100644 --- a/go.sum +++ b/go.sum @@ -527,8 +527,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 8f9e592f8701..cffdfda96193 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,7 +192,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac4226..d972a3d98ed1 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,7 +185,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd34850..5cb3ee70f91b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -22,3 +22,8 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy + +// ProtoLegacyWeak specifies whether to enable support for weak fields. +// This flag was split out of ProtoLegacy in preparation for removing +// support for weak fields (independent of the other protolegacy features). 
+const ProtoLegacyWeak = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae9c8..229c69801386 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2f43..000000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. 
-func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66eaf84..000000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 2f7b363ec4a6..111d95833d46 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -118,12 +118,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 88c16ae5b7cb..f81d7d0db9ae 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -45,19 +45,19 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf var childMessage *MessageInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) case fd.Message() != nil && !fd.IsMap(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) if fd.IsList() { childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) } else { childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) } default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &coderFieldInfo{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a651d9..e4580b3ac2ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index fa10a0f5cc9f..d1f79b4224f0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -165,28 +165,28 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) + si.weakOffset = offsetOf(f) si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } case "lazyFields", "XXX_lazyUnmarshalInfo": - si.lazyOffset = offsetOf(f, mi.Exporter) + si.lazyOffset = offsetOf(f) case "XXX_presence": - si.presenceOffset = offsetOf(f, mi.Exporter) + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d407dd791e89..d8dcd7886364 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -88,9 +88,7 @@ func opaqueInitHook(mi *MessageInfo) bool { mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < mi.Desc.Oneofs().Len(); i++ { od := mi.Desc.Oneofs().Get(i) - if !od.IsSynthetic() { - mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter) - } + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) } mi.denseFields = make([]*fieldInfo, fds.Len()*2) @@ -119,12 +117,32 @@ func opaqueInitHook(mi *MessageInfo) bool { return true } +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. 
+ return makeOneofInfo(od, si, x) +} + func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) } - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) conv := NewConverter(ft, fd) return fieldInfo{ fieldDesc: fd, @@ -178,7 +196,7 @@ func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd prot panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(reflect.PtrTo(ft), fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) return fieldInfo{ fieldDesc: fd, @@ -228,7 +246,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd pro panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() return fieldInfo{ @@ -321,7 +339,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -393,7 +411,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref deref = true } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) var getter func(p pointer) protoreflect.Value if !nullable { @@ -462,7 +480,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() elemType := fs.Type.Elem() diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index a740646205c0..3cd1fbc21fb4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -76,7 +76,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +152,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +205,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -269,7 +269,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) // Generate specialized getter functions to avoid going through reflect.Value if nullable { @@ -333,7 +333,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { + if !flags.ProtoLegacyWeak { panic("no support for proto1 weak fields") } @@ -410,7 +410,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -419,7 +419,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -466,7 +466,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -479,7 +479,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -497,41 +497,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 041ebde2de6d..6bed45e35c25 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -22,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 3018450df799..f5c06280fe32 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 1 + Patch = 3 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index a3b5e142d241..e28d7acb3780 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -172,7 +172,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { + } else if flags.ProtoLegacyWeak { if fd.IsWeak() && fd.Message().IsPlaceholder() { err = errUnknown // weak referent is not linked in } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2ebdb4..5eaf652176c0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,7 +149,7 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { + if f.IsWeak() && !flags.ProtoLegacyWeak { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { diff --git 
a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index bf0a0ccdeedb..f55b03695966 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -125,16 +125,27 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } - if sep := goFeatures.StripEnumPrefix; sep != nil { - parentFS.StripEnumPrefix = int(*sep) - } - if al := goFeatures.ApiLevel; al != nil { - parentFS.APILevel = int(*al) - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. + gf := goFeatures.Message() + fields := gf.Descriptor().Fields() + + if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByName("api_level"); gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index c432817bb9c7..8e759fc9f725 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -28,11 +28,7 @@ type extField struct { type Types struct { // atomicExtFiles is used with sync/atomic and hence must be the first word // of the struct to guarantee 64-bit alignment. - // - // TODO(stapelberg): once we only support Go 1.19 and newer, switch this - // field to be of type atomic.Uint64 to guarantee alignment on - // stack-allocated values, too. - atomicExtFiles uint64 + atomicExtFiles atomic.Uint64 extMu sync.Mutex files *protoregistry.Files @@ -90,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. 
- if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) { t.updateExtensions() } xd := t.extensionsByMessage[extField{message, field}] @@ -133,10 +129,10 @@ func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { func (t *Types) updateExtensions() { t.extMu.Lock() defer t.extMu.Unlock() - if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) { return } - defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + defer t.atomicExtFiles.Store(uint64(t.files.NumFiles())) t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { t.registerExtensions(fd.Extensions()) t.registerExtensionsInMessages(fd.Messages()) diff --git a/vendor/modules.txt b/vendor/modules.txt index 60a9f71a1fc3..789a3a7b5b54 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -708,7 +708,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.1 +# google.golang.org/protobuf v1.36.3 ## explicit; go 1.21 google.golang.org/protobuf/compiler/protogen google.golang.org/protobuf/encoding/protodelim From 19c546c9760b11c266a314bf25177b96d7a21f24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:33:14 +0000 Subject: [PATCH 27/46] build(deps): bump github.com/tchap/go-patricia/v2 from 2.3.1 to 2.3.2 Bumps [github.com/tchap/go-patricia/v2](https://github.com/tchap/go-patricia) from 2.3.1 to 2.3.2. - [Commits](https://github.com/tchap/go-patricia/compare/v2.3.1...v2.3.2) --- updated-dependencies: - dependency-name: github.com/tchap/go-patricia/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 7beff6597362..548d30ab25b5 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/prometheus/client_golang v1.20.5 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.10.0 - github.com/tchap/go-patricia/v2 v2.3.1 + github.com/tchap/go-patricia/v2 v2.3.2 github.com/urfave/cli/v2 v2.27.5 github.com/vishvananda/netlink v1.3.0 go.etcd.io/bbolt v1.3.11 diff --git a/go.sum b/go.sum index 4af444e11ac8..9074a805d8d3 100644 --- a/go.sum +++ b/go.sum @@ -324,8 +324,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= diff --git a/vendor/modules.txt b/vendor/modules.txt index 60a9f71a1fc3..2f718875e969 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -475,7 +475,7 @@ github.com/stretchr/testify/require # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit github.com/syndtr/gocapability/capability -# github.com/tchap/go-patricia/v2 v2.3.1 +# github.com/tchap/go-patricia/v2 v2.3.2 ## explicit; go 1.16 github.com/tchap/go-patricia/v2/patricia # github.com/urfave/cli/v2 v2.27.5 From 36d3888cf7eb7c9f533167cf93748ece98eb79cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:34:10 +0000 Subject: [PATCH 28/46] build(deps): bump actions/upload-artifact from 4.4.3 to 4.6.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.3 to 4.6.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882...65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/api-release.yml | 2 +- .github/workflows/ci.yml | 6 +++--- .github/workflows/fuzz.yml | 2 +- .github/workflows/nightly.yml | 12 ++++++------ .github/workflows/release.yml | 4 ++-- .github/workflows/scorecards.yml | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/api-release.yml b/.github/workflows/api-release.yml index 39fc2e8bfdd4..23fdae925500 100644 --- a/.github/workflows/api-release.yml +++ b/.github/workflows/api-release.yml @@ -50,7 +50,7 @@ jobs: working-directory: src/github.com/containerd/containerd - name: Save release notes - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: containerd-release-notes path: src/github.com/containerd/containerd/release-notes.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4c0f51e96ea..8a5109f37411 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -367,7 +367,7 @@ jobs: } critest.exe --runtime-endpoint=npipe://.//pipe//containerd-containerd --test-images-file='${{env.CRI_TEST_IMAGES}}' --report-dir='${{github.workspace}}/critestreport' $skip - - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: always() with: name: TestResults ${{ matrix.os }} ${{ matrix.cgroup_driver }} @@ -514,7 +514,7 @@ jobs: sudo lsmod sudo dmesg -T -f kern - - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: always() with: name: TestResults ${{ matrix.runtime }} ${{matrix.runc}} ${{ matrix.os }} ${{ matrix.cgroup_driver }} @@ -686,7 +686,7 @@ jobs: if: always() - run: script/test/test2annotation.sh *-gotest.json if: always() - - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: always() with: name: TestResults MacOS diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 5e7152c95e7a..78f21f9a0453 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -26,7 +26,7 @@ jobs: language: go continue-on-error: true - name: Upload Crash - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index e19fbef5fdc7..379dc09670da 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -99,31 +99,31 @@ jobs: # - name: Upload artifacts (linux_amd64) - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: linux_amd64 path: src/github.com/containerd/containerd/bin_amd64 - name: Upload artifacts (linux_arm64) - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: linux_arm64 path: src/github.com/containerd/containerd/bin_arm64 - name: Upload artifacts (linux_s390x) - uses: 
actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: linux_s390x path: src/github.com/containerd/containerd/bin_s390x - name: Upload artifacts (linux_ppc64le) - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: linux_ppc64le path: src/github.com/containerd/containerd/bin_ppc64le - name: Upload artifacts (linux_riscv64) - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: linux_riscv64 path: src/github.com/containerd/containerd/bin_riscv64 @@ -158,7 +158,7 @@ jobs: make binaries - name: Upload artifacts (windows_amd64) - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: windows_amd64 path: src/github.com/containerd/containerd/bin/ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 36e189c21b74..0462c69d6117 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -57,7 +57,7 @@ jobs: working-directory: src/github.com/containerd/containerd - name: Save release notes - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: containerd-release-notes path: src/github.com/containerd/containerd/release-notes.md @@ -127,7 +127,7 @@ jobs: env: PLATFORM: ${{ matrix.dockerfile-platform }} - name: Save Artifacts - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: release-tars-${{env.PLATFORM_CLEAN}} path: src/github.com/containerd/containerd/releases/*.tar.gz* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 391b5c2cecd8..fa8337f03681 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -41,7 +41,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # tag=v4.6.0 with: name: SARIF file path: results.sarif From f572a6db9037e4a36225a4146a4344aaf34d692c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:34:11 +0000 Subject: [PATCH 29/46] build(deps): bump lycheeverse/lychee-action from 2.1.0 to 2.2.0 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.1.0 to 2.2.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/f81112d0d2814ded911bd23e3beaa9dda9093915...f796c8b7d468feb9b8c0a46da3fac0af6874d374) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/links.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index a96f712769d9..e6467e0ea5dd 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # v2.1.0 + - uses: lycheeverse/lychee-action@f796c8b7d468feb9b8c0a46da3fac0af6874d374 # v2.2.0 with: # Fail action on broken links fail: true From 22e77720b3e6aecbb299ad70c68e2ade6dfd0108 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:34:19 +0000 Subject: [PATCH 30/46] build(deps): bump github/codeql-action from 3.27.9 to 3.28.1 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.9 to 3.28.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/df409f7d9260372bd5f19e5b04e83cb3c43714ae...b6a472f63d85b9c78a3ac5e89422239fc15e9b3c) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/scorecards.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c1959d2d0414..56ac64867b1d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -36,7 +36,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 + uses: github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java @@ -46,4 +46,4 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 + uses: github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 391b5c2cecd8..b622f19c1f7c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -49,6 +49,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@df409f7d9260372bd5f19e5b04e83cb3c43714ae # tag=v3.27.9 + uses: github/codeql-action/upload-sarif@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # tag=v3.28.1 with: sarif_file: results.sarif From 4b77d4e41ef99e6526f3e20dae36bc301f648477 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:34:19 +0000 Subject: [PATCH 31/46] build(deps): bump softprops/action-gh-release from 2.2.0 to 2.2.1 Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.2.0 to 2.2.1. 
- [Release notes](https://github.com/softprops/action-gh-release/releases) - [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md) - [Commits](https://github.com/softprops/action-gh-release/compare/7b4da11513bf3f43f9999e90eabced41ab8bb048...c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda) --- updated-dependencies: - dependency-name: softprops/action-gh-release dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/api-release.yml | 2 +- .github/workflows/release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/api-release.yml b/.github/workflows/api-release.yml index 39fc2e8bfdd4..74d72af75bcb 100644 --- a/.github/workflows/api-release.yml +++ b/.github/workflows/api-release.yml @@ -69,7 +69,7 @@ jobs: with: path: builds - name: Create Release - uses: softprops/action-gh-release@7b4da11513bf3f43f9999e90eabced41ab8bb048 # v2.2.0 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1 with: token: ${{ secrets.GITHUB_TOKEN }} fail_on_unmatched_files: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 36e189c21b74..548b537695b4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -155,7 +155,7 @@ jobs: - name: Rename attestation artifact run: mv ${{ steps.attest.outputs.bundle-path }} containerd-${{ needs.check.outputs.stringver }}-attestation.intoto.jsonl - name: Create Release - uses: softprops/action-gh-release@7b4da11513bf3f43f9999e90eabced41ab8bb048 # v2.2.0 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1 with: token: ${{ secrets.GITHUB_TOKEN }} fail_on_unmatched_files: true From 53d6f34822dda24bf7c8674305c93eadb4bad50b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:34:22 +0000 Subject: [PATCH 32/46] build(deps): bump golangci/golangci-lint-action from 6.1.1 to 6.2.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.1 to 6.2.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/971e284b6050e8a5849b72094c50ab08da042db8...ec5d18412c0aeab7936cb16880d708ba2a64e1ae) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4c0f51e96ea..a4b077733427 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/install-go - - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + - uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: version: v1.60.3 skip-cache: true From 69e82f9cd3e29428bd480b1c349268a0723af51d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 07:00:05 +0000 Subject: [PATCH 33/46] build(deps): bump the otel group across 1 directory with 8 updates Bumps the otel group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.56.0` | `0.59.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.56.0` | `0.59.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.34.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.34.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.34.0` | Updates `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc` from 0.56.0 to 0.59.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.56.0...zpages/v0.59.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.56.0 to 0.59.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.56.0...zpages/v0.59.0) Updates `go.opentelemetry.io/otel` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) 
Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) Updates `go.opentelemetry.io/otel/sdk` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) Updates `go.opentelemetry.io/otel/trace` from 1.31.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.34.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: otel ... 
Signed-off-by: dependabot[bot] --- go.mod | 37 +- go.sum | 73 +-- .../grpc-gateway/v2/runtime/convert.go | 6 +- .../grpc-gateway/v2/runtime/errors.go | 15 + .../grpc-gateway/v2/runtime/fieldmask.go | 2 +- .../grpc-gateway/v2/runtime/handler.go | 10 +- .../grpc-gateway/v2/runtime/marshaler.go | 8 + .../v2/runtime/marshaler_registry.go | 4 +- .../grpc-gateway/v2/runtime/proto2_convert.go | 4 +- .../grpc-gateway/v2/runtime/query.go | 8 +- .../grpc-gateway/v2/utilities/pattern.go | 2 +- .../v2/utilities/string_array_flag.go | 2 +- .../auto/sdk/CONTRIBUTING.md | 27 ++ vendor/go.opentelemetry.io/auto/sdk/LICENSE | 201 ++++++++ .../auto/sdk/VERSIONING.md | 15 + vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 + .../auto/sdk/internal/telemetry/attr.go | 58 +++ .../auto/sdk/internal/telemetry/doc.go | 8 + .../auto/sdk/internal/telemetry/id.go | 103 ++++ .../auto/sdk/internal/telemetry/number.go | 67 +++ .../auto/sdk/internal/telemetry/resource.go | 66 +++ .../auto/sdk/internal/telemetry/scope.go | 67 +++ .../auto/sdk/internal/telemetry/span.go | 456 ++++++++++++++++++ .../auto/sdk/internal/telemetry/status.go | 40 ++ .../auto/sdk/internal/telemetry/traces.go | 189 ++++++++ .../auto/sdk/internal/telemetry/value.go | 452 +++++++++++++++++ vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 ++++ vendor/go.opentelemetry.io/auto/sdk/span.go | 432 +++++++++++++++++ vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 +++++ .../auto/sdk/tracer_provider.go | 33 ++ .../google.golang.org/grpc/otelgrpc/config.go | 52 +- .../grpc/otelgrpc/stats_handler.go | 23 +- .../grpc/otelgrpc/version.go | 2 +- .../net/http/otelhttp/client.go | 6 +- .../net/http/otelhttp/handler.go | 50 +- .../net/http/otelhttp/internal/semconv/env.go | 141 ++++-- .../net/http/otelhttp/internal/semconv/gen.go | 14 + .../otelhttp/internal/semconv/httpconv.go | 205 +++++++- .../http/otelhttp/internal/semconv/util.go | 17 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 50 +- .../net/http/otelhttp/start_time_context.go | 29 ++ .../net/http/otelhttp/transport.go | 2 +- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.golangci.yml | 20 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 92 +++- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 18 +- vendor/go.opentelemetry.io/otel/Makefile | 16 +- vendor/go.opentelemetry.io/otel/README.md | 2 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 6 +- vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/baggage/baggage.go | 4 +- .../go.opentelemetry.io/otel/codes/codes.go | 3 +- .../tracetransform/instrumentation.go | 5 +- .../otlp/otlptrace/otlptracegrpc/client.go | 9 +- .../internal/otlpconfig/options.go | 6 +- .../otlp/otlptrace/otlptracehttp/client.go | 60 ++- .../internal/otlpconfig/options.go | 6 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/internal/attribute/attribute.go | 44 +- .../otel/internal/global/instruments.go | 14 +- .../otel/internal/global/meter.go | 69 ++- .../otel/internal/global/trace.go | 33 +- vendor/go.opentelemetry.io/otel/renovate.json | 6 - .../otel/sdk/instrumentation/scope.go | 4 + .../otel/sdk/resource/auto.go | 62 +-- .../otel/sdk/resource/builtin.go | 6 +- .../otel/sdk/trace/batch_span_processor.go | 1 + .../otel/sdk/trace/provider.go | 9 +- .../otel/sdk/trace/sampler_env.go | 5 +- .../otel/sdk/trace/span.go | 106 ++-- .../otel/sdk/trace/tracetest/recorder.go | 13 + .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../go.opentelemetry.io/otel/trace/config.go | 2 +- 
vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 15 +- .../proto/otlp/trace/v1/trace.pb.go | 6 +- vendor/golang.org/x/net/http2/config.go | 2 +- vendor/golang.org/x/net/http2/config_go124.go | 2 +- vendor/golang.org/x/net/http2/transport.go | 13 +- vendor/golang.org/x/oauth2/README.md | 15 +- .../googleapis/api/httpbody/httpbody.pb.go | 6 +- .../rpc/errdetails/error_details.pb.go | 128 +++-- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 66 +-- 85 files changed, 3575 insertions(+), 527 deletions(-) create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go diff --git a/go.mod b/go.mod index 695405bd6036..a5d08c6d3dab 100644 --- a/go.mod +++ b/go.mod @@ -63,19 +63,19 @@ require ( github.com/urfave/cli/v2 v2.27.5 github.com/vishvananda/netlink v1.3.0 go.etcd.io/bbolt v1.3.11 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 + go.opentelemetry.io/otel v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 + go.opentelemetry.io/otel/sdk v1.34.0 + go.opentelemetry.io/otel/trace v1.34.0 golang.org/x/mod v0.22.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 - google.golang.org/grpc v1.69.2 + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f + google.golang.org/grpc v1.69.4 google.golang.org/protobuf v1.36.3 k8s.io/apimachinery v0.31.3 k8s.io/client-go v0.31.3 @@ -107,7 +107,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect @@ -132,16 +132,17 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.31.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + golang.org/x/crypto v0.32.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 40ee21da7b55..bd133cdbb6f7 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpS github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -351,28 +351,30 @@ go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod 
h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -384,8 +386,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= @@ -419,11 +421,11 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -470,8 +472,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -507,17 +510,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3f8..2e50082ad116 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 01f573419180..41cd4f5030e4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0bf46..2fcd7af3c40e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 9f50a569e9ba..0fa907656612 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -64,7 +64,13 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte @@ -194,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff4935..b1dfc37af9b9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e894a..07c28112c899 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. 
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f20fb..f710036b350c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b857..0a1ca7e06fe9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864ab..38ca39cc5380 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. 
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c0c..66aa5f2dcc57 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 000000000000..773c9b6431f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 000000000000..088d19a6ce72 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. 
+ +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 000000000000..ad73d8cb9d28 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 000000000000..af6ef171f6a9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal returns if a is equal to b. 
+func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 000000000000..949e2165c057 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 000000000000..e854d7e84e86 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 000000000000..29e629d6674d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Int64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 000000000000..cecad8bae3c9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 000000000000..b6f2e28d408c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 000000000000..a13a6b733da8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. 
The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. 
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 000000000000..1217776ead1e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 000000000000..69a348f0f064 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
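+				// Nothing left to decode; treat the input as an empty object.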
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 000000000000..0dd01b063a34 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. 
+ bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
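+// If v does not hold a float64, 0 is returned (the mismatch is currently
+// swallowed; see the TODO in the method body).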
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. 
Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 000000000000..86babf1a885e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
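+//
+// For example, assuming a process environment where
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT is unset and OTEL_ATTRIBUTE_COUNT_LIMIT is
+// set to "256" (illustrative values only), the call
+//
+//	firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
+//
+// returns 256. A value that does not parse as an integer is logged and the
+// next key is tried; only when no key yields an integer is the default
+// (128 here) returned.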
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 000000000000..6ebea12a9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
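+			// Drop the invalid byte: it is neither written to the output nor
+			// counted toward the limit.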
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
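+		// The oldest link is shifted out so the new link appended below still
+		// fits within the limit; the drop is counted in DroppedLinks.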
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 000000000000..cbcfabde3b1a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
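+// The default is a no-op: with no instrumentation attached (and no test
+// override), Start still returns a functioning span, its data is simply
+// never exported.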
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 000000000000..dbc477a59ad2 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 18436eaedffd..9e87fb4bb192 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -51,11 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram + rpcDuration metric.Float64Histogram + rpcInBytes metric.Int64Histogram + rpcOutBytes metric.Int64Histogram + rpcInMessages metric.Int64Histogram + rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. @@ -96,46 +96,64 @@ func newConfig(opts []Option, role string) *config { } } - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} + if rpcRequestSize == nil { + rpcRequestSize = noop.Int64Histogram{} } } - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} + if rpcResponseSize == nil { + rpcResponseSize = noop.Int64Histogram{} } } - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} + if rpcRequestsPerRPC == nil { + rpcRequestsPerRPC = noop.Int64Histogram{} } } - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. 
Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} + if rpcResponsesPerRPC == nil { + rpcResponsesPerRPC = noop.Int64Histogram{} } } + switch role { + case "client": + c.rpcInBytes = rpcResponseSize + c.rpcInMessages = rpcResponsesPerRPC + c.rpcOutBytes = rpcRequestSize + c.rpcOutMessages = rpcRequestsPerRPC + case "server": + c.rpcInBytes = rpcRequestSize + c.rpcInMessages = rpcRequestsPerRPC + c.rpcOutBytes = rpcResponseSize + c.rpcOutMessages = rpcResponsesPerRPC + default: + c.rpcInBytes = noop.Int64Histogram{} + c.rpcInMessages = noop.Int64Histogram{} + c.rpcOutBytes = noop.Int64Histogram{} + c.rpcOutMessages = noop.Int64Histogram{} + } + return c } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fbcbfb84e047..c01cb897cd30 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,21 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) type gRPCContextKey struct{} type gRPCContext struct { - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -150,8 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.inMessages, 1) + c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,8 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.outMessages, 1) + c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -213,8 +214,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) + c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) 
} default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 439251e13476..80e5f2f6fc9a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.56.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd208..b25641c55d34 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. 
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e4236ab398c3..3ea05d01995b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -117,6 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -184,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - }, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, MetricData: semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, @@ -199,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index fb893b25042e..eaf4c3796744 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/env.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -9,12 +12,17 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" ) +// OTelSemConvStabilityOptIn is an environment variable. +// That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. +const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -30,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -50,9 +63,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +73,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -102,29 +115,56 @@ type MetricData struct { ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. - return +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, } +) - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } - // TODO: Duplicate Metrics + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) 
+ s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } return server } @@ -135,32 +175,41 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
} - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -175,7 +224,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} @@ -194,34 +243,48 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { - attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
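+		// (the zero value has nil instruments), in which case the response
+		// size is silently not recorded.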
return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 000000000000..32630864bf23 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={}" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={}" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={}" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={}" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={}" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={}" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc40..8c3c6275133a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,14 +10,17 @@ import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +38,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +65,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +110,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +141,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +156,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +166,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +201,94 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. 
-func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +308,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +370,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +397,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +414,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -343,6 +429,91 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + 
semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f579..558efd0594bc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -14,14 +17,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { @@ -96,3 +99,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 5367732ec5dd..57d1507b620d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -17,7 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +37,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +69,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +86,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +115,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +146,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,24 +166,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } -func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.status_code int @@ -197,7 +199,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit var requestHost string var requestPort int for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -214,7 +216,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -235,7 +237,7 @@ const ( clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds ) -func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -262,13 +264,3 @@ func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } - -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} diff --git 
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 000000000000..9476ef01b015 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one labeller can be injected into the context. +// Injecting it multiple times will override the previous calls. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a new, +// zero start time is returned and the second return value is false. +func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b098..44b86ad86095 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index a07d8689d473..386f09e1b7aa 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.56.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664beb5..ae8577ef366a 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f7e..ce3f40b609c1 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,6 +22,7 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv @@ -30,6 +31,7 @@ linters: - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -127,8 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -156,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269c6..599d59cd130d 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. 
(#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. 
(#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3197,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb3396557432..22a2e9dbd495 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled. ### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a212408..a7f6d8cc6882 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -235,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ @@ -260,7 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index efec278905bb..d9a19207625a 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b61258ab..4ebef4f9ddff 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -130,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362bbe..b8cb605c1669 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. 
Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030c9..0e1fe2422031 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac354665..49a35b12255d 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc900..2e7690e43a24 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927def..8409b5f8f95c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. 
@@ -289,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8d4b4bf08988..0a317d92637d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index bb2f3ffd1d8a..16c006b2cfdd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -166,8 +166,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc }() } - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer @@ -194,34 +193,33 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc } } return nil - - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failures. - rErr := newResponseError(resp.Header, nil) - - // server may return a message with the response - // body, so we read it to include in the error - // message to be returned. It will help in - // debugging the actual issue. - var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { - _ = resp.Body.Close() - return err - } - - // overwrite the error message with the response body - // if it is not empty - if respStr := strings.TrimSpace(respData.String()); respStr != "" { - // Include response for context. - e := errors.New(respStr) - rErr = newResponseError(resp.Header, e) - } - return rErr + } + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + return err + } + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) default: - return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) + // Non-retryable failure. 
+ return fmt.Errorf("failed to send to %s: %s (%w)", request.URL, resp.Status, bodyErr) } }) } @@ -278,7 +276,7 @@ func (d *client) MarshalLog() interface{} { Endpoint string Insecure bool }{ - Type: "otlphttphttp", + Type: "otlptracehttp", Endpoint: d.cfg.Endpoint, Insecure: d.cfg.Insecure, } @@ -328,7 +326,7 @@ func newResponseError(header http.Header, wrapped error) error { func (e retryableError) Error() string { if e.err != nil { - return fmt.Sprintf("retry-able request failure: %s", e.err.Error()) + return "retry-able request failure: " + e.err.Error() } return "retry-able request failure" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 4cfd6c27f586..6a9c4d3a6521 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 709c8f70a68f..f156ee66720c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.31.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d84794741..691d96c7554c 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a47..ae92a4251666 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09f5..a6acd8dca66e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ 
b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } reg := ®istration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) 
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48f9..8982aa0dc56e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // @@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) 
+ } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13d80..4f80c898a1dd 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -14,12 +14,6 @@ "matchDepTypes": ["indirect"], "enabled": true }, - { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false - }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045bb4..34852a47b219 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d49c..c02aeefdde53 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. 
+ res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b45..cf3c88e15cd6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 4ce757dfd6b1..ccc97e1b6625 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebda0..185aa7c08f7c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f72466be..9b672a1d70d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 730fb85c3ef6..8f4fc3850823 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. 
+ if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOption currently supported are [trace.WithTimestamp], and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. @@ -639,10 +684,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index 7aababbbf2fd..732669a17ade 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -69,6 +69,19 @@ func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { return dst } +// Reset clears the recorded spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Reset() { + sr.startedMu.Lock() + sr.endedMu.Lock() + defer sr.startedMu.Unlock() + defer sr.endedMu.Unlock() + + sr.started = nil + sr.ended = nil +} + // Ended returns a copy of all ended spans that have been recorded. // // This method is safe to be called concurrently. diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index dc1eaa8e9d06..6b403851073d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
func Version() string { - return "1.31.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e00146..9c0b720a4d63 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40ec..eb22002d8243 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.31.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb78a..ce4fe59b0e4b 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.56.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.10.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35bc41..b342a0a94012 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. 
This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -384,7 +385,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8dc49..ca645d9a1aff 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123c81a..5b516c55fffd 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 090d0e1bdb5d..b2e2ed337395 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -375,6 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + closedOnIdle bool // true if conn was closed for idleness seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) 
// // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -2434,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. - const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c20464..48dbb9d84c89 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3653..f388426b08f7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e5621827921..3cd9a5bb8e62 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. 
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 
0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> 
google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 6cbe116f2479..d2bba7f3d9ec 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.69.2" +const Version = "1.69.4" diff --git a/vendor/modules.txt b/vendor/modules.txt index 789a3a7b5b54..46dedf08eac9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -319,8 +319,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus # github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 ## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 -## explicit; go 1.21 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 +## explicit; go 1.22.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -504,18 +504,22 @@ go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 -## explicit; go 1.22 +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -529,31 +533,31 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 -## 
explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/metric v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/metric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/sdk v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env @@ -561,18 +565,18 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/trace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/crypto v0.31.0 +# golang.org/x/crypto v0.32.0 ## explicit; go 1.20 golang.org/x/crypto/cast5 golang.org/x/crypto/cryptobyte @@ -590,7 +594,7 @@ golang.org/x/exp/constraints # golang.org/x/mod v0.22.0 ## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.33.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/html @@ -604,7 +608,7 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.23.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -622,7 +626,7 @@ golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/debug golang.org/x/sys/windows/svc/mgr -# golang.org/x/term v0.27.0 +# golang.org/x/term v0.28.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.21.0 @@ -634,15 +638,15 @@ 
golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.2 +# google.golang.org/grpc v1.69.4 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes From a6dc9905cbb1833c459362ba72928bd348967158 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 17 Jan 2025 23:05:31 +0100 Subject: [PATCH 34/46] client: add WithExtraDialOpts option the client package provides a WithDialOpts option, however, dial-options passed to override all defaults that are set in containerd. This makes it difficult to expand the defaults with custom options, as this requires copying the defaults, and trying to keep those in sync (e.g. see [moby#48617]). This patch introduces a new `WithExtraDialOpts` option which, unlike `WithDialOpts` are appended to, instead of overriding, previous options. This allows setting custom options, while maintaining containerd's defaults. Also unlike `WithDialOpts`, this option can be used multiple times to allow additional options to be set. [moby#48617]: https://github.com/moby/moby/pull/48617 Signed-off-by: Sebastiaan van Stijn --- client/client.go | 2 ++ client/client_opts.go | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/client/client.go b/client/client.go index c58459b214f3..b8ff31ce9f9d 100644 --- a/client/client.go +++ b/client/client.go @@ -139,6 +139,8 @@ func New(address string, opts ...Opt) (*Client, error) { if len(copts.dialOptions) > 0 { gopts = copts.dialOptions } + gopts = append(gopts, copts.extraDialOpts...) + gopts = append(gopts, grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize), grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize))) diff --git a/client/client_opts.go b/client/client_opts.go index 54b37d4ca3a2..941a67b19e32 100644 --- a/client/client_opts.go +++ b/client/client_opts.go @@ -34,6 +34,7 @@ type clientOpts struct { defaultPlatform platforms.MatchComparer services *services dialOptions []grpc.DialOption + extraDialOpts []grpc.DialOption callOptions []grpc.CallOption timeout time.Duration } @@ -76,6 +77,19 @@ func WithDialOpts(opts []grpc.DialOption) Opt { } } +// WithExtraDialOpts allows additional grpc.DialOptions to be set on the +// connection. Unlike [WithDialOpts], options set here are appended to, +// instead of overriding previous options, which allows setting options +// to extend containerd client's defaults. +// +// This option can be used multiple times to set additional dial options. +func WithExtraDialOpts(opts []grpc.DialOption) Opt { + return func(c *clientOpts) error { + c.extraDialOpts = append(c.extraDialOpts, opts...) 
+ return nil + } +} + // WithCallOpts allows grpc.CallOptions to be set on the connection func WithCallOpts(opts []grpc.CallOption) Opt { return func(c *clientOpts) error { From 886d971f855da042f1c83fc87b2074c858062f3b Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 17 Jan 2025 14:54:57 -0800 Subject: [PATCH 35/46] Update LTS definition and support horizon Signed-off-by: Derek McGowan --- RELEASES.md | 59 +++++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 2a8a486db79d..882f6d345833 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -90,43 +90,34 @@ whichever is longer. Additionally, releases may have an extended security suppor period after the end of the active period to accept security backports. This timeframe will be decided by maintainers before the end of the active status. -Long term stable (_LTS_) releases will be supported for at least three years after -their initial _minor_ release. These branches will accept bug reports and -backports until the end of life date. They may also accept a wider range of -patches than non-_LTS_ releases to support the longer term maintainability of the -branch, including library dependency, toolchain (including Go) and other version updates -which are needed to ensure each release is built with fully supported dependencies and -remains usable by containerd clients. _LTS_ releases can also accept feature backports -to support new Kubernetes releases. The default action has to be reject it though, -for long-term stability. This is still negotiable when the feature is a hard dependency -for a new release of Kubernetes. There should be at least a 6-month overlap between -the end of life of an _LTS_ release and the initial release of a new _LTS_ release. -Up to 6 months before the announced end of life of an _LTS_ branch, the branch may -convert to a regular _Active_ release with stricter backport criteria. +Long term stable (_LTS_) releases are owned by at least two maintainers for at least two +years after their initial _minor_ (x.y.0) release. The maintainers of the _LTS_ branch may commit to +a longer period or extend the support period as needed. These branches will accept bug reports and +backports until the end of life date. They may also accept a wider range of patches than non-_LTS_ +releases to support the longer term maintainability of the branch, including library dependency, +toolchain (including Go) and other version updates which are needed to ensure each release is built +with fully supported dependencies. Feature backports are up to the discretion of the maintainers who +own the branch but should be rejected by default. There is no defined limit to the number of _LTS_ +branches and any branch may become an _LTS_ branch after its initial release. There is no guarantee +that a new _LTS_ branch will be designated before existing _LTS_ branches reach their end of life. 
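
Referring back to the `WithExtraDialOpts` client option introduced in PATCH 34/46 above: the sketch below shows how a caller might use it to extend, rather than replace, containerd's default gRPC dial options. This is a minimal illustration only — the import path assumes the containerd v2 module layout, and the socket path, user agent string, and the specific `grpc.DialOption` values chosen are assumptions for the example, not part of the patch.

```go
package main

import (
	"log"

	"github.com/containerd/containerd/v2/client"
	"google.golang.org/grpc"
)

func main() {
	// WithExtraDialOpts appends to containerd's default dial options instead of
	// overriding them (unlike WithDialOpts), so defaults such as the message
	// size limits set by the client package are preserved.
	c, err := client.New("/run/containerd/containerd.sock",
		client.WithExtraDialOpts([]grpc.DialOption{
			grpc.WithUserAgent("my-integration/1.0"), // hypothetical user agent
		}),
		// The option may be used multiple times; each call appends further options.
		client.WithExtraDialOpts([]grpc.DialOption{
			grpc.WithDisableRetry(),
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}
```

Under these assumptions, copying containerd's internal defaults (as integrations previously had to do with `WithDialOpts`) is no longer necessary just to add one extra option.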
The current state is available in the following tables: -| Release | Status | Start | End of Life | -| --------- | ------------- | ------------------ | ------------------- | -| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | -| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | -| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | -| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | -| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | -| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | -| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | -| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | -| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.18) | End of Life | May 3, 2021 | February 28, 2023 | -| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.36) | LTS | February 15, 2022 | next LTS + 6 months | -| [1.7](https://github.com/containerd/containerd/releases/tag/v1.7.23) | Active | March 10, 2023 | active(May 5, 2025), extended(EOL of 1.6) | -| [2.0](https://github.com/containerd/containerd/releases/tag/v2.0.0) | Active | November 5, 2024 | max(November 5, 2025 or release of 2.1 + 6 months) | -| [2.1](https://github.com/containerd/containerd/milestone/48) | Next | TBD | TBD | - -> **_NOTE_** containerd v1.7 will end of life at the same time as v1.6 LTS. Due to -> [Minimal Version Selection](https://go.dev/ref/mod#minimal-version-selection) used -> by Go modules, 1.7 must be supported until EOL of all 1.x releases. Once 1.7 is in -> extended support, it will continue to accept security patches in addition to client -> changes relevant for package importers using the 1.6 LTS daemon. 
+| Release | Status | Start | End of Life | Owners | +| --------- | ------------- | ------------------ | -------------------------------------------------- | ---------------------- | +| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | | +| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | | +| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | | +| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | | +| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | | +| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | | +| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | | +| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | | +| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.18) | End of Life | May 3, 2021 | February 28, 2023 | | +| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.36) | LTS | February 15, 2022 | July 17, 2025 | @containerd/committers | +| [1.7](https://github.com/containerd/containerd/releases/tag/v1.7.23) | LTS | March 10, 2023 | March 10, 2026 | @containerd/committers | +| [2.0](https://github.com/containerd/containerd/releases/tag/v2.0.0) | Active | November 5, 2024 | max(November 5, 2025 or release of 2.1 + 6 months) | @containerd/committers | +| [2.1](https://github.com/containerd/containerd/milestone/48) | Next | TBD | TBD | | ### Kubernetes Support From 5f238fa827a97e729592c1ed896a1192ba53ab09 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 17 Jan 2025 17:01:03 -0800 Subject: [PATCH 36/46] Update to time based releases Adds new criteria and schedule for time based releases. Adds more ownership and roles for the different phases of the release process. Signed-off-by: Derek McGowan --- RELEASES.md | 118 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 44 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 882f6d345833..4c1ff410dab6 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -39,44 +39,39 @@ be done from that branch. For example, once we release `v1.0.0`, a branch `release/1.0` will be created from that tag. All future patch releases will be done against that branch. -### Pre-releases +### Release Cadence -Pre-releases, such as alphas, betas and release candidates will be conducted -from their source branch. For major and minor releases, these releases will be -done from main. For patch releases, these pre-releases should be done within -the corresponding release branch. +Minor releases are provided on a time basis with an initial cadence of 6 months. +The next two containerd releases should have a target release date scheduled based +on the current release cadence. Changes to the release cadence will not impact +releases which are already scheduled. -While pre-releases are done to assist in the stabilization process, no -guarantees are provided. +The maintainers will maintain a roadmap and milestones for each release, however, +features may be pushed to accommodate the release timeline. 
If your issue or feature +is not present in the roadmap, please open a Github issue or leave a +comment requesting it be added to a milestone. -### Upgrade Path +### Patch Releases -The upgrade path for containerd is such that the 0.0.x patch releases are -always backward compatible with its major and minor version. Minor (0.x.0) -version will always be compatible with the previous minor release. i.e. 1.2.0 -is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0. There is -no compatibility guarantees for upgrades that span multiple, _minor_ releases. -For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1, -then 1.2. - -There are no compatibility guarantees with upgrades to _major_ versions. For -example, upgrading from 1.0.0 to 2.0.0 may require resources to migrated or -integrations to change. Each major version will be supported for at least 1 -year with bug fixes and security patches. +Patch releases are made directly from release branches and will be done as needed +by the release branch owners. -### Next Release +### Pre-releases -The activity for the next release will be tracked in the -[milestones](https://github.com/containerd/containerd/milestones). If your -issue or PR is not present in a milestone, please reach out to the maintainers -to create the milestone or add an issue or PR to an existing milestone. +Pre-releases, such as alphas, betas and release candidates will be conducted +from their source branch. For major and minor releases, these releases will be +done from main. For patch releases, it is uncommon to have pre-releases but +they may have an rc based on the discretion of the release branch owners. ### Support Horizon Support horizons will be defined corresponding to a release branch, identified by `.`. Release branches will be in one of several states: -- __*Next*__: The next planned release branch. +- __*Future*__: An upcoming scheduled release. +- __*Alpha*__: The next scheduled release on the main branch under active development. +- __*Beta*__: The next scheduled release on the main branch under testing. Begins 8-10 weeks before a final release. +- __*RC*__: The next scheduled release on the main branch under final testing and stabilization. Begins 2-4 weeks before a final release. For new releases where the source branch is main, the main branch will be in a feature freeze during this phase. - __*Active*__: The release is a stable branch which is currently supported and accepting patches. - __*Extended*__: The release branch is only accepting security patches. - __*LTS*__: The release is a long term stable branch which is currently supported and accepting patches. @@ -101,23 +96,42 @@ own the branch but should be rejected by default. There is no defined limit to t branches and any branch may become an _LTS_ branch after its initial release. There is no guarantee that a new _LTS_ branch will be designated before existing _LTS_ branches reach their end of life. 
-The current state is available in the following tables: - -| Release | Status | Start | End of Life | Owners | -| --------- | ------------- | ------------------ | -------------------------------------------------- | ---------------------- | -| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | | -| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | | -| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | | -| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | | -| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | | -| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | | -| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | | -| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | | -| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.18) | End of Life | May 3, 2021 | February 28, 2023 | | -| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.36) | LTS | February 15, 2022 | July 17, 2025 | @containerd/committers | -| [1.7](https://github.com/containerd/containerd/releases/tag/v1.7.23) | LTS | March 10, 2023 | March 10, 2026 | @containerd/committers | -| [2.0](https://github.com/containerd/containerd/releases/tag/v2.0.0) | Active | November 5, 2024 | max(November 5, 2025 or release of 2.1 + 6 months) | @containerd/committers | -| [2.1](https://github.com/containerd/containerd/milestone/48) | Next | TBD | TBD | | +### Release Owners + +Every release shall be assigned owners when entering into the beta stage of the release. The initial +release owners will be responsible for creating the releases and ensuring the release is on time. +Once the release is in rc, the release owners should be part of any discussion around merging +impactful or risky changes. Every release should have at least two owners who are all active +maintainers and one of which has been a release owner in at least two prior releases. + +Once the final release is out and the release branch moves to active, ownership will be +transferred back over to all committers. Active releases are maintained by all committers +until the release reaches end of life or the branch transitions to _LTS_. + +Every _LTS_ release requires at least two maintainers to volunteer as owners. The owners of the +_LTS_ release may step down or be replaced by another maintainer at any time if they can no longer +support the release. If no maintainers volunteer to own the _LTS_ release after maintainers step +down, the branch will end of life after 6 months of extended support with ownership transferred back +to all committers. 
+ +### Current State of containerd Releases + +| Release | Status | Start | End of Life | Owners | +| -------------------------------------------------------------------- | ------------- | ------------------------------ | ------------------------------ | ---------------------- | +| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | | +| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | | +| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | | +| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | | +| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | | +| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | | +| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | | +| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | | +| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.18) | End of Life | May 3, 2021 | February 28, 2023 | | +| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | LTS | February 15, 2022 | July 23, 2025 | @containerd/committers | +| [1.7](https://github.com/containerd/containerd/releases/tag/v1.7.0) | LTS | March 10, 2023 | March 10, 2026 | @containerd/committers | +| [2.0](https://github.com/containerd/containerd/releases/tag/v2.0.0) | Active | November 5, 2024 | November 7, 2025 (_tentative_) | @containerd/committers | +| [2.1](https://github.com/containerd/containerd/milestone/48) | Alpha | May 7, 2025 (_tentative_) | _TBD_ | _TBD_ | +| [2.2](https://github.com/containerd/containerd/milestone/49) | _Future_ | November 5, 2025 (_tentative_) | _TBD_ | _TBD_ | ### Kubernetes Support @@ -223,6 +237,21 @@ completed, open a PR using the process above. Only when the bug is not seen in main and must be made for the specific release branch should you open a PR with new code. +### Upgrade Path + +The upgrade path for containerd is such that the 0.0.x patch releases are +always backward compatible with its major and minor version. Minor (0.x.0) +version will always be compatible with the previous minor release. i.e. 1.2.0 +is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0. There is +no compatibility guarantees for upgrades that span multiple, _minor_ releases. +For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1, +then 1.2. + +There are no compatibility guarantees with upgrades to _major_ versions. For +example, upgrading from 1.0.0 to 2.0.0 may require resources to migrated or +integrations to change. Each major version will be supported for at least 1 +year with bug fixes and security patches. + ## Public API Stability The following table provides an overview of the components covered by @@ -296,7 +325,8 @@ releases for prior API versions should be avoided if possible. 
| v1.6 | 1.6 | | v1.7 | 1.7 | | v2.0 | 1.8 | -| next | 1.9 | +| _v2.1_ | _1.9_ | +| _v2.2_ | _1.10_ | ### Metrics API From 6d1f6e75d65283dc6440556cfaf694c20059d77d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 21 Jan 2025 22:30:33 -0800 Subject: [PATCH 37/46] Update upgrade section Signed-off-by: Derek McGowan --- RELEASES.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 4c1ff410dab6..2112876293e3 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -247,10 +247,13 @@ no compatibility guarantees for upgrades that span multiple, _minor_ releases. For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1, then 1.2. -There are no compatibility guarantees with upgrades to _major_ versions. For -example, upgrading from 1.0.0 to 2.0.0 may require resources to migrated or -integrations to change. Each major version will be supported for at least 1 -year with bug fixes and security patches. +There are no compatibility guarantees with upgrades to _major_ versions. For 2.0, migration was +added to ensure upgrading from 1.6 or 1.7 to 2.0 is easy. The latest releases of 1.6 and 1.7 provide +deprecation warnings if any configuration is used which is incompatible with 2.0. If deprecation +warnings are showing up, the configuration can be safely migrated in 1.6 or 1.7 before upgrading to +2.0. Once no deprecation warnings are showing up, the upgrade to 2.0 should be smooth. Always +check the release notes, breaking changes are listed there, and test your configuration before +upgrading. ## Public API Stability From 48d09104dcc4244672c590e9b6ab3ab71d8c9ce4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 23:56:04 +0000 Subject: [PATCH 38/46] build(deps): bump github/codeql-action from 3.28.1 to 3.28.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.1 to 3.28.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b6a472f63d85b9c78a3ac5e89422239fc15e9b3c...17a820bf2e43b47be2c72b39cc905417bc1ab6d0) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/scorecards.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 56ac64867b1d..a5307e3716ab 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -36,7 +36,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 + uses: github/codeql-action/init@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # v3.28.6 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java @@ -46,4 +46,4 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # v3.28.1 + uses: github/codeql-action/analyze@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # v3.28.6 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f0a4bb843f69..f5c2263d8ec7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -49,6 +49,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b6a472f63d85b9c78a3ac5e89422239fc15e9b3c # tag=v3.28.1 + uses: github/codeql-action/upload-sarif@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # tag=v3.28.6 with: sarif_file: results.sarif From 5752397896d44d5807837c8a71e2c0f1769ba66a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 23:56:10 +0000 Subject: [PATCH 39/46] build(deps): bump actions/stale from 9.0.0 to 9.1.0 Bumps [actions/stale](https://github.com/actions/stale) from 9.0.0 to 9.1.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/28ca1036281a5e5922ead5184a1bbf96e5fc984e...5bef64f19d7facfb25b37b414482c7164d639639) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 16c57b5fb9f0..c0e0bf5f1038 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -17,7 +17,7 @@ jobs: pull-requests: write steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 # All stale bot options: https://github.com/actions/stale#all-options with: # Idle number of days before marking issues/PRs stale From 0c986c332f072ce2273c06d2707976b321830423 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 23:56:11 +0000 Subject: [PATCH 40/46] build(deps): bump actions/attest-build-provenance from 2.1.0 to 2.2.0 Bumps [actions/attest-build-provenance](https://github.com/actions/attest-build-provenance) from 2.1.0 to 2.2.0. - [Release notes](https://github.com/actions/attest-build-provenance/releases) - [Changelog](https://github.com/actions/attest-build-provenance/blob/main/RELEASE.md) - [Commits](https://github.com/actions/attest-build-provenance/compare/7668571508540a607bdfd90a87a560489fe372eb...520d128f165991a6c774bcb264f323e3d70747f4) --- updated-dependencies: - dependency-name: actions/attest-build-provenance dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5cf25a7b4bd9..2ccd464c4c65 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -149,7 +149,7 @@ jobs: path: builds - name: Attest Artifacts id: attest - uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0 + uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0 with: subject-path: ./builds/release-tars-**/*.tar.gz - name: Rename attestation artifact From 168c49e4dcf1fcfebcf5d751f5aa20747b2a2032 Mon Sep 17 00:00:00 2001 From: Jin Dong Date: Tue, 28 Jan 2025 21:43:31 +0000 Subject: [PATCH 41/46] Fix state/root bug in shim sandbox controller Signed-off-by: Jin Dong --- plugins/sandbox/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/sandbox/controller.go b/plugins/sandbox/controller.go index aec9cc3466ee..0bd1b503670a 100644 --- a/plugins/sandbox/controller.go +++ b/plugins/sandbox/controller.go @@ -73,7 +73,7 @@ func init() { } } - if err := shims.LoadExistingShims(ic.Context, root, state); err != nil { + if err := shims.LoadExistingShims(ic.Context, state, root); err != nil { return nil, fmt.Errorf("failed to load existing shim sandboxes, %v", err) } From 2eb0aa6b988a508400d6567602e7f3af838ca3c4 Mon Sep 17 00:00:00 2001 From: Antonio Ojea Date: Mon, 3 Feb 2025 14:22:03 +0000 Subject: [PATCH 42/46] nri: make OCI spec available on StopPodSandbox The NRI plugins define hooks on the Pod and the Container lifecycle and provide contextual information for each of them in the corresponding hooks. The StopPodSandbox hook already has the namespaces created and the plugins using that hook may require that information, so we should be able to do a best effort to pass it down to the NRI plugins. Signed-off-by: Antonio Ojea --- go.mod | 2 +- integration/nri_linux_test.go | 257 ++++++++++++++++++++++++++++++ integration/nri_test.go | 30 +++- internal/cri/nri/nri_api_linux.go | 5 + 4 files changed, 290 insertions(+), 4 deletions(-) create mode 100644 integration/nri_linux_test.go diff --git a/go.mod b/go.mod index eb4b2336f09a..f90145e3f9c2 100644 --- a/go.mod +++ b/go.mod @@ -62,6 +62,7 @@ require ( github.com/tchap/go-patricia/v2 v2.3.2 github.com/urfave/cli/v2 v2.27.5 github.com/vishvananda/netlink v1.3.0 + github.com/vishvananda/netns v0.0.4 go.etcd.io/bbolt v1.3.11 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 @@ -128,7 +129,6 @@ require ( github.com/smallstep/pkcs7 v0.1.1 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/vishvananda/netns v0.0.4 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/integration/nri_linux_test.go b/integration/nri_linux_test.go new file mode 100644 index 000000000000..eb0137ba596d --- /dev/null +++ b/integration/nri_linux_test.go @@ -0,0 +1,257 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package integration + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/containerd/nri/pkg/api" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +// Test NRI networking capabilities. +func TestNriPluginNetworkingSynchronization(t *testing.T) { + skipNriTestIfNecessary(t) + + t.Log("Test that NRI plugins get pod networking attributes on synchronization.") + + var ( + tc = &nriTest{ + t: t, + } + podCount = 3 + ctrPerPod = 2 + podPrefix = "pod-net-sync" + ) + + tc.setup() + + for i := 0; i < podCount; i++ { + podID := tc.runPod(fmt.Sprintf("%s%d", podPrefix, i)) + for j := 0; j < ctrPerPod; j++ { + tc.startContainer(podID, fmt.Sprintf("ctr%d", j)) + } + } + + numPods := 0 + // override hooks are executed after the sync events, use a channel to synchronize the test + syncCh := make(chan struct{}) + plugin := &mockPlugin{ + synchronize: func(mp *mockPlugin, pods []*api.PodSandbox, c []*api.Container) ([]*api.ContainerUpdate, error) { + defer close(syncCh) + for _, pod := range pods { + t.Logf("synchronize pod %s/%s: namespace=%s ips=%v", pod.GetNamespace(), pod.GetName(), getNetworkNamespace(pod), pod.GetIps()) + // only process the pods created by this test + if !strings.HasPrefix(pod.GetName(), podPrefix) { + t.Logf("DEBUG pod namespace %s name %s tc.namespace %s", pod.GetNamespace(), pod.GetName(), tc.namespace) + continue + } + numPods++ + // get the pod network namespace + ns := getNetworkNamespace(pod) + // test only creates pods with network + require.NotEmpty(t, ns) + require.ElementsMatch(t, pod.GetIps(), getNetworkNamespaceIPs(ns)) + } + return nil, nil + }, + } + + tc.connectNewPlugin(plugin) + + err := plugin.Wait(PluginSynchronized, time.After(pluginSyncTimeout)) + require.NoError(t, err, "plugin sync wait") + + for _, id := range tc.pods { + for _, plugin := range tc.plugins { + _, ok := plugin.pods[id] + require.True(tc.t, ok, "runtime sync of pod "+id) + } + } + + for _, id := range tc.ctrs { + for _, plugin := range tc.plugins { + _, ok := plugin.ctrs[id] + require.True(t, ok, "runtime sync of container "+id) + } + } + + select { + case <-syncCh: + case <-time.After(pluginSyncTimeout): + t.Fatalf("test timed out waiting for the plugin to sync") + } + require.Equal(t, numPods, podCount) +} + +func TestNriPluginNetworkingLifecycle(t *testing.T) { + skipNriTestIfNecessary(t) + + t.Log("Test that NRI plugins get pod networking attributes during the pod lifecycle.") + + var ( + tc = &nriTest{ + t: t, + } + podName = "pod-net-lifecycle" + ) + + tc.setup() + + hookExecs := 0 + // override hooks are executed after the sync events, use channels to synchronize the test + runPodSandboxCh := make(chan struct{}) + stopPodSandboxCh := make(chan struct{}) + removePodSandboxCh := make(chan struct{}) + + plugin := &mockPlugin{ + runPodSandbox: func(mp *mockPlugin, pod *api.PodSandbox) error { + t.Logf("runPodSandbox pod %s/%s: namespace=%s ips=%v", pod.GetNamespace(), pod.GetName(), getNetworkNamespace(pod), pod.GetIps()) + if pod.Name != podName { + return nil + } 
+ defer close(runPodSandboxCh) + // get the pod network namespace + ns := getNetworkNamespace(pod) + // test only creates pods with network + require.NotEmpty(t, ns) + require.ElementsMatch(t, pod.GetIps(), getNetworkNamespaceIPs(ns)) + hookExecs++ + return nil + }, + stopPodSandbox: func(mp *mockPlugin, pod *api.PodSandbox) error { + t.Logf("stopPodSandbox pod %s/%s: namespace=%s ips=%v", pod.GetNamespace(), pod.GetName(), getNetworkNamespace(pod), pod.GetIps()) + if pod.Name != podName { + return nil + } + defer close(stopPodSandboxCh) + // get the pod network namespace + ns := getNetworkNamespace(pod) + // test only creates pods with network + require.NotEmpty(t, ns) + require.ElementsMatch(t, pod.GetIps(), getNetworkNamespaceIPs(ns)) + hookExecs++ + return nil + }, + removePodSandbox: func(mp *mockPlugin, pod *api.PodSandbox) error { + t.Logf("removePodSandbox pod %s/%s: namespace=%s ips=%v", pod.GetNamespace(), pod.GetName(), getNetworkNamespace(pod), pod.GetIps()) + if pod.Name != podName { + return nil + } + defer close(removePodSandboxCh) + // get the pod network namespace + ns := getNetworkNamespace(pod) + // test only creates pods with network but at this point networking namespace is not present + require.Empty(t, ns) + // the Pod assigned IPs are still available + require.NotEmpty(t, pod.GetIps()) + hookExecs++ + return nil + }, + } + + tc.connectNewPlugin(plugin) + + err := plugin.Wait(PluginSynchronized, time.After(pluginSyncTimeout)) + require.NoError(t, err, "plugin sync wait") + + sbConfig := PodSandboxConfig(podName, tc.namespace) + podID, err := runtimeService.RunPodSandbox(sbConfig, *runtimeHandler) + require.NoError(t, err) + + err = plugin.Wait(&Event{Pod: podID, Type: EventType(RunPodSandbox)}, time.After(pluginSyncTimeout)) + require.NoError(t, err, "plugin sync wait") + select { + case <-runPodSandboxCh: + case <-time.After(pluginSyncTimeout): + t.Fatalf("test timed out waiting for the RunPodSandbox hook to be executed") + } + + assert.NoError(t, runtimeService.StopPodSandbox(podID)) + err = plugin.Wait(&Event{Pod: podID, Type: EventType(StopPodSandbox)}, time.After(pluginSyncTimeout)) + require.NoError(t, err, "plugin sync wait") + select { + case <-stopPodSandboxCh: + case <-time.After(pluginSyncTimeout): + t.Fatalf("test timed out waiting for the StopPodSandbox hook to be executed") + } + + assert.NoError(t, runtimeService.RemovePodSandbox(podID)) + err = plugin.Wait(&Event{Pod: podID, Type: EventType(RemovePodSandbox)}, time.After(pluginSyncTimeout)) + require.NoError(t, err, "plugin sync wait") + select { + case <-removePodSandboxCh: + case <-time.After(pluginSyncTimeout): + t.Fatalf("test timed out waiting for the RemovePodSandbox hook to be executed") + } + + if hookExecs != 3 { + t.Fatalf("expected 3 hooks executed only got %d", hookExecs) + } + +} + +func getNetworkNamespace(pod *api.PodSandbox) string { + // get the pod network namespace + for _, namespace := range pod.Linux.GetNamespaces() { + if namespace.Type == "network" { + return namespace.Path + } + } + return "" +} + +func getNetworkNamespaceIPs(nsPath string) []string { + ips := []string{} + sandboxNs, err := netns.GetFromPath(nsPath) + if err != nil { + return ips + } + // to avoid golang problem with goroutines we create the socket in the + // namespace and use it directly + nhNs, err := netlink.NewHandleAt(sandboxNs) + if err != nil { + return ips + } + + // there is a convention the interface inside the Pod is always named eth0 + // internal/cri/server/helpers.go: defaultIfName = "eth0" + 
nsLink, err := nhNs.LinkByName("eth0") + if err != nil { + return ips + } + addrs, err := nhNs.AddrList(nsLink, netlink.FAMILY_ALL) + if err != nil { + return ips + } + for _, addr := range addrs { + // ignore link local and loopback addresses + // those are not added by the CNI + if addr.IP.IsGlobalUnicast() { + ips = append(ips, addr.IP.String()) + } + } + return ips +} diff --git a/integration/nri_test.go b/integration/nri_test.go index b56d173bd90b..150cd7551166 100644 --- a/integration/nri_test.go +++ b/integration/nri_test.go @@ -710,6 +710,9 @@ type mockPlugin struct { namespace string logf func(string, ...interface{}) synchronize func(*mockPlugin, []*api.PodSandbox, []*api.Container) ([]*api.ContainerUpdate, error) + runPodSandbox func(*mockPlugin, *api.PodSandbox) error + stopPodSandbox func(*mockPlugin, *api.PodSandbox) error + removePodSandbox func(*mockPlugin, *api.PodSandbox) error createContainer func(*mockPlugin, *api.PodSandbox, *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) postCreateContainer func(*mockPlugin, *api.PodSandbox, *api.Container) updateContainer func(*mockPlugin, *api.PodSandbox, *api.Container) ([]*api.ContainerUpdate, error) @@ -762,6 +765,15 @@ func (m *mockPlugin) Start() error { if m.synchronize == nil { m.synchronize = nopSynchronize } + if m.runPodSandbox == nil { + m.runPodSandbox = nopRunPodSandbox + } + if m.stopPodSandbox == nil { + m.stopPodSandbox = nopStopPodSandbox + } + if m.removePodSandbox == nil { + m.removePodSandbox = nopRemovePodSandbox + } if m.createContainer == nil { m.createContainer = nopCreateContainer } @@ -845,7 +857,7 @@ func (m *mockPlugin) RunPodSandbox(ctx context.Context, pod *api.PodSandbox) err m.Log("RunPodSandbox %s/%s", pod.Namespace, pod.Name) m.pods[pod.Id] = pod m.q.Add(PodSandboxEvent(pod, RunPodSandbox)) - return nil + return m.runPodSandbox(m, pod) } func (m *mockPlugin) StopPodSandbox(ctx context.Context, pod *api.PodSandbox) error { @@ -856,7 +868,7 @@ func (m *mockPlugin) StopPodSandbox(ctx context.Context, pod *api.PodSandbox) er m.Log("StopPodSandbox %s/%s", pod.Namespace, pod.Name) m.pods[pod.Id] = pod m.q.Add(PodSandboxEvent(pod, StopPodSandbox)) - return nil + return m.stopPodSandbox(m, pod) } func (m *mockPlugin) RemovePodSandbox(ctx context.Context, pod *api.PodSandbox) error { @@ -867,7 +879,7 @@ func (m *mockPlugin) RemovePodSandbox(ctx context.Context, pod *api.PodSandbox) m.Log("RemovePodSandbox %s/%s", pod.Namespace, pod.Name) delete(m.pods, pod.Id) m.q.Add(PodSandboxEvent(pod, RemovePodSandbox)) - return nil + return m.removePodSandbox(m, pod) } func (m *mockPlugin) CreateContainer(ctx context.Context, pod *api.PodSandbox, ctr *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) { @@ -978,6 +990,18 @@ func nopSynchronize(*mockPlugin, []*api.PodSandbox, []*api.Container) ([]*api.Co return nil, nil } +func nopRunPodSandbox(*mockPlugin, *api.PodSandbox) error { + return nil +} + +func nopStopPodSandbox(*mockPlugin, *api.PodSandbox) error { + return nil +} + +func nopRemovePodSandbox(*mockPlugin, *api.PodSandbox) error { + return nil +} + func nopCreateContainer(*mockPlugin, *api.PodSandbox, *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) { return nil, nil, nil } diff --git a/internal/cri/nri/nri_api_linux.go b/internal/cri/nri/nri_api_linux.go index 43643db11a13..a3eecbce5f52 100644 --- a/internal/cri/nri/nri_api_linux.go +++ b/internal/cri/nri/nri_api_linux.go @@ -482,6 +482,11 @@ func (a *API) nriPodSandbox(pod 
*sstore.Sandbox) *criPodSandbox { log.L.WithError(err).Errorf("failed to get task for sandbox container %s", pod.Container.ID()) } + // the containers no longer exist but the oci.Spec may still be available to use on the StopPodSandbox hook + spec, err := pod.Container.Spec(ctx) + if err == nil { + criPod.spec = spec + } return criPod } From 565b50dbb92f231ea1f416dead040d8e96f0963a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 00:03:47 +0000 Subject: [PATCH 43/46] build(deps): bump google-github-actions/auth from 2.1.7 to 2.1.8 Bumps [google-github-actions/auth](https://github.com/google-github-actions/auth) from 2.1.7 to 2.1.8. - [Release notes](https://github.com/google-github-actions/auth/releases) - [Changelog](https://github.com/google-github-actions/auth/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/auth/compare/6fc4af4b145ae7821d527454aa9bd537d1f2dc5f...71f986410dfbc7added4569d411d040a91dc6935) --- updated-dependencies: - dependency-name: google-github-actions/auth dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/windows-hyperv-periodic.yml | 2 +- .github/workflows/windows-periodic.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows-hyperv-periodic.yml b/.github/workflows/windows-hyperv-periodic.yml index 4878c4f34c59..76e20ad3cc79 100644 --- a/.github/workflows/windows-hyperv-periodic.yml +++ b/.github/workflows/windows-hyperv-periodic.yml @@ -306,7 +306,7 @@ jobs: echo 'GCP_WORKLOAD_IDENTITY_PROVIDER=${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}' >> $GITHUB_OUTPUT - name: AuthGcp - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f # v2.1.7 + uses: google-github-actions/auth@71f986410dfbc7added4569d411d040a91dc6935 # v2.1.8 if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER with: service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} diff --git a/.github/workflows/windows-periodic.yml b/.github/workflows/windows-periodic.yml index 761469a5038e..d4dfaac654f4 100644 --- a/.github/workflows/windows-periodic.yml +++ b/.github/workflows/windows-periodic.yml @@ -256,7 +256,7 @@ jobs: echo 'GCP_WORKLOAD_IDENTITY_PROVIDER=${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}' >> $GITHUB_OUTPUT - name: AuthGcp - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f # v2.1.7 + uses: google-github-actions/auth@71f986410dfbc7added4569d411d040a91dc6935 # v2.1.8 if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER with: service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} From 841ab361c1e52200319c08dc8b09f11e07d78f17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 00:03:54 +0000 Subject: [PATCH 44/46] build(deps): bump github/codeql-action from 3.28.6 to 3.28.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.6 to 3.28.8. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/17a820bf2e43b47be2c72b39cc905417bc1ab6d0...dd746615b3b9d728a6a37ca2045b68ca76d4841a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/scorecards.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index a5307e3716ab..32dee4e1b435 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -36,7 +36,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # v3.28.6 + uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java @@ -46,4 +46,4 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # v3.28.6 + uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f5c2263d8ec7..4192b09b4180 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -49,6 +49,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17a820bf2e43b47be2c72b39cc905417bc1ab6d0 # tag=v3.28.6 + uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # tag=v3.28.8 with: sarif_file: results.sarif From b65f3875ba3365a780ac9d9ace295c56ac230ee4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 00:03:58 +0000 Subject: [PATCH 45/46] build(deps): bump google-github-actions/upload-cloud-storage Bumps [google-github-actions/upload-cloud-storage](https://github.com/google-github-actions/upload-cloud-storage) from 2.2.1 to 2.2.2. - [Release notes](https://github.com/google-github-actions/upload-cloud-storage/releases) - [Changelog](https://github.com/google-github-actions/upload-cloud-storage/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/upload-cloud-storage/compare/386ab77f37fdf51c0e38b3d229fad286861cc0d0...7c6e11cb7291594c5dfe0bc1dd9cd905e31e600c) --- updated-dependencies: - dependency-name: google-github-actions/upload-cloud-storage dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/windows-hyperv-periodic.yml | 4 ++--
 .github/workflows/windows-periodic.yml        | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/windows-hyperv-periodic.yml b/.github/workflows/windows-hyperv-periodic.yml
index 4878c4f34c59..950006b98088 100644
--- a/.github/workflows/windows-hyperv-periodic.yml
+++ b/.github/workflows/windows-hyperv-periodic.yml
@@ -313,7 +313,7 @@ jobs:
           workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}

       - name: UploadJobReport
-        uses: google-github-actions/upload-cloud-storage@386ab77f37fdf51c0e38b3d229fad286861cc0d0 # v2.2.1
+        uses: google-github-actions/upload-cloud-storage@7c6e11cb7291594c5dfe0bc1dd9cd905e31e600c # v2.2.2
         if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
         with:
           path: ${{ github.workspace }}/latest-build.txt
@@ -321,7 +321,7 @@ jobs:
           parent: false

       - name: UploadLogsDir
-        uses: google-github-actions/upload-cloud-storage@386ab77f37fdf51c0e38b3d229fad286861cc0d0 # v2.2.1
+        uses: google-github-actions/upload-cloud-storage@7c6e11cb7291594c5dfe0bc1dd9cd905e31e600c # v2.2.2
         if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
         with:
           path: ${{ env.LOGS_DIR }}
diff --git a/.github/workflows/windows-periodic.yml b/.github/workflows/windows-periodic.yml
index 761469a5038e..fab537f9fff3 100644
--- a/.github/workflows/windows-periodic.yml
+++ b/.github/workflows/windows-periodic.yml
@@ -263,7 +263,7 @@ jobs:
           workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}

       - name: UploadJobReport
-        uses: google-github-actions/upload-cloud-storage@386ab77f37fdf51c0e38b3d229fad286861cc0d0 # v2.2.1
+        uses: google-github-actions/upload-cloud-storage@7c6e11cb7291594c5dfe0bc1dd9cd905e31e600c # v2.2.2
         if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
         with:
           path: ${{ github.workspace }}/latest-build.txt
@@ -271,7 +271,7 @@ jobs:
           parent: false

       - name: UploadLogsDir
-        uses: google-github-actions/upload-cloud-storage@386ab77f37fdf51c0e38b3d229fad286861cc0d0 # v2.2.1
+        uses: google-github-actions/upload-cloud-storage@7c6e11cb7291594c5dfe0bc1dd9cd905e31e600c # v2.2.2
         if: steps.AssignGcpCreds.outputs.GCP_SERVICE_ACCOUNT && steps.AssignGcpCreds.outputs.GCP_WORKLOAD_IDENTITY_PROVIDER
         with:
          path: ${{ env.LOGS_DIR }}

From a1c540085f86dcc8613e6db11b73bed4a3a02883 Mon Sep 17 00:00:00 2001
From: Amit Barve
Date: Mon, 10 Feb 2025 13:32:52 -0500
Subject: [PATCH 46/46] Support for importing layers in the block CIM format.

Adds a new diff plugin that can import image layers in the block CIM
format using the new block CIM layer writer added in the hcsshim repo.

This commit also makes another important change in the way a diff is
applied when using CimFS based layer writers. Currently, the diff
plugins call archive.Apply to apply a diff and pass a function (that
can actually apply the diff) as an argument (via archive.ApplyOptions).
This allows callers to invoke archive.Apply either with a custom
applier function or, if no such function is passed, with the default
naive diff applier. However, there is a drawback to this approach. The
applier function passed to the `archive.Apply` call needs to follow a
specific signature. This signature expects all parent layers to be
represented as an array of strings.
In cases like CimFS, we can't easily represent a set of layers as strings (unless we encode extra data in those strings in a hacky way). To get around this problem, the diff plugins for CimFS based layers, skip the archive.Apply call and directly call the layer writer instead. Signed-off-by: Amit Barve --- pkg/archive/tar_opts_windows.go | 16 --- plugins/diff/windows/cimfs.go | 193 ++++++++++++++++++++++++++++- plugins/diff/windows/windows.go | 42 +++---- plugins/snapshots/windows/cimfs.go | 15 ++- script/setup/runhcs-version | 2 +- 5 files changed, 223 insertions(+), 45 deletions(-) diff --git a/pkg/archive/tar_opts_windows.go b/pkg/archive/tar_opts_windows.go index e8943ba5264d..567a3c35209a 100644 --- a/pkg/archive/tar_opts_windows.go +++ b/pkg/archive/tar_opts_windows.go @@ -22,7 +22,6 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/pkg/ociwclayer" - ocicimlayer "github.com/Microsoft/hcsshim/pkg/ociwclayer/cim" ) // applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer @@ -78,18 +77,3 @@ func WithParentLayers(p []string) WriteDiffOpt { return nil } } - -func applyWindowsCimLayer(cimPath string, parentLayerCimPaths []string) func(context.Context, string, io.Reader, ApplyOptions) (int64, error) { - return func(ctx context.Context, root string, r io.Reader, options ApplyOptions) (int64, error) { - return ocicimlayer.ImportCimLayerFromTar(ctx, r, root, cimPath, options.Parents, parentLayerCimPaths) - } -} - -// AsCimContainerLayer indicates that the tar stream to apply is that of a Windows container Layer written in -// the cim format. -func AsCimContainerLayer(cimPath string, parentLayerCimPaths []string) ApplyOpt { - return func(options *ApplyOptions) error { - options.applyFunc = applyWindowsCimLayer(cimPath, parentLayerCimPaths) - return nil - } -} diff --git a/plugins/diff/windows/cimfs.go b/plugins/diff/windows/cimfs.go index 7bb66f80522a..e9c76d48dbd6 100644 --- a/plugins/diff/windows/cimfs.go +++ b/plugins/diff/windows/cimfs.go @@ -21,21 +21,29 @@ package windows import ( "context" + "encoding/json" "fmt" + "io" + "path/filepath" + "strings" + "time" "github.com/Microsoft/hcsshim/pkg/cimfs" + ocicimlayer "github.com/Microsoft/hcsshim/pkg/ociwclayer/cim" "github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/core/diff" "github.com/containerd/containerd/v2/core/metadata" "github.com/containerd/containerd/v2/core/mount" - "github.com/containerd/containerd/v2/pkg/archive" "github.com/containerd/containerd/v2/plugins" winsn "github.com/containerd/containerd/v2/plugins/snapshots/windows" "github.com/containerd/errdefs" + "github.com/containerd/log" "github.com/containerd/platforms" "github.com/containerd/plugin" "github.com/containerd/plugin/registry" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" ) func init() { @@ -58,8 +66,34 @@ func init() { return NewCimDiff(md.(*metadata.DB).ContentStore()) }, }) + + registry.Register(&plugin.Registration{ + Type: plugins.DiffPlugin, + ID: "blockcim", + Requires: []plugin.Type{ + plugins.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.GetSingle(plugins.MetadataPlugin) + if err != nil { + return nil, err + } + + if !cimfs.IsBlockCimSupported() { + return nil, fmt.Errorf("host OS version doesn't support CimFS: %w", plugin.ErrSkipPlugin) + } + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + return 
NewBlockCimDiff(md.(*metadata.DB).ContentStore()) + }, + }) } +// cimApplyFunc is an applier function used when extracting layers into the CimFS format. +// Using the archive.applyFunc is very limiting for CimFS use cases as it forces you to +// represent layers as a single string. So for CimFS we skip the archive.Apply call and +// directly call into the layer writer +type cimApplyFunc func(context.Context, io.Reader) (int64, error) + // cimDiff does filesystem comparison and application // for CimFS specific layer diffs. type cimDiff struct { @@ -97,7 +131,12 @@ func (c cimDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mo if err != nil { return emptyDesc, err } - return applyDiffCommon(ctx, c.store, desc, m.Source, parentLayerPaths, archive.AsCimContainerLayer(cimPath, parentLayerCimPaths), opts...) + + applyFunc := func(fCtx context.Context, r io.Reader) (int64, error) { + return ocicimlayer.ImportCimLayerFromTar(fCtx, r, m.Source, cimPath, parentLayerPaths, parentLayerCimPaths) + } + + return applyCIMLayerCommon(ctx, desc, c.store, applyFunc, opts...) } // Compare creates a diff between the given mounts and uploads the result @@ -106,3 +145,153 @@ func (c cimDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts . // support for generating layer diff of cimfs layers will be added later. return emptyDesc, errdefs.ErrNotImplemented } + +// blockCIMDiff does filesystem comparison and application +// for blocked CIMs. +type blockCIMDiff struct { + store content.Store +} + +// NewBlockCimDiff is the Windows blocked cim container layer implementation for comparing +// and applying filesystem layers +func NewBlockCimDiff(store content.Store) (CompareApplier, error) { + return blockCIMDiff{ + store: store, + }, nil +} + +// parseBlockCIMMount parses the mount returned by the BlockCIM snapshotter and returns +func parseBlockCIMMount(m *mount.Mount) (*cimfs.BlockCIM, []*cimfs.BlockCIM, error) { + var ( + parentPaths []string + ) + + for _, option := range m.Options { + if val, ok := strings.CutPrefix(option, winsn.ParentLayerCimPathsFlag); ok { + err := json.Unmarshal([]byte(val), &parentPaths) + if err != nil { + return nil, nil, err + } + } else if val, ok = strings.CutPrefix(option, winsn.BlockCIMTypeFlag); ok { + // only support single file for extraction for now + if val != "file" { + return nil, nil, fmt.Errorf("extraction doesn't support layer type `%s`", val) + } + } + } + + var ( + parentLayers []*cimfs.BlockCIM + extractionLayer *cimfs.BlockCIM + ) + + extractionLayer = &cimfs.BlockCIM{ + Type: cimfs.BlockCIMTypeSingleFile, + BlockPath: filepath.Dir(m.Source), + CimName: filepath.Base(m.Source), + } + for _, p := range parentPaths { + parentLayers = append(parentLayers, &cimfs.BlockCIM{ + Type: cimfs.BlockCIMTypeSingleFile, + BlockPath: filepath.Dir(p), + CimName: filepath.Base(p), + }) + } + return extractionLayer, parentLayers, nil +} + +// Apply applies the content associated with the provided digests onto the +// provided mounts. Archive content will be extracted and decompressed if +// necessary. 
+func (c blockCIMDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + if len(mounts) != 1 { + return emptyDesc, fmt.Errorf("number of mounts should always be 1 for CimFS layers: %w", errdefs.ErrInvalidArgument) + } else if mounts[0].Type != winsn.BlockCIMMountType { + return emptyDesc, fmt.Errorf("blockCIMDiff does not support layer type %s: %w", mounts[0].Type, errdefs.ErrNotImplemented) + } + + m := mounts[0] + + log.G(ctx).WithFields(logrus.Fields{ + "mount": m, + }).Info("applying blockCIM diff") + + layer, parentLayers, err := parseBlockCIMMount(&m) + if err != nil { + return emptyDesc, err + } + + applyFunc := func(ctx context.Context, r io.Reader) (int64, error) { + return ocicimlayer.ImportSingleFileCimLayerFromTar(ctx, r, layer, parentLayers) + } + + return applyCIMLayerCommon(ctx, desc, c.store, applyFunc, opts...) + +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (c blockCIMDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + // support for generating layer diff of cimfs layers will be added later. + return emptyDesc, errdefs.ErrNotImplemented +} + +// applyCimFSCommon is a common function used for applying all diffs to a cim layer. +func applyCIMLayerCommon(ctx context.Context, desc ocispec.Descriptor, store content.Store, applyFunc cimApplyFunc, opts ...diff.ApplyOpt) (_ ocispec.Descriptor, err error) { + var config diff.ApplyConfig + for _, o := range opts { + if err := o(ctx, desc, &config); err != nil { + return emptyDesc, fmt.Errorf("failed to apply config opt: %w", err) + } + } + + t1 := time.Now() + defer func() { + if err == nil { + log.G(ctx).WithFields(log.Fields{ + "d": time.Since(t1), + "digest": desc.Digest, + "size": desc.Size, + "media": desc.MediaType, + }).Debug("diff applied") + } + }() + + ra, err := store.ReaderAt(ctx, desc) + if err != nil { + return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) + } + defer ra.Close() + + processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) + for { + if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { + return emptyDesc, fmt.Errorf("failed to get stream processor for %s: %w", desc.MediaType, err) + } + if processor.MediaType() == ocispec.MediaTypeImageLayer { + break + } + } + defer processor.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(processor, digester.Hash()), + } + + if _, err = applyFunc(ctx, rc); err != nil { + return emptyDesc, err + } + + // Read any trailing data + if _, err := io.Copy(io.Discard, rc); err != nil { + return emptyDesc, err + } + + return ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + }, nil + +} diff --git a/plugins/diff/windows/windows.go b/plugins/diff/windows/windows.go index 3424615eab7e..804e681c0c11 100644 --- a/plugins/diff/windows/windows.go +++ b/plugins/diff/windows/windows.go @@ -89,8 +89,23 @@ func NewWindowsDiff(store content.Store) (CompareApplier, error) { }, nil } -// applyDiffCommon is a common function that is called by both windows & cimfs differs. 
-func applyDiffCommon(ctx context.Context, store content.Store, desc ocispec.Descriptor, layerPath string, parentLayerPaths []string, applyOpt archive.ApplyOpt, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { +// Apply applies the content associated with the provided digests onto the +// provided mounts. Archive content will be extracted and decompressed if +// necessary. +func (s windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + layerPath, parentLayerPaths, err := mountsToLayerAndParents(mounts) + if err != nil { + return emptyDesc, err + } + + // TODO darrenstahlmsft: When this is done isolated, we should disable these. + // it currently cannot be disabled, unless we add ref counting. Since this is + // temporary, leaving it enabled is OK for now. + // https://github.com/containerd/containerd/issues/1681 + if err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + return emptyDesc, err + } + t1 := time.Now() defer func() { if err == nil { @@ -110,7 +125,7 @@ func applyDiffCommon(ctx context.Context, store content.Store, desc ocispec.Desc } } - ra, err := store.ReaderAt(ctx, desc) + ra, err := s.store.ReaderAt(ctx, desc) if err != nil { return emptyDesc, fmt.Errorf("failed to get reader from content store: %w", err) } @@ -135,7 +150,7 @@ func applyDiffCommon(ctx context.Context, store content.Store, desc ocispec.Desc archiveOpts := []archive.ApplyOpt{ archive.WithParents(parentLayerPaths), archive.WithNoSameOwner(), // Lchown is not supported on Windows - applyOpt, + archive.AsWindowsContainerLayer(), } if _, err := archive.Apply(ctx, layerPath, rc, archiveOpts...); err != nil { @@ -152,26 +167,7 @@ func applyDiffCommon(ctx context.Context, store content.Store, desc ocispec.Desc Size: rc.c, Digest: digester.Digest(), }, nil -} - -// Apply applies the content associated with the provided digests onto the -// provided mounts. Archive content will be extracted and decompressed if -// necessary. -func (s windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { - layer, parentLayerPaths, err := mountsToLayerAndParents(mounts) - if err != nil { - return emptyDesc, err - } - - // TODO darrenstahlmsft: When this is done isolated, we should disable these. - // it currently cannot be disabled, unless we add ref counting. Since this is - // temporary, leaving it enabled is OK for now. - // https://github.com/containerd/containerd/issues/1681 - if err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { - return emptyDesc, err - } - return applyDiffCommon(ctx, s.store, desc, layer, parentLayerPaths, archive.AsWindowsContainerLayer(), opts...) 
} // Compare creates a diff between the given mounts and uploads the result diff --git a/plugins/snapshots/windows/cimfs.go b/plugins/snapshots/windows/cimfs.go index b5ca498e9681..8e27f9ce0340 100644 --- a/plugins/snapshots/windows/cimfs.go +++ b/plugins/snapshots/windows/cimfs.go @@ -341,7 +341,7 @@ func (s *cimFSSnapshotter) mounts(sn storage.Snapshot, key string) []mount.Mount mounts := []mount.Mount{ { Source: s.getSnapshotDir(sn.ID), - Type: "CimFS", + Type: CimFSMountType, Options: options, }, } @@ -435,6 +435,15 @@ const ( // Similar to ParentLayerPathsFlag this is the optinos flag used to represent the JSON encoded list of // parent layer CIMs ParentLayerCimPathsFlag = "parentCimPaths=" + + // string to specify the standard cimfs type of mount + CimFSMountType string = "CimFS" + + // string to specify the block CIM type of mount + BlockCIMMountType string = "BlockCIM" + + // a flag that specifies the type of a block CIM in case of BlockCIM mounts + BlockCIMTypeFlag string = "blockCIMType=" ) // getOptionByPrefix finds an option that has the provided prefix, cuts the prefix from @@ -452,7 +461,7 @@ func getOptionByPrefix(m *mount.Mount, prefix string) (string, bool) { // gets the paths of the parent cims of this mount func GetParentCimPaths(m *mount.Mount) ([]string, error) { - if m.Type != "CimFS" { + if m.Type != CimFSMountType { return nil, fmt.Errorf("invalid mount type: '%s'", m.Type) } var parentCimPaths []string @@ -467,7 +476,7 @@ func GetParentCimPaths(m *mount.Mount) ([]string, error) { // Only applies to a snapshot created for image extraction, for such a snapshot provides the // path to a cim in which image layer will be extracted. func GetCimPath(m *mount.Mount) (string, error) { - if m.Type != "CimFS" { + if m.Type != CimFSMountType { return "", fmt.Errorf("invalid mount type: '%s'", m.Type) } cimPath, found := getOptionByPrefix(m, LayerCimPathFlag) diff --git a/script/setup/runhcs-version b/script/setup/runhcs-version index 75b05fc805ca..47da361c9927 100644 --- a/script/setup/runhcs-version +++ b/script/setup/runhcs-version @@ -1 +1 @@ -v0.13.0-rc.2 +v0.13.0-rc.3
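As context for the blockcim differ introduced in the final patch above, the apply flow it relies on is compact enough to sketch on its own. The snippet below is an illustrative, self-contained reduction of the pattern implemented by `applyCIMLayerCommon`: stream the decompressed layer tar through a digester and byte counter while a layer-specific apply function consumes it, then build the returned OCI descriptor from what was actually read. The names `applyAndDescribe`, `applyFunc`, and `countingReader` are invented for this sketch and are not containerd or hcsshim APIs; only the `go-digest` and `image-spec` calls mirror what the patch itself uses.

```go
// Package cimapply is a standalone sketch of the "direct apply" pattern used
// by the CimFS/blockcim differs: the apply function only needs an io.Reader,
// so parent layers can be represented however the caller likes (for example
// as BlockCIM structs) instead of an array of path strings.
package cimapply

import (
	"context"
	"io"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// applyFunc stands in for a layer writer (e.g. a CimFS import routine).
type applyFunc func(ctx context.Context, r io.Reader) (int64, error)

// applyAndDescribe feeds the layer stream to apply while hashing and counting
// the bytes read, then returns a descriptor for the uncompressed layer.
func applyAndDescribe(ctx context.Context, layer io.Reader, apply applyFunc) (ocispec.Descriptor, error) {
	digester := digest.Canonical.Digester()
	counter := &countingReader{r: io.TeeReader(layer, digester.Hash())}

	if _, err := apply(ctx, counter); err != nil {
		return ocispec.Descriptor{}, err
	}
	// Drain any trailing bytes so the digest and size cover the whole stream.
	if _, err := io.Copy(io.Discard, counter); err != nil {
		return ocispec.Descriptor{}, err
	}
	return ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageLayer,
		Size:      counter.n,
		Digest:    digester.Digest(),
	}, nil
}

// countingReader counts bytes as they pass through, mirroring the readCounter
// helper used by the Windows differs.
type countingReader struct {
	r io.Reader
	n int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += int64(n)
	return n, err
}
```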