From 4e2fb91a434eaf123158fc88e72b7aba64ecd412 Mon Sep 17 00:00:00 2001 From: JasonXuDeveloper Date: Wed, 17 Dec 2025 10:43:51 +1100 Subject: [PATCH 1/3] feat: add backward-compatible Specs field to LoadProfile Add Specs []LoadProfileSpec field alongside existing Spec field to enable time-series replay support in future. Update all code to use GetSpecs() accessor method for unified access to both old and new formats. Changes: - Add Specs field with omitempty tags to LoadProfile struct - Add GetSpecs() method to handle both single Spec and Specs formats - Update LoadProfile.Validate() to validate all specs - Update runner, runkperf bench, and warmup commands to use GetSpecs() - Maintain backward compatibility with existing configs All existing configs with 'spec:' continue to work unchanged. New configs can use 'specs:' array format. No functional changes in this commit. Signed-off-by: JasonXuDeveloper --- api/types/load_traffic.go | 42 +++++++++++++++-- cmd/kperf/commands/runner/runner.go | 47 ++++++++++--------- contrib/cmd/runkperf/commands/bench/utils.go | 14 ++++-- .../cmd/runkperf/commands/warmup/command.go | 9 +++- 4 files changed, 82 insertions(+), 30 deletions(-) diff --git a/api/types/load_traffic.go b/api/types/load_traffic.go index 48a69cb2..8216dddd 100644 --- a/api/types/load_traffic.go +++ b/api/types/load_traffic.go @@ -6,6 +6,7 @@ package types import ( "encoding/json" "fmt" + "reflect" "strings" apitypes "k8s.io/apimachinery/pkg/types" @@ -37,8 +38,29 @@ type LoadProfile struct { Version int `json:"version" yaml:"version"` // Description is a string value to describe this object. Description string `json:"description,omitempty" yaml:"description"` - // Spec defines behavior of load profile. - Spec LoadProfileSpec `json:"spec" yaml:"spec"` + // Spec defines behavior of load profile (deprecated, use Specs for single or multiple specs). 
+ Spec LoadProfileSpec `json:"spec,omitempty" yaml:"spec,omitempty"` + // Specs defines behaviors of load profile for time-series replay support. + Specs []LoadProfileSpec `json:"specs,omitempty" yaml:"specs,omitempty"` +} + +// GetSpecs returns specs as a slice, handling both old and new format. +// If Specs is set, returns it. Otherwise, returns Spec as a single-element slice. +func (lp *LoadProfile) GetSpecs() []LoadProfileSpec { + if len(lp.Specs) > 0 { + return lp.Specs + } + // Fallback to old single Spec field + return []LoadProfileSpec{lp.Spec} +} + +// SetFirstSpec updates the first spec, handling both old and new format. +func (lp *LoadProfile) SetFirstSpec(spec LoadProfileSpec) { + if len(lp.Specs) > 0 { + lp.Specs[0] = spec + } else { + lp.Spec = spec + } } // LoadProfileSpec defines the load traffic for traget resource. @@ -198,7 +220,21 @@ func (lp LoadProfile) Validate() error { if lp.Version != 1 { return fmt.Errorf("version should be 1") } - return lp.Spec.Validate() + + // Validate that at least one format is provided + if len(lp.Specs) == 0 && reflect.DeepEqual(lp.Spec, LoadProfileSpec{}) { + return fmt.Errorf("either 'spec' or 'specs' must be provided") + } + + // Validate all specs + specs := lp.GetSpecs() + for i, spec := range specs { + if err := spec.Validate(); err != nil { + return fmt.Errorf("spec[%d]: %w", i, err) + } + } + + return nil } // Validate verifies fields of LoadProfileSpec. 
diff --git a/cmd/kperf/commands/runner/runner.go b/cmd/kperf/commands/runner/runner.go index ecb56311..16ac5717 100644 --- a/cmd/kperf/commands/runner/runner.go +++ b/cmd/kperf/commands/runner/runner.go @@ -103,19 +103,20 @@ var runCommand = cli.Command{ return err } - clientNum := profileCfg.Spec.Conns + specs := profileCfg.GetSpecs() + clientNum := specs[0].Conns restClis, err := request.NewClients(kubeCfgPath, clientNum, request.WithClientUserAgentOpt(cliCtx.String("user-agent")), - request.WithClientQPSOpt(profileCfg.Spec.Rate), - request.WithClientContentTypeOpt(profileCfg.Spec.ContentType), - request.WithClientDisableHTTP2Opt(profileCfg.Spec.DisableHTTP2), + request.WithClientQPSOpt(specs[0].Rate), + request.WithClientContentTypeOpt(specs[0].ContentType), + request.WithClientDisableHTTP2Opt(specs[0].DisableHTTP2), ) if err != nil { return err } - stats, err := request.Schedule(context.TODO(), &profileCfg.Spec, restClis) + stats, err := request.Schedule(context.TODO(), &specs[0], restClis) if err != nil { return err } @@ -165,40 +166,44 @@ func loadConfig(cliCtx *cli.Context) (*types.LoadProfile, error) { return nil, fmt.Errorf("failed to unmarshal %s from yaml format: %w", cfgPath, err) } + specs := profileCfg.GetSpecs() // override value by flags if v := "rate"; cliCtx.IsSet(v) { - profileCfg.Spec.Rate = cliCtx.Float64(v) + specs[0].Rate = cliCtx.Float64(v) } - if v := "conns"; cliCtx.IsSet(v) || profileCfg.Spec.Conns == 0 { - profileCfg.Spec.Conns = cliCtx.Int(v) + if v := "conns"; cliCtx.IsSet(v) || specs[0].Conns == 0 { + specs[0].Conns = cliCtx.Int(v) } - if v := "client"; cliCtx.IsSet(v) || profileCfg.Spec.Client == 0 { - profileCfg.Spec.Client = cliCtx.Int(v) + if v := "client"; cliCtx.IsSet(v) || specs[0].Client == 0 { + specs[0].Client = cliCtx.Int(v) } if v := "total"; cliCtx.IsSet(v) { - profileCfg.Spec.Total = cliCtx.Int(v) + specs[0].Total = cliCtx.Int(v) } if v := "duration"; cliCtx.IsSet(v) { - profileCfg.Spec.Duration = cliCtx.Int(v) + 
specs[0].Duration = cliCtx.Int(v) } - if profileCfg.Spec.Total > 0 && profileCfg.Spec.Duration > 0 { - klog.Warningf("both total:%v and duration:%v are set, duration will be ignored\n", profileCfg.Spec.Total, profileCfg.Spec.Duration) - profileCfg.Spec.Duration = 0 + if specs[0].Total > 0 && specs[0].Duration > 0 { + klog.Warningf("both total:%v and duration:%v are set, duration will be ignored\n", specs[0].Total, specs[0].Duration) + specs[0].Duration = 0 } - if profileCfg.Spec.Total == 0 && profileCfg.Spec.Duration == 0 { + if specs[0].Total == 0 && specs[0].Duration == 0 { // Use default total value - profileCfg.Spec.Total = cliCtx.Int("total") + specs[0].Total = cliCtx.Int("total") } - if v := "content-type"; cliCtx.IsSet(v) || profileCfg.Spec.ContentType == "" { - profileCfg.Spec.ContentType = types.ContentType(cliCtx.String(v)) + if v := "content-type"; cliCtx.IsSet(v) || specs[0].ContentType == "" { + specs[0].ContentType = types.ContentType(cliCtx.String(v)) } if v := "disable-http2"; cliCtx.IsSet(v) { - profileCfg.Spec.DisableHTTP2 = cliCtx.Bool(v) + specs[0].DisableHTTP2 = cliCtx.Bool(v) } if v := "max-retries"; cliCtx.IsSet(v) { - profileCfg.Spec.MaxRetries = cliCtx.Int(v) + specs[0].MaxRetries = cliCtx.Int(v) } + // Update profileCfg with modified specs + profileCfg.SetFirstSpec(specs[0]) + if err := profileCfg.Validate(); err != nil { return nil, err } diff --git a/contrib/cmd/runkperf/commands/bench/utils.go b/contrib/cmd/runkperf/commands/bench/utils.go index 3995db55..685cf3ba 100644 --- a/contrib/cmd/runkperf/commands/bench/utils.go +++ b/contrib/cmd/runkperf/commands/bench/utils.go @@ -151,9 +151,10 @@ func newLoadProfileFromEmbed(cliCtx *cli.Context, name string) (_name string, _s return fmt.Errorf("invalid total-requests value: %v", reqs) } reqsTime := cliCtx.Int("duration") + specs := spec.Profile.GetSpecs() if !cliCtx.IsSet("total") && reqsTime > 0 { reqs = 0 - spec.Profile.Spec.Duration = reqsTime + specs[0].Duration = reqsTime } rgAffinity 
:= cliCtx.GlobalString("rg-affinity") @@ -163,10 +164,14 @@ func newLoadProfileFromEmbed(cliCtx *cli.Context, name string) (_name string, _s } if reqs != 0 { - spec.Profile.Spec.Total = reqs + specs[0].Total = reqs } spec.NodeAffinity = affinityLabels - spec.Profile.Spec.ContentType = types.ContentType(cliCtx.String("content-type")) + specs[0].ContentType = types.ContentType(cliCtx.String("content-type")) + + // Update profile with modified specs + spec.Profile.SetFirstSpec(specs[0]) + data, _ := yaml.Marshal(spec) // Tweak the load profile for read-update case @@ -202,7 +207,8 @@ func tweakReadUpdateProfile(cliCtx *cli.Context, spec *types.RunnerGroupSpec) er configmapTotal := cliCtx.Int("read-update-configmap-total") if namePattern != "" || ratio != 0 || namespace != "" || configmapTotal > 0 { - for _, r := range spec.Profile.Spec.Requests { + specs := spec.Profile.GetSpecs() + for _, r := range specs[0].Requests { if r.Patch != nil { if namePattern != "" { r.Patch.Name = fmt.Sprintf("runkperf-cm-%s", namePattern) diff --git a/contrib/cmd/runkperf/commands/warmup/command.go b/contrib/cmd/runkperf/commands/warmup/command.go index e022e91f..b3b1e3f2 100644 --- a/contrib/cmd/runkperf/commands/warmup/command.go +++ b/contrib/cmd/runkperf/commands/warmup/command.go @@ -102,8 +102,13 @@ var Command = cli.Command{ return fmt.Errorf("failed to parse %s affinity: %w", rgAffinity, err) } - spec.Profile.Spec.Total = reqs - spec.Profile.Spec.Rate = rate + specs := spec.Profile.GetSpecs() + specs[0].Total = reqs + specs[0].Rate = rate + + // Update profile with modified specs + spec.Profile.SetFirstSpec(specs[0]) + spec.NodeAffinity = affinityLabels data, _ := yaml.Marshal(spec) From 141cb1e55b1821f8d1df36a5818e0f2ebf701e83 Mon Sep 17 00:00:00 2001 From: JasonXuDeveloper Date: Fri, 19 Dec 2025 09:11:06 +1100 Subject: [PATCH 2/3] refactor: convert LoadProfile.Spec to Specs array Convert single Spec field to Specs array for time-series replay support. 
This is a breaking change that removes backward compatibility as the feature is not yet GA. Changes: - Update LoadProfile struct to use Specs []LoadProfileSpec - Remove backward compatibility code (GetSpecs/SetFirstSpec methods) - Update all code references to use .Specs[0] direct access - Convert all YAML configs from spec: to specs: list format - Update tests to match new structure All builds pass and tests verified. Signed-off-by: JasonXuDeveloper --- api/types/load_traffic.go | 35 +-- api/types/load_traffic_test.go | 206 +++++++++--------- cmd/kperf/commands/runner/runner.go | 47 ++-- contrib/cmd/runkperf/commands/bench/utils.go | 13 +- .../cmd/runkperf/commands/warmup/command.go | 8 +- .../manifests/loadprofile/cilium_cr_list.yaml | 48 ++-- .../loadprofile/list_configmaps.yaml | 32 +-- .../loadprofile/node100_job10_pod10k.yaml | 96 ++++---- .../loadprofile/node100_job1_pod3k.yaml | 46 ++-- .../manifests/loadprofile/node100_pod10k.yaml | 56 ++--- .../loadprofile/node10_job1_pod100.yaml | 46 ++-- .../loadprofile/node10_job1_pod1k.yaml | 96 ++++---- .../manifests/loadprofile/read_update.yaml | 56 ++--- .../manifests/loadprofile/warmup.yaml | 46 ++-- 14 files changed, 397 insertions(+), 434 deletions(-) diff --git a/api/types/load_traffic.go b/api/types/load_traffic.go index 8216dddd..816c5ab4 100644 --- a/api/types/load_traffic.go +++ b/api/types/load_traffic.go @@ -6,7 +6,6 @@ package types import ( "encoding/json" "fmt" - "reflect" "strings" apitypes "k8s.io/apimachinery/pkg/types" @@ -38,29 +37,8 @@ type LoadProfile struct { Version int `json:"version" yaml:"version"` // Description is a string value to describe this object. Description string `json:"description,omitempty" yaml:"description"` - // Spec defines behavior of load profile (deprecated, use Specs for single or multiple specs). - Spec LoadProfileSpec `json:"spec,omitempty" yaml:"spec,omitempty"` // Specs defines behaviors of load profile for time-series replay support. 
- Specs []LoadProfileSpec `json:"specs,omitempty" yaml:"specs,omitempty"` -} - -// GetSpecs returns specs as a slice, handling both old and new format. -// If Specs is set, returns it. Otherwise, returns Spec as a single-element slice. -func (lp *LoadProfile) GetSpecs() []LoadProfileSpec { - if len(lp.Specs) > 0 { - return lp.Specs - } - // Fallback to old single Spec field - return []LoadProfileSpec{lp.Spec} -} - -// SetFirstSpec updates the first spec, handling both old and new format. -func (lp *LoadProfile) SetFirstSpec(spec LoadProfileSpec) { - if len(lp.Specs) > 0 { - lp.Specs[0] = spec - } else { - lp.Spec = spec - } + Specs []LoadProfileSpec `json:"specs" yaml:"specs"` } // LoadProfileSpec defines the load traffic for traget resource. @@ -221,16 +199,15 @@ func (lp LoadProfile) Validate() error { return fmt.Errorf("version should be 1") } - // Validate that at least one format is provided - if len(lp.Specs) == 0 && reflect.DeepEqual(lp.Spec, LoadProfileSpec{}) { - return fmt.Errorf("either 'spec' or 'specs' must be provided") + // Validate that specs is provided + if len(lp.Specs) == 0 { + return fmt.Errorf("specs must be provided") } // Validate all specs - specs := lp.GetSpecs() - for i, spec := range specs { + for i, spec := range lp.Specs { if err := spec.Validate(); err != nil { - return fmt.Errorf("spec[%d]: %w", i, err) + return fmt.Errorf("specs[%d]: %w", i, err) } } diff --git a/api/types/load_traffic_test.go b/api/types/load_traffic_test.go index 32178e7d..b7976005 100644 --- a/api/types/load_traffic_test.go +++ b/api/types/load_traffic_test.go @@ -15,122 +15,122 @@ func TestLoadProfileUnmarshalFromYAML(t *testing.T) { in := ` version: 1 description: test -spec: - rate: 100 - total: 10000 - conns: 2 - client: 1 - contentType: json - requests: - - staleGet: - group: core - version: v1 - resource: pods - namespace: default - name: x1 - shares: 100 - - quorumGet: - group: core - version: v1 - resource: configmaps - namespace: default - name: x2 - 
shares: 150 - - staleList: - group: core - version: v1 - resource: pods - namespace: default - selector: app=x2 - fieldSelector: spec.nodeName=x - shares: 200 - - quorumList: - group: core - version: v1 - resource: configmaps - namespace: default - limit: 10000 - selector: app=x3 - shares: 400 - - put: - group: core - version: v1 - resource: configmaps - namespace: kperf - name: kperf- - keySpaceSize: 1000 - valueSize: 1024 - shares: 1000 - - getPodLog: - namespace: default - name: hello - container: main - tailLines: 1000 - limitBytes: 1024 - shares: 10 - - watchList: - group: core - version: v1 - resource: pods - namespace: default - selector: app=x2 - fieldSelector: spec.nodeName=x - shares: 250 +specs: + - rate: 100 + total: 10000 + conns: 2 + client: 1 + contentType: json + requests: + - staleGet: + group: core + version: v1 + resource: pods + namespace: default + name: x1 + shares: 100 + - quorumGet: + group: core + version: v1 + resource: configmaps + namespace: default + name: x2 + shares: 150 + - staleList: + group: core + version: v1 + resource: pods + namespace: default + selector: app=x2 + fieldSelector: spec.nodeName=x + shares: 200 + - quorumList: + group: core + version: v1 + resource: configmaps + namespace: default + limit: 10000 + selector: app=x3 + shares: 400 + - put: + group: core + version: v1 + resource: configmaps + namespace: kperf + name: kperf- + keySpaceSize: 1000 + valueSize: 1024 + shares: 1000 + - getPodLog: + namespace: default + name: hello + container: main + tailLines: 1000 + limitBytes: 1024 + shares: 10 + - watchList: + group: core + version: v1 + resource: pods + namespace: default + selector: app=x2 + fieldSelector: spec.nodeName=x + shares: 250 ` target := LoadProfile{} require.NoError(t, yaml.Unmarshal([]byte(in), &target)) assert.Equal(t, 1, target.Version) assert.Equal(t, "test", target.Description) - assert.Equal(t, float64(100), target.Spec.Rate) - assert.Equal(t, 10000, target.Spec.Total) - assert.Equal(t, 2, 
target.Spec.Conns) - assert.Len(t, target.Spec.Requests, 7) + assert.Equal(t, float64(100), target.Specs[0].Rate) + assert.Equal(t, 10000, target.Specs[0].Total) + assert.Equal(t, 2, target.Specs[0].Conns) + assert.Len(t, target.Specs[0].Requests, 7) - assert.Equal(t, 100, target.Spec.Requests[0].Shares) - assert.NotNil(t, target.Spec.Requests[0].StaleGet) - assert.Equal(t, "pods", target.Spec.Requests[0].StaleGet.Resource) - assert.Equal(t, "v1", target.Spec.Requests[0].StaleGet.Version) - assert.Equal(t, "core", target.Spec.Requests[0].StaleGet.Group) - assert.Equal(t, "default", target.Spec.Requests[0].StaleGet.Namespace) - assert.Equal(t, "x1", target.Spec.Requests[0].StaleGet.Name) + assert.Equal(t, 100, target.Specs[0].Requests[0].Shares) + assert.NotNil(t, target.Specs[0].Requests[0].StaleGet) + assert.Equal(t, "pods", target.Specs[0].Requests[0].StaleGet.Resource) + assert.Equal(t, "v1", target.Specs[0].Requests[0].StaleGet.Version) + assert.Equal(t, "core", target.Specs[0].Requests[0].StaleGet.Group) + assert.Equal(t, "default", target.Specs[0].Requests[0].StaleGet.Namespace) + assert.Equal(t, "x1", target.Specs[0].Requests[0].StaleGet.Name) - assert.NotNil(t, target.Spec.Requests[1].QuorumGet) - assert.Equal(t, 150, target.Spec.Requests[1].Shares) + assert.NotNil(t, target.Specs[0].Requests[1].QuorumGet) + assert.Equal(t, 150, target.Specs[0].Requests[1].Shares) - assert.Equal(t, 200, target.Spec.Requests[2].Shares) - assert.NotNil(t, target.Spec.Requests[2].StaleList) - assert.Equal(t, "pods", target.Spec.Requests[2].StaleList.Resource) - assert.Equal(t, "v1", target.Spec.Requests[2].StaleList.Version) - assert.Equal(t, "core", target.Spec.Requests[2].StaleList.Group) - assert.Equal(t, "default", target.Spec.Requests[2].StaleList.Namespace) - assert.Equal(t, 0, target.Spec.Requests[2].StaleList.Limit) - assert.Equal(t, "app=x2", target.Spec.Requests[2].StaleList.Selector) - assert.Equal(t, "spec.nodeName=x", 
target.Spec.Requests[2].StaleList.FieldSelector) + assert.Equal(t, 200, target.Specs[0].Requests[2].Shares) + assert.NotNil(t, target.Specs[0].Requests[2].StaleList) + assert.Equal(t, "pods", target.Specs[0].Requests[2].StaleList.Resource) + assert.Equal(t, "v1", target.Specs[0].Requests[2].StaleList.Version) + assert.Equal(t, "core", target.Specs[0].Requests[2].StaleList.Group) + assert.Equal(t, "default", target.Specs[0].Requests[2].StaleList.Namespace) + assert.Equal(t, 0, target.Specs[0].Requests[2].StaleList.Limit) + assert.Equal(t, "app=x2", target.Specs[0].Requests[2].StaleList.Selector) + assert.Equal(t, "spec.nodeName=x", target.Specs[0].Requests[2].StaleList.FieldSelector) - assert.NotNil(t, target.Spec.Requests[3].QuorumList) - assert.Equal(t, 400, target.Spec.Requests[3].Shares) + assert.NotNil(t, target.Specs[0].Requests[3].QuorumList) + assert.Equal(t, 400, target.Specs[0].Requests[3].Shares) - assert.Equal(t, 1000, target.Spec.Requests[4].Shares) - assert.NotNil(t, target.Spec.Requests[4].Put) - assert.Equal(t, "configmaps", target.Spec.Requests[4].Put.Resource) - assert.Equal(t, "v1", target.Spec.Requests[4].Put.Version) - assert.Equal(t, "core", target.Spec.Requests[4].Put.Group) - assert.Equal(t, "kperf", target.Spec.Requests[4].Put.Namespace) - assert.Equal(t, "kperf-", target.Spec.Requests[4].Put.Name) - assert.Equal(t, 1000, target.Spec.Requests[4].Put.KeySpaceSize) - assert.Equal(t, 1024, target.Spec.Requests[4].Put.ValueSize) + assert.Equal(t, 1000, target.Specs[0].Requests[4].Shares) + assert.NotNil(t, target.Specs[0].Requests[4].Put) + assert.Equal(t, "configmaps", target.Specs[0].Requests[4].Put.Resource) + assert.Equal(t, "v1", target.Specs[0].Requests[4].Put.Version) + assert.Equal(t, "core", target.Specs[0].Requests[4].Put.Group) + assert.Equal(t, "kperf", target.Specs[0].Requests[4].Put.Namespace) + assert.Equal(t, "kperf-", target.Specs[0].Requests[4].Put.Name) + assert.Equal(t, 1000, target.Specs[0].Requests[4].Put.KeySpaceSize) + 
assert.Equal(t, 1024, target.Specs[0].Requests[4].Put.ValueSize) - assert.Equal(t, 10, target.Spec.Requests[5].Shares) - assert.NotNil(t, target.Spec.Requests[5].GetPodLog) - assert.Equal(t, "default", target.Spec.Requests[5].GetPodLog.Namespace) - assert.Equal(t, "hello", target.Spec.Requests[5].GetPodLog.Name) - assert.Equal(t, "main", target.Spec.Requests[5].GetPodLog.Container) - assert.Equal(t, int64(1000), *target.Spec.Requests[5].GetPodLog.TailLines) - assert.Equal(t, int64(1024), *target.Spec.Requests[5].GetPodLog.LimitBytes) + assert.Equal(t, 10, target.Specs[0].Requests[5].Shares) + assert.NotNil(t, target.Specs[0].Requests[5].GetPodLog) + assert.Equal(t, "default", target.Specs[0].Requests[5].GetPodLog.Namespace) + assert.Equal(t, "hello", target.Specs[0].Requests[5].GetPodLog.Name) + assert.Equal(t, "main", target.Specs[0].Requests[5].GetPodLog.Container) + assert.Equal(t, int64(1000), *target.Specs[0].Requests[5].GetPodLog.TailLines) + assert.Equal(t, int64(1024), *target.Specs[0].Requests[5].GetPodLog.LimitBytes) - assert.Equal(t, 250, target.Spec.Requests[6].Shares) - assert.NotNil(t, target.Spec.Requests[6].WatchList) + assert.Equal(t, 250, target.Specs[0].Requests[6].Shares) + assert.NotNil(t, target.Specs[0].Requests[6].WatchList) assert.NoError(t, target.Validate()) } diff --git a/cmd/kperf/commands/runner/runner.go b/cmd/kperf/commands/runner/runner.go index 16ac5717..69950019 100644 --- a/cmd/kperf/commands/runner/runner.go +++ b/cmd/kperf/commands/runner/runner.go @@ -103,20 +103,19 @@ var runCommand = cli.Command{ return err } - specs := profileCfg.GetSpecs() - clientNum := specs[0].Conns + clientNum := profileCfg.Specs[0].Conns restClis, err := request.NewClients(kubeCfgPath, clientNum, request.WithClientUserAgentOpt(cliCtx.String("user-agent")), - request.WithClientQPSOpt(specs[0].Rate), - request.WithClientContentTypeOpt(specs[0].ContentType), - request.WithClientDisableHTTP2Opt(specs[0].DisableHTTP2), + 
request.WithClientQPSOpt(profileCfg.Specs[0].Rate), + request.WithClientContentTypeOpt(profileCfg.Specs[0].ContentType), + request.WithClientDisableHTTP2Opt(profileCfg.Specs[0].DisableHTTP2), ) if err != nil { return err } - stats, err := request.Schedule(context.TODO(), &specs[0], restClis) + stats, err := request.Schedule(context.TODO(), &profileCfg.Specs[0], restClis) if err != nil { return err } @@ -166,44 +165,40 @@ func loadConfig(cliCtx *cli.Context) (*types.LoadProfile, error) { return nil, fmt.Errorf("failed to unmarshal %s from yaml format: %w", cfgPath, err) } - specs := profileCfg.GetSpecs() // override value by flags if v := "rate"; cliCtx.IsSet(v) { - specs[0].Rate = cliCtx.Float64(v) + profileCfg.Specs[0].Rate = cliCtx.Float64(v) } - if v := "conns"; cliCtx.IsSet(v) || specs[0].Conns == 0 { - specs[0].Conns = cliCtx.Int(v) + if v := "conns"; cliCtx.IsSet(v) || profileCfg.Specs[0].Conns == 0 { + profileCfg.Specs[0].Conns = cliCtx.Int(v) } - if v := "client"; cliCtx.IsSet(v) || specs[0].Client == 0 { - specs[0].Client = cliCtx.Int(v) + if v := "client"; cliCtx.IsSet(v) || profileCfg.Specs[0].Client == 0 { + profileCfg.Specs[0].Client = cliCtx.Int(v) } if v := "total"; cliCtx.IsSet(v) { - specs[0].Total = cliCtx.Int(v) + profileCfg.Specs[0].Total = cliCtx.Int(v) } if v := "duration"; cliCtx.IsSet(v) { - specs[0].Duration = cliCtx.Int(v) + profileCfg.Specs[0].Duration = cliCtx.Int(v) } - if specs[0].Total > 0 && specs[0].Duration > 0 { - klog.Warningf("both total:%v and duration:%v are set, duration will be ignored\n", specs[0].Total, specs[0].Duration) - specs[0].Duration = 0 + if profileCfg.Specs[0].Total > 0 && profileCfg.Specs[0].Duration > 0 { + klog.Warningf("both total:%v and duration:%v are set, duration will be ignored\n", profileCfg.Specs[0].Total, profileCfg.Specs[0].Duration) + profileCfg.Specs[0].Duration = 0 } - if specs[0].Total == 0 && specs[0].Duration == 0 { + if profileCfg.Specs[0].Total == 0 && profileCfg.Specs[0].Duration == 0 { // 
Use default total value - specs[0].Total = cliCtx.Int("total") + profileCfg.Specs[0].Total = cliCtx.Int("total") } - if v := "content-type"; cliCtx.IsSet(v) || specs[0].ContentType == "" { - specs[0].ContentType = types.ContentType(cliCtx.String(v)) + if v := "content-type"; cliCtx.IsSet(v) || profileCfg.Specs[0].ContentType == "" { + profileCfg.Specs[0].ContentType = types.ContentType(cliCtx.String(v)) } if v := "disable-http2"; cliCtx.IsSet(v) { - specs[0].DisableHTTP2 = cliCtx.Bool(v) + profileCfg.Specs[0].DisableHTTP2 = cliCtx.Bool(v) } if v := "max-retries"; cliCtx.IsSet(v) { - specs[0].MaxRetries = cliCtx.Int(v) + profileCfg.Specs[0].MaxRetries = cliCtx.Int(v) } - // Update profileCfg with modified specs - profileCfg.SetFirstSpec(specs[0]) - if err := profileCfg.Validate(); err != nil { return nil, err } diff --git a/contrib/cmd/runkperf/commands/bench/utils.go b/contrib/cmd/runkperf/commands/bench/utils.go index 685cf3ba..4e508818 100644 --- a/contrib/cmd/runkperf/commands/bench/utils.go +++ b/contrib/cmd/runkperf/commands/bench/utils.go @@ -151,10 +151,9 @@ func newLoadProfileFromEmbed(cliCtx *cli.Context, name string) (_name string, _s return fmt.Errorf("invalid total-requests value: %v", reqs) } reqsTime := cliCtx.Int("duration") - specs := spec.Profile.GetSpecs() if !cliCtx.IsSet("total") && reqsTime > 0 { reqs = 0 - specs[0].Duration = reqsTime + spec.Profile.Specs[0].Duration = reqsTime } rgAffinity := cliCtx.GlobalString("rg-affinity") @@ -164,13 +163,10 @@ func newLoadProfileFromEmbed(cliCtx *cli.Context, name string) (_name string, _s } if reqs != 0 { - specs[0].Total = reqs + spec.Profile.Specs[0].Total = reqs } spec.NodeAffinity = affinityLabels - specs[0].ContentType = types.ContentType(cliCtx.String("content-type")) - - // Update profile with modified specs - spec.Profile.SetFirstSpec(specs[0]) + spec.Profile.Specs[0].ContentType = types.ContentType(cliCtx.String("content-type")) data, _ := yaml.Marshal(spec) @@ -207,8 +203,7 @@ func 
tweakReadUpdateProfile(cliCtx *cli.Context, spec *types.RunnerGroupSpec) er configmapTotal := cliCtx.Int("read-update-configmap-total") if namePattern != "" || ratio != 0 || namespace != "" || configmapTotal > 0 { - specs := spec.Profile.GetSpecs() - for _, r := range specs[0].Requests { + for _, r := range spec.Profile.Specs[0].Requests { if r.Patch != nil { if namePattern != "" { r.Patch.Name = fmt.Sprintf("runkperf-cm-%s", namePattern) diff --git a/contrib/cmd/runkperf/commands/warmup/command.go b/contrib/cmd/runkperf/commands/warmup/command.go index b3b1e3f2..c01697a9 100644 --- a/contrib/cmd/runkperf/commands/warmup/command.go +++ b/contrib/cmd/runkperf/commands/warmup/command.go @@ -102,12 +102,8 @@ var Command = cli.Command{ return fmt.Errorf("failed to parse %s affinity: %w", rgAffinity, err) } - specs := spec.Profile.GetSpecs() - specs[0].Total = reqs - specs[0].Rate = rate - - // Update profile with modified specs - spec.Profile.SetFirstSpec(specs[0]) + spec.Profile.Specs[0].Total = reqs + spec.Profile.Specs[0].Rate = rate spec.NodeAffinity = affinityLabels diff --git a/contrib/internal/manifests/loadprofile/cilium_cr_list.yaml b/contrib/internal/manifests/loadprofile/cilium_cr_list.yaml index ddb00772..35812d64 100644 --- a/contrib/internal/manifests/loadprofile/cilium_cr_list.yaml +++ b/contrib/internal/manifests/loadprofile/cilium_cr_list.yaml @@ -9,29 +9,29 @@ count: 10 loadProfile: version: 1 description: cilium list profile - spec: - rate: 20 # 20 req/sec * 10 runners = 200 req/sec - total: 12000 # run for ~10 minutes, 600 seconds * 20/sec = 12000 - # 5k node cluster, one cilium agent per node - # divided by the number of runners. - conns: 500 - client: 500 + specs: + - rate: 20 # 20 req/sec * 10 runners = 200 req/sec + total: 12000 # run for ~10 minutes, 600 seconds * 20/sec = 12000 + # 5k node cluster, one cilium agent per node + # divided by the number of runners. 
+ conns: 500 + client: 500 - contentType: json - disableHTTP2: false + contentType: json + disableHTTP2: false - # 50/50 mix of ciliumidentity and ciliumendpoint queries. - # We're simulating with CilumEndpointSlice disabled here, on the assumption that CES will always - # have lower count than CEP, so if we can survive with CEP only then we're in good shape. - requests: - - staleList: - group: cilium.io - version: v2 - resource: ciliumidentities - shares: 1000 # Has 50% chance = 1000 / (1000 + 1000) - - staleList: - group: cilium.io - version: v2 - resource: ciliumendpoints - namespace: "default" - shares: 1000 # Has 50% chance = 1000 / (1000 + 1000) \ No newline at end of file + # 50/50 mix of ciliumidentity and ciliumendpoint queries. + # We're simulating with CilumEndpointSlice disabled here, on the assumption that CES will always + # have lower count than CEP, so if we can survive with CEP only then we're in good shape. + requests: + - staleList: + group: cilium.io + version: v2 + resource: ciliumidentities + shares: 1000 # Has 50% chance = 1000 / (1000 + 1000) + - staleList: + group: cilium.io + version: v2 + resource: ciliumendpoints + namespace: "default" + shares: 1000 # Has 50% chance = 1000 / (1000 + 1000) \ No newline at end of file diff --git a/contrib/internal/manifests/loadprofile/list_configmaps.yaml b/contrib/internal/manifests/loadprofile/list_configmaps.yaml index e359ad1d..38736f56 100644 --- a/contrib/internal/manifests/loadprofile/list_configmaps.yaml +++ b/contrib/internal/manifests/loadprofile/list_configmaps.yaml @@ -2,19 +2,19 @@ count: 10 loadProfile: version: 1 description: "list configmaps" - spec: - rate: 10 - conns: 10 - client: 10 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 - resource: configmaps - shares: 100 # chance 100 / (100 + 100) - - quorumList: - version: v1 - resource: configmaps - shares: 100 # chance 100 / (100 + 100) + specs: + - rate: 10 + conns: 10 + client: 10 + 
contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: configmaps + shares: 100 # chance 100 / (100 + 100) + - quorumList: + version: v1 + resource: configmaps + shares: 100 # chance 100 / (100 + 100) diff --git a/contrib/internal/manifests/loadprofile/node100_job10_pod10k.yaml b/contrib/internal/manifests/loadprofile/node100_job10_pod10k.yaml index 8fdcc7c3..712f1291 100644 --- a/contrib/internal/manifests/loadprofile/node100_job10_pod10k.yaml +++ b/contrib/internal/manifests/loadprofile/node100_job10_pod10k.yaml @@ -3,51 +3,51 @@ count: 10 loadProfile: version: 1 description: "100nodes_10job_1000pods" - spec: - rate: 10 - conns: 10 - client: 10 - contentType: json - requests: - - staleList: - version: v1 - resource: pods - namespace: job10pod10k - shares: 10 - - staleGet: - version: v1 - resource: pods - namespace: virtualnodes-kperf-io - name: node100job10pod10k-1 - shares: 300 - - staleList: - group: batch - version: v1 - resource: jobs - namespace: job10pod10k - shares: 10 - - staleGet: - group: batch - version: v1 - resource: jobs - namespace: job10pod10k - name: benchmark-jobs-5 - shares: 300 - - staleList: # cluster scope - version: v1 - resource: nodes - shares: 10 - - staleList: #cluster scope - version: v1 - resource: namespaces - shares: 10 - - staleGet: # cluster scope - version: v1 - resource: nodes - name: node100job10pod10k-3 - shares: 300 - - staleGet: # cluster scope - version: v1 - resource: namespaces - name: job10pod10k - shares: 300 + specs: + - rate: 10 + conns: 10 + client: 10 + contentType: json + requests: + - staleList: + version: v1 + resource: pods + namespace: job10pod10k + shares: 10 + - staleGet: + version: v1 + resource: pods + namespace: virtualnodes-kperf-io + name: node100job10pod10k-1 + shares: 300 + - staleList: + group: batch + version: v1 + resource: jobs + namespace: job10pod10k + shares: 10 + - staleGet: + group: batch + version: v1 + resource: jobs + namespace: 
job10pod10k + name: benchmark-jobs-5 + shares: 300 + - staleList: # cluster scope + version: v1 + resource: nodes + shares: 10 + - staleList: #cluster scope + version: v1 + resource: namespaces + shares: 10 + - staleGet: # cluster scope + version: v1 + resource: nodes + name: node100job10pod10k-3 + shares: 300 + - staleGet: # cluster scope + version: v1 + resource: namespaces + name: job10pod10k + shares: 300 diff --git a/contrib/internal/manifests/loadprofile/node100_job1_pod3k.yaml b/contrib/internal/manifests/loadprofile/node100_job1_pod3k.yaml index 8d451123..5d618f31 100644 --- a/contrib/internal/manifests/loadprofile/node100_job1_pod3k.yaml +++ b/contrib/internal/manifests/loadprofile/node100_job1_pod3k.yaml @@ -2,26 +2,26 @@ count: 10 loadProfile: version: 1 description: "node100-job1-pod3k" - spec: - rate: 10 - total: 36000 - conns: 10 - client: 100 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 - resource: pods - shares: 1000 # chance 1000 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: pods - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: events - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) + specs: + - rate: 10 + total: 36000 + conns: 10 + client: 100 + contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: pods + shares: 1000 # chance 1000 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: pods + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: events + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) diff --git a/contrib/internal/manifests/loadprofile/node100_pod10k.yaml b/contrib/internal/manifests/loadprofile/node100_pod10k.yaml index 145a0d57..89a57942 100644 --- a/contrib/internal/manifests/loadprofile/node100_pod10k.yaml +++ b/contrib/internal/manifests/loadprofile/node100_pod10k.yaml 
@@ -2,31 +2,31 @@ count: 10 loadProfile: version: 1 description: "node100-pod10k" - spec: - rate: 10 - total: 36000 - conns: 10 - client: 100 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 - resource: pods - fieldSelector: "spec.nodeName=node100pod10k-49" - shares: 1000 # 1000 / (1000 + 100 + 200) * 10 = 7.7 req/s - - staleList: - version: v1 - resource: pods - shares: 100 # 100 / (1000 + 100 + 200) * 10 = 0.7 req/s - - quorumList: - version: v1 - resource: pods - namespace: benchmark-0 - # NOTE: It's to simulate the request created by daemonset to get pods, - # including kubelet, when they want to get pods from ETCD. The limit - # is 100 because it's close to MaxPods value. - limit: 100 - selector: "app=benchmark" - shares: 200 # 200 / (1000 + 100 + 200) * 10 = 1.5 req/s + specs: + - rate: 10 + total: 36000 + conns: 10 + client: 100 + contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: pods + fieldSelector: "spec.nodeName=node100pod10k-49" + shares: 1000 # 1000 / (1000 + 100 + 200) * 10 = 7.7 req/s + - staleList: + version: v1 + resource: pods + shares: 100 # 100 / (1000 + 100 + 200) * 10 = 0.7 req/s + - quorumList: + version: v1 + resource: pods + namespace: benchmark-0 + # NOTE: It's to simulate the request created by daemonset to get pods, + # including kubelet, when they want to get pods from ETCD. The limit + # is 100 because it's close to MaxPods value. 
+ limit: 100 + selector: "app=benchmark" + shares: 200 # 200 / (1000 + 100 + 200) * 10 = 1.5 req/s diff --git a/contrib/internal/manifests/loadprofile/node10_job1_pod100.yaml b/contrib/internal/manifests/loadprofile/node10_job1_pod100.yaml index 6b0df149..abf38e2f 100644 --- a/contrib/internal/manifests/loadprofile/node10_job1_pod100.yaml +++ b/contrib/internal/manifests/loadprofile/node10_job1_pod100.yaml @@ -2,26 +2,26 @@ count: 1 loadProfile: version: 1 description: "node10-job1-pod100" - spec: - rate: 10 - total: 1000 - conns: 10 - client: 10 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 - resource: pods - shares: 1000 # chance 1000 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: pods - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: events - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) + specs: + - rate: 10 + total: 1000 + conns: 10 + client: 10 + contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: pods + shares: 1000 # chance 1000 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: pods + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: events + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) diff --git a/contrib/internal/manifests/loadprofile/node10_job1_pod1k.yaml b/contrib/internal/manifests/loadprofile/node10_job1_pod1k.yaml index 656833eb..9ceb5d91 100644 --- a/contrib/internal/manifests/loadprofile/node10_job1_pod1k.yaml +++ b/contrib/internal/manifests/loadprofile/node10_job1_pod1k.yaml @@ -3,51 +3,51 @@ count: 10 loadProfile: version: 1 description: "10nodes_1000pods" - spec: - rate: 10 - conns: 10 - client: 10 - contentType: json - requests: - - staleList: - version: v1 - resource: pods - namespace: job1pod1k - shares: 10 - - staleGet: - version: v1 - resource: pods - namespace: 
virtualnodes-kperf-io - name: node10job1pod1k-1 - shares: 300 - - staleList: - group: batch - version: v1 - resource: jobs - namespace: job1pod1k - shares: 10 - - staleGet: - group: batch - version: v1 - resource: jobs - namespace: job1pod1k - name: batchjobs - shares: 300 - - staleList: # cluster scope - version: v1 - resource: nodes - shares: 10 - - staleList: #cluster scope - version: v1 - resource: namespaces - shares: 10 - - staleGet: # cluster scope - version: v1 - resource: nodes - name: node10job1pod1k-3 - shares: 300 - - staleGet: # cluster scope - version: v1 - resource: namespaces - name: job1pod1k - shares: 300 + specs: + - rate: 10 + conns: 10 + client: 10 + contentType: json + requests: + - staleList: + version: v1 + resource: pods + namespace: job1pod1k + shares: 10 + - staleGet: + version: v1 + resource: pods + namespace: virtualnodes-kperf-io + name: node10job1pod1k-1 + shares: 300 + - staleList: + group: batch + version: v1 + resource: jobs + namespace: job1pod1k + shares: 10 + - staleGet: + group: batch + version: v1 + resource: jobs + namespace: job1pod1k + name: batchjobs + shares: 300 + - staleList: # cluster scope + version: v1 + resource: nodes + shares: 10 + - staleList: #cluster scope + version: v1 + resource: namespaces + shares: 10 + - staleGet: # cluster scope + version: v1 + resource: nodes + name: node10job1pod1k-3 + shares: 300 + - staleGet: # cluster scope + version: v1 + resource: namespaces + name: job1pod1k + shares: 300 diff --git a/contrib/internal/manifests/loadprofile/read_update.yaml b/contrib/internal/manifests/loadprofile/read_update.yaml index bdb43c97..e31957ce 100644 --- a/contrib/internal/manifests/loadprofile/read_update.yaml +++ b/contrib/internal/manifests/loadprofile/read_update.yaml @@ -2,33 +2,33 @@ count: 10 loadProfile: version: 1 description: "read_update" - spec: - rate: 10 - total: 3000 - conns: 10 - client: 10 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 
- resource: configmaps - namespace: default - shares: 50 - - patch: - version: v1 - resource: configmaps - namespace: default - patchType: merge - name: runkperf-cm-kperf-read-update - keySpaceSize: 100 - body: | - { - "metadata": { - "labels": { - "test-label": "mutation-test" + specs: + - rate: 10 + total: 3000 + conns: 10 + client: 10 + contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: configmaps + namespace: default + shares: 50 + - patch: + version: v1 + resource: configmaps + namespace: default + patchType: merge + name: runkperf-cm-kperf-read-update + keySpaceSize: 100 + body: | + { + "metadata": { + "labels": { + "test-label": "mutation-test" + } } } - } - shares: 50 + shares: 50 diff --git a/contrib/internal/manifests/loadprofile/warmup.yaml b/contrib/internal/manifests/loadprofile/warmup.yaml index 85e60f84..adb5dbf5 100644 --- a/contrib/internal/manifests/loadprofile/warmup.yaml +++ b/contrib/internal/manifests/loadprofile/warmup.yaml @@ -2,26 +2,26 @@ count: 10 loadProfile: version: 1 description: "warmup" - spec: - rate: 20 - total: 10000 - conns: 10 - client: 100 - contentType: json - disableHTTP2: false - maxRetries: 0 - requests: - - staleList: - version: v1 - resource: pods - shares: 1000 # chance 1000 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: pods - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) - - quorumList: - version: v1 - resource: events - limit: 1000 - shares: 100 # chance 100 / (1000 + 100 + 100) + specs: + - rate: 20 + total: 10000 + conns: 10 + client: 100 + contentType: json + disableHTTP2: false + maxRetries: 0 + requests: + - staleList: + version: v1 + resource: pods + shares: 1000 # chance 1000 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: pods + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) + - quorumList: + version: v1 + resource: events + limit: 1000 + shares: 100 # chance 100 / (1000 + 100 + 100) From 
9cdc909d095ceb0b453d70ad003dad263f0e6bbb Mon Sep 17 00:00:00 2001 From: JasonXuDeveloper Date: Fri, 23 Jan 2026 10:08:48 +1100 Subject: [PATCH 3/3] refactor: simplify runner spec access with local variable Address code review feedback by extracting the first spec into a local pspec variable (via GetSpecs(), so legacy single-'spec' configs keep working) and adding validation to ensure runner only supports single spec configurations. Changes: - Add check to return error if len(profileCfg.Specs) > 1 - Extract first spec into pspec variable for cleaner code - Replace all profileCfg.Specs[0] references with pspec This improves code readability and explicitly validates the single-spec constraint for the runner command. Signed-off-by: JasonXuDeveloper --- cmd/kperf/commands/runner/runner.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cmd/kperf/commands/runner/runner.go b/cmd/kperf/commands/runner/runner.go index 69950019..cc06d7ff 100644 --- a/cmd/kperf/commands/runner/runner.go +++ b/cmd/kperf/commands/runner/runner.go @@ -103,19 +103,25 @@ var runCommand = cli.Command{ return err } - clientNum := profileCfg.Specs[0].Conns + // Runner only supports single spec + if len(profileCfg.Specs) > 1 { + return fmt.Errorf("runner only supports single spec, but got %d specs", len(profileCfg.Specs)) + } + pspec := profileCfg.GetSpecs()[0] + + clientNum := pspec.Conns restClis, err := request.NewClients(kubeCfgPath, clientNum, request.WithClientUserAgentOpt(cliCtx.String("user-agent")), - request.WithClientQPSOpt(profileCfg.Specs[0].Rate), - request.WithClientContentTypeOpt(profileCfg.Specs[0].ContentType), - request.WithClientDisableHTTP2Opt(profileCfg.Specs[0].DisableHTTP2), + request.WithClientQPSOpt(pspec.Rate), + request.WithClientContentTypeOpt(pspec.ContentType), + request.WithClientDisableHTTP2Opt(pspec.DisableHTTP2), ) if err != nil { return err } - stats, err := request.Schedule(context.TODO(), &profileCfg.Specs[0], restClis) + stats, err := request.Schedule(context.TODO(), &pspec,
restClis) if err != nil { return err }