Move scenarios to subpackage
Signed-off-by: Marek Siarkowicz <siarkowicz@google.com>
serathius committed Oct 5, 2024
1 parent 279ffd5 commit 8cf1121
Showing 2 changed files with 89 additions and 88 deletions.
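
For orientation, here is a condensed sketch of the exported surface this commit introduces, assembled from the diff below. It is not the commit's code; the import paths for e2e, client, and failpoint are assumed from how those packages are used in the diff rather than shown in it.

package scenarios

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework/e2e"
	"go.etcd.io/etcd/tests/v3/robustness/client"
	"go.etcd.io/etcd/tests/v3/robustness/failpoint"
	"go.etcd.io/etcd/tests/v3/robustness/traffic"
)

// TestScenario replaces the unexported testScenario. Its fields are exported because
// main_test.go stays in package robustness and could no longer read unexported fields
// such as s.name or s.cluster once the type moved into this subpackage.
type TestScenario struct {
	Name      string
	Failpoint failpoint.Failpoint
	Cluster   e2e.EtcdProcessClusterConfig
	Traffic   traffic.Traffic
	Profile   traffic.Profile
	Watch     client.WatchConfig
}

// Exploratory and Regression replace exploratoryScenarios and regressionScenarios;
// their bodies are elided here and shown in the second file of the diff.
func Exploratory(_ *testing.T) []TestScenario { return nil }
func Regression(t *testing.T) []TestScenario  { return nil }

main_test.go then consumes this surface through scenarios.Exploratory(t) and scenarios.Regression(t), as the first file of the diff shows.
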
41 changes: 21 additions & 20 deletions tests/robustness/main_test.go
@@ -31,6 +31,7 @@ import (
"go.etcd.io/etcd/tests/v3/robustness/identity"
"go.etcd.io/etcd/tests/v3/robustness/model"
"go.etcd.io/etcd/tests/v3/robustness/report"
"go.etcd.io/etcd/tests/v3/robustness/scenarios"
"go.etcd.io/etcd/tests/v3/robustness/traffic"
"go.etcd.io/etcd/tests/v3/robustness/validate"
)
@@ -44,21 +45,21 @@ func TestMain(m *testing.M) {

func TestRobustnessExploratory(t *testing.T) {
testRunner.BeforeTest(t)
for _, s := range exploratoryScenarios(t) {
t.Run(s.name, func(t *testing.T) {
for _, s := range scenarios.Exploratory(t) {
t.Run(s.Name, func(t *testing.T) {
lg := zaptest.NewLogger(t)
s.cluster.Logger = lg
s.Cluster.Logger = lg
ctx := context.Background()
c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.cluster))
c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.Cluster))
if err != nil {
t.Fatal(err)
}
defer forcestopCluster(c)
s.failpoint, err = failpoint.PickRandom(c, s.profile)
s.Failpoint, err = failpoint.PickRandom(c, s.Profile)
if err != nil {
t.Fatal(err)
}
t.Run(s.failpoint.Name(), func(t *testing.T) {
t.Run(s.Failpoint.Name(), func(t *testing.T) {
testRobustness(ctx, t, lg, s, c)
})
})
@@ -67,12 +68,12 @@ func TestRobustnessExploratory(t *testing.T) {

func TestRobustnessRegression(t *testing.T) {
testRunner.BeforeTest(t)
for _, s := range regressionScenarios(t) {
t.Run(s.name, func(t *testing.T) {
for _, s := range scenarios.Regression(t) {
t.Run(s.Name, func(t *testing.T) {
lg := zaptest.NewLogger(t)
s.cluster.Logger = lg
s.Cluster.Logger = lg
ctx := context.Background()
c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.cluster))
c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.Cluster))
if err != nil {
t.Fatal(err)
}
@@ -82,7 +83,7 @@ func TestRobustnessRegression(t *testing.T) {
}
}

func testRobustness(ctx context.Context, t *testing.T, lg *zap.Logger, s testScenario, c *e2e.EtcdProcessCluster) {
func testRobustness(ctx context.Context, t *testing.T, lg *zap.Logger, s scenarios.TestScenario, c *e2e.EtcdProcessCluster) {
r := report.TestReport{Logger: lg, Cluster: c}
// t.Failed() returns false during panicking. We need to forcibly
// save data on panicking.
@@ -91,24 +92,24 @@ func testRobustness(ctx context.Context, t *testing.T, lg *zap.Logger, s testSce
defer func() {
r.Report(t, panicked)
}()
r.Client = s.run(ctx, t, lg, c)
r.Client = runScenario(ctx, t, s, lg, c)
persistedRequests, err := report.PersistedRequestsCluster(lg, c)
if err != nil {
t.Fatal(err)
}

failpointImpactingWatch := s.failpoint == failpoint.SleepBeforeSendWatchResponse
failpointImpactingWatch := s.Failpoint == failpoint.SleepBeforeSendWatchResponse
if !failpointImpactingWatch {
watchProgressNotifyEnabled := c.Cfg.ServerConfig.ExperimentalWatchProgressNotifyInterval != 0
client.ValidateGotAtLeastOneProgressNotify(t, r.Client, s.watch.RequestProgress || watchProgressNotifyEnabled)
client.ValidateGotAtLeastOneProgressNotify(t, r.Client, s.Watch.RequestProgress || watchProgressNotifyEnabled)
}
validateConfig := validate.Config{ExpectRevisionUnique: s.traffic.ExpectUniqueRevision()}
validateConfig := validate.Config{ExpectRevisionUnique: s.Traffic.ExpectUniqueRevision()}
r.Visualize = validate.ValidateAndReturnVisualize(t, lg, validateConfig, r.Client, persistedRequests, 5*time.Minute)

panicked = false
}

func (s testScenario) run(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) (reports []report.ClientReport) {
func runScenario(ctx context.Context, t *testing.T, s scenarios.TestScenario, lg *zap.Logger, clus *e2e.EtcdProcessCluster) (reports []report.ClientReport) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
g := errgroup.Group{}
@@ -123,7 +124,7 @@ func (s testScenario) run(ctx context.Context, t *testing.T, lg *zap.Logger, clu
defer close(failpointInjected)
// Give some time for traffic to reach qps target before injecting failpoint.
time.Sleep(time.Second)
fr, err := failpoint.Inject(ctx, t, lg, clus, s.failpoint, baseTime, ids)
fr, err := failpoint.Inject(ctx, t, lg, clus, s.Failpoint, baseTime, ids)
if err != nil {
t.Error(err)
cancel()
@@ -139,14 +140,14 @@ func (s testScenario) run(ctx context.Context, t *testing.T, lg *zap.Logger, clu
maxRevisionChan := make(chan int64, 1)
g.Go(func() error {
defer close(maxRevisionChan)
operationReport = traffic.SimulateTraffic(ctx, t, lg, clus, s.profile, s.traffic, failpointInjected, baseTime, ids)
operationReport = traffic.SimulateTraffic(ctx, t, lg, clus, s.Profile, s.Traffic, failpointInjected, baseTime, ids)
maxRevision := operationsMaxRevision(operationReport)
maxRevisionChan <- maxRevision
lg.Info("Finished simulating traffic", zap.Int64("max-revision", maxRevision))
lg.Info("Finished simulating Traffic", zap.Int64("max-revision", maxRevision))
return nil
})
g.Go(func() error {
watchReport = client.CollectClusterWatchEvents(ctx, t, clus, maxRevisionChan, s.watch, baseTime, ids)
watchReport = client.CollectClusterWatchEvents(ctx, t, clus, maxRevisionChan, s.Watch, baseTime, ids)
return nil
})
g.Wait()
136 changes: 68 additions & 68 deletions (scenario definitions, moved into the scenarios package)
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package robustness
package scenarios

import (
"path/filepath"
@@ -58,16 +58,16 @@ var trafficProfiles = []TrafficProfile{
},
}

type testScenario struct {
name string
failpoint failpoint.Failpoint
cluster e2e.EtcdProcessClusterConfig
traffic traffic.Traffic
profile traffic.Profile
watch client.WatchConfig
type TestScenario struct {
Name string
Failpoint failpoint.Failpoint
Cluster e2e.EtcdProcessClusterConfig
Traffic traffic.Traffic
Profile traffic.Profile
Watch client.WatchConfig
}

func exploratoryScenarios(_ *testing.T) []testScenario {
func Exploratory(_ *testing.T) []TestScenario {
randomizableOptions := []e2e.EPClusterOption{
options.WithClusterOptionGroups(
options.ClusterOptions{options.WithTickMs(29), options.WithElectionMs(271)},
@@ -101,16 +101,16 @@ func exploratoryScenarios(_ *testing.T) []testScenario {
if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
baseOptions = append(baseOptions, e2e.WithSnapshotCatchUpEntries(100))
}
scenarios := []testScenario{}
scenarios := []TestScenario{}
for _, tp := range trafficProfiles {
name := filepath.Join(tp.Name, "ClusterOfSize1")
clusterOfSize1Options := baseOptions
clusterOfSize1Options = append(clusterOfSize1Options, e2e.WithClusterSize(1))
scenarios = append(scenarios, testScenario{
name: name,
traffic: tp.Traffic,
profile: tp.Profile,
cluster: *e2e.NewConfig(clusterOfSize1Options...),
scenarios = append(scenarios, TestScenario{
Name: name,
Traffic: tp.Traffic,
Profile: tp.Profile,
Cluster: *e2e.NewConfig(clusterOfSize1Options...),
})
}

@@ -122,27 +122,27 @@ func exploratoryScenarios(_ *testing.T) []testScenario {
if fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
clusterOfSize3Options = append(clusterOfSize3Options, mixedVersionOption)
}
scenarios = append(scenarios, testScenario{
name: name,
traffic: tp.Traffic,
profile: tp.Profile,
cluster: *e2e.NewConfig(clusterOfSize3Options...),
scenarios = append(scenarios, TestScenario{
Name: name,
Traffic: tp.Traffic,
Profile: tp.Profile,
Cluster: *e2e.NewConfig(clusterOfSize3Options...),
})
}
if e2e.BinPath.LazyFSAvailable() {
newScenarios := scenarios
for _, s := range scenarios {
// LazyFS increases the load on CPU, so we run it with more lightweight case.
if s.profile.MinimalQPS <= 100 && s.cluster.ClusterSize == 1 {
lazyfsCluster := s.cluster
if s.Profile.MinimalQPS <= 100 && s.Cluster.ClusterSize == 1 {
lazyfsCluster := s.Cluster
lazyfsCluster.LazyFSEnabled = true
newScenarios = append(newScenarios, testScenario{
name: filepath.Join(s.name, "LazyFS"),
failpoint: s.failpoint,
cluster: lazyfsCluster,
traffic: s.traffic,
profile: s.profile.WithoutCompaction(),
watch: s.watch,
newScenarios = append(newScenarios, TestScenario{
Name: filepath.Join(s.Name, "LazyFS"),
Failpoint: s.Failpoint,
Cluster: lazyfsCluster,
Traffic: s.Traffic,
Profile: s.Profile.WithoutCompaction(),
Watch: s.Watch,
})
}
}
@@ -151,60 +151,60 @@ func exploratoryScenarios(_ *testing.T) []testScenario {
return scenarios
}

func regressionScenarios(t *testing.T) []testScenario {
func Regression(t *testing.T) []TestScenario {
v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
if err != nil {
t.Fatalf("Failed checking etcd version binary, binary: %q, err: %v", e2e.BinPath.Etcd, err)
}

scenarios := []testScenario{}
scenarios = append(scenarios, testScenario{
name: "Issue14370",
failpoint: failpoint.RaftBeforeSavePanic,
profile: traffic.LowTraffic,
traffic: traffic.EtcdPutDeleteLease,
cluster: *e2e.NewConfig(
scenarios := []TestScenario{}
scenarios = append(scenarios, TestScenario{
Name: "Issue14370",
Failpoint: failpoint.RaftBeforeSavePanic,
Profile: traffic.LowTraffic,
Traffic: traffic.EtcdPutDeleteLease,
Cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
})
scenarios = append(scenarios, testScenario{
name: "Issue14685",
failpoint: failpoint.DefragBeforeCopyPanic,
profile: traffic.LowTraffic,
traffic: traffic.EtcdPutDeleteLease,
cluster: *e2e.NewConfig(
scenarios = append(scenarios, TestScenario{
Name: "Issue14685",
Failpoint: failpoint.DefragBeforeCopyPanic,
Profile: traffic.LowTraffic,
Traffic: traffic.EtcdPutDeleteLease,
Cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
})
scenarios = append(scenarios, testScenario{
name: "Issue13766",
failpoint: failpoint.KillFailpoint,
profile: traffic.HighTrafficProfile,
traffic: traffic.EtcdPut,
cluster: *e2e.NewConfig(
scenarios = append(scenarios, TestScenario{
Name: "Issue13766",
Failpoint: failpoint.KillFailpoint,
Profile: traffic.HighTrafficProfile,
Traffic: traffic.EtcdPut,
Cluster: *e2e.NewConfig(
e2e.WithSnapshotCount(100),
),
})
scenarios = append(scenarios, testScenario{
name: "Issue15220",
watch: client.WatchConfig{
scenarios = append(scenarios, TestScenario{
Name: "Issue15220",
Watch: client.WatchConfig{
RequestProgress: true,
},
profile: traffic.LowTraffic,
traffic: traffic.EtcdPutDeleteLease,
failpoint: failpoint.KillFailpoint,
cluster: *e2e.NewConfig(
Profile: traffic.LowTraffic,
Traffic: traffic.EtcdPutDeleteLease,
Failpoint: failpoint.KillFailpoint,
Cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
),
})
scenarios = append(scenarios, testScenario{
name: "Issue17529",
profile: traffic.HighTrafficProfile,
traffic: traffic.Kubernetes,
failpoint: failpoint.SleepBeforeSendWatchResponse,
cluster: *e2e.NewConfig(
scenarios = append(scenarios, TestScenario{
Name: "Issue17529",
Profile: traffic.HighTrafficProfile,
Traffic: traffic.Kubernetes,
Failpoint: failpoint.SleepBeforeSendWatchResponse,
Cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
options.WithSnapshotCount(100),
@@ -219,12 +219,12 @@ func regressionScenarios(t *testing.T) []testScenario {
if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
opts = append(opts, e2e.WithSnapshotCatchUpEntries(100))
}
scenarios = append(scenarios, testScenario{
name: "Issue15271",
failpoint: failpoint.BlackholeUntilSnapshot,
profile: traffic.HighTrafficProfile,
traffic: traffic.EtcdPut,
cluster: *e2e.NewConfig(opts...),
scenarios = append(scenarios, TestScenario{
Name: "Issue15271",
Failpoint: failpoint.BlackholeUntilSnapshot,
Profile: traffic.HighTrafficProfile,
Traffic: traffic.EtcdPut,
Cluster: *e2e.NewConfig(opts...),
})
}
return scenarios
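
A side effect visible in main_test.go above: Go does not allow a method to be declared on a type defined in another package, so once the scenario type moves into the scenarios package, the former run method has to become the free function runScenario in the robustness test package. A minimal sketch of the converted signature follows, with the body elided; the signature matches the diff, while the zap and e2e import paths are assumed.

package robustness

import (
	"context"
	"testing"

	"go.uber.org/zap"

	"go.etcd.io/etcd/tests/v3/framework/e2e"
	"go.etcd.io/etcd/tests/v3/robustness/report"
	"go.etcd.io/etcd/tests/v3/robustness/scenarios"
)

// Previously: func (s testScenario) run(ctx context.Context, t *testing.T, lg *zap.Logger,
//                 clus *e2e.EtcdProcessCluster) (reports []report.ClientReport)
// Now the scenario is passed in explicitly as an exported value from the subpackage.
func runScenario(ctx context.Context, t *testing.T, s scenarios.TestScenario, lg *zap.Logger, clus *e2e.EtcdProcessCluster) (reports []report.ClientReport) {
	// Traffic simulation, failpoint injection, and watch collection are elided here;
	// the body, unchanged apart from the renamed fields, is in the main_test.go hunks above.
	return reports
}
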
