@@ -63,7 +63,7 @@ type tpccOptions struct {
 // tpccFixturesCmd generates the command string to load tpcc data for the
 // specified warehouse count into a cluster using either `fixtures import`
 // or `fixtures load` depending on the cloud.
-func tpccFixturesCmd(t *test, cloud string, warehouses int, checks bool) string {
+func tpccFixturesCmd(t *test, cloud string, warehouses int, extraArgs string) string {
 	var action string
 	switch cloud {
 	case "gce":
@@ -87,8 +87,8 @@ func tpccFixturesCmd(t *test, cloud string, warehouses int, checks bool) string
 	default:
 		t.Fatalf("unknown cloud: %q", cloud)
 	}
-	return fmt.Sprintf("./workload fixtures %s tpcc --checks=%v --warehouses=%d {pgurl:1}",
-		action, checks, warehouses)
+	return fmt.Sprintf("./workload fixtures %s tpcc --warehouses=%d %s {pgurl:1}",
+		action, warehouses, extraArgs)
 }
 
 func runTPCC(ctx context.Context, t *test, c *cluster, opts tpccOptions) {
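For reference, a minimal self-contained sketch of the command shape the reworked helper now produces; the `action` value and the flags passed as `extraArgs` are illustrative assumptions (the real helper picks the action per cloud, and callers decide which extra flags to append).

```go
package main

import "fmt"

// tpccFixturesCmdSketch mirrors the revised signature: the caller passes an
// arbitrary extraArgs string instead of a checks bool. "action" stands in for
// the per-cloud `import`/`load` choice made by the real helper.
func tpccFixturesCmdSketch(action string, warehouses int, extraArgs string) string {
	return fmt.Sprintf("./workload fixtures %s tpcc --warehouses=%d %s {pgurl:1}",
		action, warehouses, extraArgs)
}

func main() {
	// Assumed values, for illustration only.
	fmt.Println(tpccFixturesCmdSketch("import", 1000, "--split --scatter --checks=false"))
	// ./workload fixtures import tpcc --warehouses=1000 --split --scatter --checks=false {pgurl:1}
}
```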
@@ -145,7 +145,7 @@ func runTPCC(ctx context.Context, t *test, c *cluster, opts tpccOptions) {
 		t.Status("loading dataset")
 		c.Start(ctx, t, crdbNodes)
 
-		c.Run(ctx, workloadNode, tpccFixturesCmd(t, cloud, opts.Warehouses, true /* checks */))
+		c.Run(ctx, workloadNode, tpccFixturesCmd(t, cloud, opts.Warehouses, ""))
 		c.Stop(ctx, crdbNodes)
 
 		c.Run(ctx, crdbNodes, "test -e /sbin/zfs && sudo zfs snapshot data1@pristine")
@@ -155,7 +155,7 @@ func runTPCC(ctx context.Context, t *test, c *cluster, opts tpccOptions) {
 			c.Start(ctx, t, crdbNodes)
 		} else {
 			c.Start(ctx, t, crdbNodes)
-			c.Run(ctx, workloadNode, tpccFixturesCmd(t, cloud, opts.Warehouses, true /* checks */))
+			c.Run(ctx, workloadNode, tpccFixturesCmd(t, cloud, opts.Warehouses, ""))
 		}
 	}()
 	t.Status("waiting")
@@ -583,31 +583,33 @@ func loadTPCCBench(
 		t.Fatal(err)
 	}
 
-	// Load the corresponding fixture.
-	t.l.Printf("restoring tpcc fixture\n")
-	cmd := tpccFixturesCmd(t, cloud, b.LoadWarehouses, false /* checks */)
-	if err := c.RunE(ctx, loadNode, cmd); err != nil {
-		return err
-	}
-
-	partArgs := ""
-	rebalanceWait := time.Duration(b.LoadWarehouses/100) * time.Minute
+	var loadArgs string
+	var rebalanceWait time.Duration
 	switch b.LoadConfig {
 	case singleLoadgen:
-		t.l.Printf("splitting and scattering\n")
+		loadArgs = `--split --scatter --checks=false`
+		rebalanceWait = time.Duration(b.LoadWarehouses/100) * time.Minute
 	case singlePartitionedLoadgen:
-		t.l.Printf("splitting, scattering, and partitioning\n")
-		partArgs = fmt.Sprintf(`--partitions=%d`, b.partitions())
+		loadArgs = fmt.Sprintf(`--split --scatter --checks=false --partitions=%d`, b.partitions())
 		rebalanceWait = time.Duration(b.LoadWarehouses/50) * time.Minute
 	case multiLoadgen:
-		t.l.Printf("splitting, scattering, and partitioning\n")
-		partArgs = fmt.Sprintf(`--partitions=%d --zones="%s" --partition-affinity=0`,
+		loadArgs = fmt.Sprintf(`--split --scatter --checks=false --partitions=%d --zones="%s"`,
 			b.partitions(), strings.Join(b.Distribution.zones(), ","))
 		rebalanceWait = time.Duration(b.LoadWarehouses/20) * time.Minute
 	default:
 		panic("unexpected")
 	}
 
+	// Load the corresponding fixture.
+	t.l.Printf("restoring tpcc fixture\n")
+	cmd := tpccFixturesCmd(t, cloud, b.LoadWarehouses, loadArgs)
+	if err := c.RunE(ctx, loadNode, cmd); err != nil {
+		return err
+	}
+	if rebalanceWait == 0 {
+		return nil
+	}
+
 	t.l.Printf("waiting %v for rebalancing\n", rebalanceWait)
 	_, err := db.ExecContext(ctx, `SET CLUSTER SETTING kv.snapshot_rebalance.max_rate='64MiB'`)
 	if err != nil {
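A hedged sketch of the rebalance-wait scaling set up in the switch above: the wait grows with the loaded warehouse count and with how widely the data is spread. The divisors come from the hunk; the warehouse count is an assumed example.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed fixture size, for illustration only.
	loadWarehouses := 10000

	// Divisors taken from the switch above; wider distributions get a
	// longer rebalance window.
	configs := []struct {
		name    string
		divisor int
	}{
		{"singleLoadgen", 100},
		{"singlePartitionedLoadgen", 50},
		{"multiLoadgen", 20},
	}
	for _, c := range configs {
		wait := time.Duration(loadWarehouses/c.divisor) * time.Minute
		fmt.Printf("%-26s wait %v\n", c.name, wait)
	}
}
```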
@@ -617,9 +619,9 @@ func loadTPCCBench(
 	// Split and scatter the tables. Ramp up to the expected load in the desired
 	// distribution. This should allow for load-based rebalancing to help
 	// distribute load. Optionally pass some load configuration-specific flags.
-	cmd = fmt.Sprintf("./workload run tpcc --warehouses=%d --workers=%d --split --scatter "+
-		"--wait=false --duration=%s --tolerate-errors %s {pgurl%s}",
-		b.LoadWarehouses, b.LoadWarehouses, rebalanceWait, partArgs, roachNodes)
+	cmd = fmt.Sprintf("./workload run tpcc --warehouses=%d --workers=%d "+
+		"--wait=false --duration=%s --tolerate-errors {pgurl%s}",
+		b.LoadWarehouses, b.LoadWarehouses, rebalanceWait, roachNodes)
 	if out, err := c.RunWithBuffer(ctx, c.l, loadNode, cmd); err != nil {
 		return errors.Wrapf(err, "failed with output %q", string(out))
 	}
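Purely illustrative: with `--split --scatter` moved into the fixture-loading arguments, the ramp-up command reduces to the shape below. The warehouse count and node placeholder are assumed values; the wait reuses the singleLoadgen scaling from the earlier hunk.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// All values below are assumptions for illustration.
	loadWarehouses := 10000
	rebalanceWait := time.Duration(loadWarehouses/100) * time.Minute
	roachNodes := ":1-3" // stands in for the roachtest node-list placeholder

	cmd := fmt.Sprintf("./workload run tpcc --warehouses=%d --workers=%d "+
		"--wait=false --duration=%s --tolerate-errors {pgurl%s}",
		loadWarehouses, loadWarehouses, rebalanceWait, roachNodes)
	fmt.Println(cmd)
	// ./workload run tpcc --warehouses=10000 --workers=10000 --wait=false --duration=1h40m0s --tolerate-errors {pgurl:1-3}
}
```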
@@ -770,9 +772,9 @@ func runTPCCBench(ctx context.Context, t *test, c *cluster, b tpccBenchSpec) {
 			case singleLoadgen:
 				// Nothing.
 			case singlePartitionedLoadgen:
-				extraFlags = fmt.Sprintf(` --partitions=%d --split`, b.partitions())
+				extraFlags = fmt.Sprintf(` --partitions=%d`, b.partitions())
 			case multiLoadgen:
-				extraFlags = fmt.Sprintf(" --partitions=%d --partition-affinity=%d --split",
+				extraFlags = fmt.Sprintf(` --partitions=%d --partition-affinity=%d`,
 					b.partitions(), groupIdx)
 				activeWarehouses = warehouses / numLoadGroups
 			default:
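A small sketch of the per-load-group flags after this change: `--split` no longer appears because the tables are already split and scattered while the fixture is loaded. The partition count and group index are assumed values.

```go
package main

import "fmt"

func main() {
	// Assumed values, for illustration only.
	partitions, groupIdx := 3, 1

	// singlePartitionedLoadgen: one loadgen against partitioned tables.
	fmt.Printf(" --partitions=%d\n", partitions)
	// multiLoadgen: each load group pins itself to its own partition.
	fmt.Printf(" --partitions=%d --partition-affinity=%d\n", partitions, groupIdx)
}
```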