Skip to content

Commit f4f2f75

Browse files
committed
backup: disable AC in datadriven tests
These tests, via 'new-cluster', run a large number of nodes in a single process, overloading that process and causing bulk work to starve if it is restricted to using strictly spare capacity (of which there is none). Release note: none. Epic: none.
1 parent ceb320e commit f4f2f75

File tree

3 files changed

+14
-4
lines changed

3 files changed

+14
-4
lines changed

pkg/backup/datadriven_test.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ import (
2727
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
2828
"github.com/cockroachdb/cockroach/pkg/keys"
2929
"github.com/cockroachdb/cockroach/pkg/kv"
30+
"github.com/cockroachdb/cockroach/pkg/kv/bulk"
3031
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
3132
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
3233
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
@@ -45,6 +46,7 @@ import (
4546
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
4647
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
4748
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
49+
"github.com/cockroachdb/cockroach/pkg/util/admission"
4850
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
4951
"github.com/cockroachdb/datadriven"
5052
"github.com/cockroachdb/errors"
@@ -194,6 +196,12 @@ func (d *datadrivenTestState) addCluster(t *testing.T, cfg clusterCfg) error {
194196
closedts.SideTransportCloseInterval.Override(context.Background(), &settings.SV, 10*time.Millisecond)
195197
kvserver.RangeFeedRefreshInterval.Override(context.Background(), &settings.SV, 10*time.Millisecond)
196198
sql.TempObjectWaitInterval.Override(context.Background(), &settings.SV, time.Millisecond)
199+
// Disable AC yielding as these tests can run many in-process clusters at once
200+
// and overload the host. Generally overload would mean bulk work, which only
201+
// uses strictly spare capacity, gets starved, but these tests expect it to
202+
// still run (just slowly, along with everything else).
203+
bulk.YieldIfNoPacer.Override(context.Background(), &settings.SV, false)
204+
admission.YieldInPacer.Override(context.Background(), &settings.SV, false)
197205
params.ServerArgs.Settings = settings
198206

199207
clusterSize := cfg.nodes

pkg/kv/bulk/cpu_pacer.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,8 @@ var cpuPacerRequestDuration = settings.RegisterDurationSetting(
2525
50*time.Millisecond,
2626
)
2727

28-
var yieldIfNoPacer = settings.RegisterBoolSetting(
28+
// YieldIfNoPacer is exported so it can be overridden in tests.
29+
var YieldIfNoPacer = settings.RegisterBoolSetting(
2930
settings.ApplicationLevel,
3031
"bulkio.elastic_cpu_control.always_yield.enabled",
3132
"if true, yield the CPU as needed even when time-based elastic pacing is not enabled",
@@ -38,7 +39,7 @@ func NewCPUPacer(ctx context.Context, db *kv.DB, setting *settings.BoolSetting)
3839
if db == nil || db.AdmissionPacerFactory == nil || !setting.Get(db.SettingsValues()) {
3940
log.Dev.Infof(ctx, "admission control is not configured to pace bulk ingestion")
4041

41-
if db != nil && yieldIfNoPacer.Get(db.SettingsValues()) {
42+
if db != nil && YieldIfNoPacer.Get(db.SettingsValues()) {
4243
// Return a Pacer that just yields.
4344
return &admission.Pacer{Yield: true}
4445
}

pkg/util/admission/elastic_cpu_grant_coordinator.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,8 @@ func (e *ElasticCPUGrantCoordinator) tryGrant() {
8484
e.elasticCPUGranter.tryGrant()
8585
}
8686

87-
var yieldInPacer = settings.RegisterBoolSetting(
87+
// YieldInPacer is exported so it can be overridden in tests.
88+
var YieldInPacer = settings.RegisterBoolSetting(
8889
settings.ApplicationLevel,
8990
"admission.elastic_cpu.yield_in_pacer.enabled",
9091
"when true, time-based elastic CPU pacing additionally yields CPU as-needed according to the scheduler",
@@ -100,6 +101,6 @@ func (e *ElasticCPUGrantCoordinator) NewPacer(unit time.Duration, wi WorkInfo) *
100101
unit: unit,
101102
wi: wi,
102103
wq: e.ElasticCPUWorkQueue,
103-
Yield: yieldInPacer.Get(&e.ElasticCPUWorkQueue.settings.SV),
104+
Yield: YieldInPacer.Get(&e.ElasticCPUWorkQueue.settings.SV),
104105
}
105106
}

0 commit comments

Comments
 (0)