Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions analyzer/network/analyzer/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ func AnalyzerInit(ctx context.Context) (*analyzerservice.AnalyzerDeps, error) {
ServiceAccount: *buildServiceAccount,
LogsBucket: *logsBucket,
Client: nil, // Defined depending on gcbPrivatePoolName
ExtraTags: map[string]string{"analyzer": "network"},
}
if *gcbPrivatePoolName != "" {
pool := &gcb.PrivatePoolConfig{
Expand Down
10 changes: 6 additions & 4 deletions analyzer/network/analyzerservice/analyze.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"encoding/json"
"io"
"path"
"time"

"github.com/google/oss-rebuild/internal/api"
"github.com/google/oss-rebuild/internal/cache"
Expand Down Expand Up @@ -68,7 +69,7 @@ func Analyze(ctx context.Context, req schema.AnalyzeRebuildRequest, deps *Analyz
return nil, api.AsStatus(codes.AlreadyExists, errors.New("analysis already exists"))
}
}
return analyzeRebuild(ctx, t, deps)
return analyzeRebuild(ctx, t, req.Timeout, deps)
}

func analysisExists(ctx context.Context, store rebuild.AssetStore, t rebuild.Target) (bool, error) {
Expand All @@ -81,7 +82,7 @@ func analysisExists(ctx context.Context, store rebuild.AssetStore, t rebuild.Tar
return true, nil
}

func analyzeRebuild(ctx context.Context, t rebuild.Target, deps *AnalyzerDeps) (*api.NoReturn, error) {
func analyzeRebuild(ctx context.Context, t rebuild.Target, timeout time.Duration, deps *AnalyzerDeps) (*api.NoReturn, error) {
rebuildAttestation, err := getRebuildAttestation(ctx, deps.InputAttestationStore, t, deps.Verifier)
if err != nil {
return nil, errors.Wrap(err, "getting rebuild attestation")
Expand All @@ -92,7 +93,7 @@ func analyzeRebuild(ctx context.Context, t rebuild.Target, deps *AnalyzerDeps) (
}
ctx = context.WithValue(ctx, rebuild.HTTPBasicClientID, deps.HTTPClient)
mux := meta.NewRegistryMux(httpx.NewCachedClient(deps.HTTPClient, &cache.CoalescingMemoryCache{}))
id, err := executeNetworkRebuild(ctx, deps, t, strategy, rebuildAttestation)
id, err := executeNetworkRebuild(ctx, deps, t, strategy, rebuildAttestation, timeout)
if err != nil {
return nil, errors.Wrap(err, "network rebuild failed")
}
Expand Down Expand Up @@ -188,7 +189,7 @@ func compareArtifacts(ctx context.Context, mux rebuild.RegistryMux, t rebuild.Ta
return &rb, &up, nil
}

func executeNetworkRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuild.Target, strategy rebuild.Strategy, rebuildAttestation *attestation.RebuildAttestation) (string, error) {
func executeNetworkRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuild.Target, strategy rebuild.Strategy, rebuildAttestation *attestation.RebuildAttestation, timeout time.Duration) (string, error) {
obID := uuid.New().String()
debugStore, err := deps.DebugStoreBuilder(context.WithValue(ctx, rebuild.RunID, obID))
if err != nil {
Expand Down Expand Up @@ -220,6 +221,7 @@ func executeNetworkRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuild.Ta
}
h, err := deps.GCBExecutor.Start(ctx, in, build.Options{
BuildID: obID,
Timeout: timeout,
UseTimewarp: meta.AllRebuilders[t.Ecosystem].UsesTimewarp(in),
UseNetworkProxy: true, // The whole point of the analyzer
Resources: build.Resources{
Expand Down
1 change: 1 addition & 0 deletions analyzer/system/analyzer/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ func AnalyzerInit(ctx context.Context) (*analyzerservice.AnalyzerDeps, error) {
ServiceAccount: *buildServiceAccount,
LogsBucket: *logsBucket,
Client: nil, // Defined depending on gcbPrivatePoolName
ExtraTags: map[string]string{"analyzer": "system"},
}
if *gcbPrivatePoolName != "" {
pool := &gcb.PrivatePoolConfig{
Expand Down
10 changes: 6 additions & 4 deletions analyzer/system/analyzerservice/analyze.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"encoding/json"
"io"
"path"
"time"

"github.com/google/oss-rebuild/internal/api"
"github.com/google/oss-rebuild/internal/cache"
Expand Down Expand Up @@ -68,7 +69,7 @@ func Analyze(ctx context.Context, req schema.AnalyzeRebuildRequest, deps *Analyz
return nil, api.AsStatus(codes.AlreadyExists, errors.New("analysis already exists"))
}
}
return analyzeRebuild(ctx, t, deps)
return analyzeRebuild(ctx, t, req.Timeout, deps)
}

func analysisExists(ctx context.Context, store rebuild.AssetStore, t rebuild.Target) (bool, error) {
Expand All @@ -81,7 +82,7 @@ func analysisExists(ctx context.Context, store rebuild.AssetStore, t rebuild.Tar
return true, nil
}

func analyzeRebuild(ctx context.Context, t rebuild.Target, deps *AnalyzerDeps) (*api.NoReturn, error) {
func analyzeRebuild(ctx context.Context, t rebuild.Target, timeout time.Duration, deps *AnalyzerDeps) (*api.NoReturn, error) {
rebuildAttestation, err := getRebuildAttestation(ctx, deps.InputAttestationStore, t, deps.Verifier)
if err != nil {
return nil, errors.Wrap(err, "getting rebuild attestation")
Expand All @@ -92,7 +93,7 @@ func analyzeRebuild(ctx context.Context, t rebuild.Target, deps *AnalyzerDeps) (
}
ctx = context.WithValue(ctx, rebuild.HTTPBasicClientID, deps.HTTPClient)
mux := meta.NewRegistryMux(httpx.NewCachedClient(deps.HTTPClient, &cache.CoalescingMemoryCache{}))
id, err := executeSystemTraceRebuild(ctx, deps, t, strategy, rebuildAttestation)
id, err := executeSystemTraceRebuild(ctx, deps, t, strategy, rebuildAttestation, timeout)
if err != nil {
return nil, errors.Wrap(err, "system trace rebuild failed")
}
Expand Down Expand Up @@ -188,7 +189,7 @@ func compareArtifacts(ctx context.Context, mux rebuild.RegistryMux, t rebuild.Ta
return &rb, &up, nil
}

func executeSystemTraceRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuild.Target, strategy rebuild.Strategy, rebuildAttestation *attestation.RebuildAttestation) (string, error) {
func executeSystemTraceRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuild.Target, strategy rebuild.Strategy, rebuildAttestation *attestation.RebuildAttestation, timeout time.Duration) (string, error) {
obID := uuid.New().String()
debugStore, err := deps.DebugStoreBuilder(context.WithValue(ctx, rebuild.RunID, obID))
if err != nil {
Expand Down Expand Up @@ -220,6 +221,7 @@ func executeSystemTraceRebuild(ctx context.Context, deps *AnalyzerDeps, t rebuil
}
h, err := deps.GCBExecutor.Start(ctx, in, build.Options{
BuildID: obID,
Timeout: timeout,
UseTimewarp: meta.AllRebuilders[t.Ecosystem].UsesTimewarp(in),
UseSyscallMonitor: true, // The whole point of the analyzer
Resources: build.Resources{
Expand Down
1 change: 1 addition & 0 deletions internal/api/agentapiservice/iteration.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ func AgentCreateIteration(ctx context.Context, req schema.AgentCreateIterationRe
h, err := deps.GCBExecutor.Start(ctx, input, build.Options{
BuildID: obliviousID,
UseTimewarp: meta.AllRebuilders[input.Target.Ecosystem].UsesTimewarp(input),
// TODO: Should we set a Timeout?
Resources: build.Resources{
AssetStore: store,
ToolURLs: toolURLs,
Expand Down
5 changes: 3 additions & 2 deletions internal/api/apiservice/rebuild.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ func getStrategy(ctx context.Context, deps *RebuildPackageDeps, t rebuild.Target
return strategy, entry, nil
}

func buildAndAttest(ctx context.Context, deps *RebuildPackageDeps, mux rebuild.RegistryMux, a verifier.Attestor, t rebuild.Target, strategy rebuild.Strategy, entry *repoEntry, useProxy bool, useSyscallMonitor bool, mode schema.OverwriteMode) (err error) {
func buildAndAttest(ctx context.Context, deps *RebuildPackageDeps, mux rebuild.RegistryMux, a verifier.Attestor, t rebuild.Target, strategy rebuild.Strategy, entry *repoEntry, useProxy bool, useSyscallMonitor bool, timeout time.Duration, mode schema.OverwriteMode) (err error) {
debugStore, err := deps.DebugStoreBuilder(ctx)
if err != nil {
return errors.Wrap(err, "creating debug store")
Expand Down Expand Up @@ -184,6 +184,7 @@ func buildAndAttest(ctx context.Context, deps *RebuildPackageDeps, mux rebuild.R
}
h, err := deps.GCBExecutor.Start(ctx, in, build.Options{
BuildID: obID,
Timeout: timeout,
UseTimewarp: meta.AllRebuilders[t.Ecosystem].UsesTimewarp(in),
UseNetworkProxy: useProxy,
UseSyscallMonitor: useSyscallMonitor,
Expand Down Expand Up @@ -323,7 +324,7 @@ func rebuildPackage(ctx context.Context, req schema.RebuildPackageRequest, deps
if strategy != nil {
v.StrategyOneof = schema.NewStrategyOneOf(strategy)
}
err = buildAndAttest(ctx, deps, mux, a, t, strategy, entry, req.UseNetworkProxy, req.UseSyscallMonitor, req.OverwriteMode)
err = buildAndAttest(ctx, deps, mux, a, t, strategy, entry, req.UseNetworkProxy, req.UseSyscallMonitor, req.BuildTimeout, req.OverwriteMode)
if err != nil {
v.Message = errors.Wrap(err, "executing rebuild").Error()
return &v, nil
Expand Down
20 changes: 15 additions & 5 deletions pkg/build/gcb/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ type Executor struct {
outputBufferSize int
builderName string
terminateOnTimeout bool
extraTags map[string]string
activeBuilds syncx.Map[string, *gcbHandle]
}

Expand Down Expand Up @@ -68,6 +69,7 @@ func NewExecutor(config ExecutorConfig) (*Executor, error) {
outputBufferSize: outputBufferSize,
builderName: config.BuilderName,
terminateOnTimeout: config.TerminateOnTimeout,
extraTags: config.ExtraTags,
activeBuilds: syncx.Map[string, *gcbHandle]{},
}, nil
}
Expand All @@ -83,6 +85,7 @@ type ExecutorConfig struct {
OutputBufferSize int
BuilderName string
TerminateOnTimeout bool
ExtraTags map[string]string
}

// Start implements build.Executor
Expand All @@ -104,6 +107,9 @@ func (e *Executor) Start(ctx context.Context, input rebuild.Input, opts build.Op
}
// Make the Cloud Build "Build" request
gcbBuild := e.makeBuild(input.Target, plan)
if opts.Timeout > 0 {
gcbBuild.Timeout = fmt.Sprintf("%ds", int(opts.Timeout.Seconds()))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

okay so i think the better behavior would be to default to the max gcb time (which arguably gcb itself should be doing, since for worker pools 60m isn't the max). then we could keep the timeout param client-side and focus on making sure that's handled appropriately (i.e. addressing any polling issues we're running into)

}
// Create a buffered pipe for streaming output
pipe := bufiox.NewBufferedPipe(bufiox.NewLineBuffer(e.outputBufferSize))
handle := &gcbHandle{
Expand Down Expand Up @@ -281,16 +287,20 @@ func (e *Executor) makeBuild(t rebuild.Target, plan *Plan) *cloudbuild.Build {
Name: e.privatePool.Name,
}
}
tags := []string{
buildTag("ecosystem", string(t.Ecosystem)),
buildTag("package", t.Package),
buildTag("version", t.Version),
}
for k, v := range e.extraTags {
tags = append(tags, buildTag(k, v))
}
return &cloudbuild.Build{
Steps: plan.Steps,
Options: buildOptions,
LogsBucket: e.logsBucket,
ServiceAccount: e.serviceAccount,
Tags: []string{
buildTag("ecosystem", string(t.Ecosystem)),
buildTag("package", t.Package),
buildTag("version", t.Version),
},
Tags: tags,
}
}

Expand Down
25 changes: 25 additions & 0 deletions pkg/build/gcb/executor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,9 @@ func TestGCBExecutorStart(t *testing.T) {
}
createChan := make(chan struct{})
defer close(createChan)
var gotBuild *cloudbuild.Build
mockClient.CreateBuildFunc = func(ctx context.Context, project string, build *cloudbuild.Build) (*cloudbuild.Operation, error) {
gotBuild = build
<-createChan
return operation, nil
}
Expand All @@ -72,6 +74,9 @@ func TestGCBExecutorStart(t *testing.T) {
ServiceAccount: "test@test.iam.gserviceaccount.com",
LogsBucket: "test-bucket",
OutputBufferSize: 1024,
ExtraTags: map[string]string{
"foo": "bar",
},
}

executor, err := NewExecutor(config)
Expand Down Expand Up @@ -109,6 +114,7 @@ func TestGCBExecutorStart(t *testing.T) {
Resources: build.Resources{
BaseImageConfig: baseImageConfig,
},
Timeout: 10 * time.Minute,
}

// Test Start method
Expand Down Expand Up @@ -138,6 +144,25 @@ func TestGCBExecutorStart(t *testing.T) {
t.Fatal(result.Error)
}

if gotBuild.Timeout != "600s" {
t.Errorf("Timeout = %s, want 600s", gotBuild.Timeout)
}

// Verify tags
expectedTags := []string{"ecosystem-npm", "package-test-package", "version-1.0.0", "foo-bar"}
for _, expected := range expectedTags {
found := false
for _, actual := range gotBuild.Tags {
if actual == expected {
found = true
break
}
}
if !found {
t.Errorf("Tag %s not found in %v", expected, gotBuild.Tags)
}
}

// Clean up
if err := executor.Close(ctx); err != nil {
t.Errorf("Close failed: %v", err)
Expand Down
Loading