build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.1.0 to 1.6.0 #1

Open · wants to merge 5 commits into master
827 changes: 2 additions & 825 deletions README.md

Large diffs are not rendered by default.

29 changes: 15 additions & 14 deletions go.mod
@@ -3,7 +3,7 @@ module github.com/moby/buildkit
go 1.21

require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
github.com/Microsoft/go-winio v0.6.1
github.com/Microsoft/hcsshim v0.11.4
@@ -65,7 +65,7 @@ require (
github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b
github.com/sirupsen/logrus v1.9.3
github.com/spdx/tools-golang v0.5.3
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c
github.com/tonistiigi/go-actions-cache v0.0.0-20240320205438-9794bdbb2fb4
github.com/tonistiigi/go-archvariant v1.0.0
@@ -90,11 +90,11 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
go.opentelemetry.io/proto/otlp v1.0.0
golang.org/x/crypto v0.21.0
golang.org/x/mod v0.13.0
golang.org/x/net v0.23.0
golang.org/x/sync v0.5.0
golang.org/x/sys v0.18.0
golang.org/x/crypto v0.24.0
golang.org/x/mod v0.17.0
golang.org/x/net v0.26.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.21.0
golang.org/x/time v0.3.0
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b
google.golang.org/grpc v1.59.0
@@ -105,9 +105,9 @@
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
@@ -141,9 +141,10 @@ require (
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hanwen/go-fuse/v2 v2.4.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -154,7 +155,7 @@ require (
github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
@@ -165,8 +166,8 @@ require (
github.com/vishvananda/netns v0.0.4 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.14.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
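Bumping azidentity also pulls its transitive azcore, internal, and MSAL modules forward, as the indirect requirements above show. As a quick post-upgrade smoke test, here is a minimal, self-contained sketch of the credential flow these packages provide; it is not code from this repository, and the storage scope used is only an example.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential chains environment, workload identity, managed
	// identity and CLI credentials; which one succeeds depends on where this runs.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Request a token; the Azure Storage scope here is illustrative only.
	tok, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}
```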
70 changes: 36 additions & 34 deletions go.sum

Large diffs are not rendered by default.

71 changes: 58 additions & 13 deletions solver/jobs.go
@@ -176,13 +176,60 @@ func (s *state) setEdge(index Index, targetEdge *edge, targetState *state) {
targetEdge.takeOwnership(e)

if targetState != nil {
targetState.addJobs(s, map[*state]struct{}{})

if _, ok := targetState.allPw[s.mpw]; !ok {
targetState.mpw.Add(s.mpw)
targetState.allPw[s.mpw] = struct{}{}
}
}
}

// addJobs recursively adds jobs to the state and all of its ancestors. It is
// currently only used during edge merges, to add jobs from the source of the
// merge to the target and its ancestors.
// It requires that Solver.mu is read-locked and srcState.mu is locked.
func (s *state) addJobs(srcState *state, memo map[*state]struct{}) {
if _, ok := memo[s]; ok {
return
}
memo[s] = struct{}{}

s.mu.Lock()
defer s.mu.Unlock()

for j := range srcState.jobs {
s.jobs[j] = struct{}{}
}

for _, inputEdge := range s.vtx.Inputs() {
inputState, ok := s.solver.actives[inputEdge.Vertex.Digest()]
if !ok {
bklog.G(context.TODO()).
WithField("vertex_digest", inputEdge.Vertex.Digest()).
Error("input vertex not found during addJobs")
continue
}
inputState.addJobs(srcState, memo)

// tricky case: if the inputState's edge was *already* merged we should
// also add jobs to the merged edge's state
mergedInputEdge := inputState.getEdge(inputEdge.Index)
if mergedInputEdge == nil || mergedInputEdge.edge.Vertex.Digest() == inputEdge.Vertex.Digest() {
// not merged
continue
}
mergedInputState, ok := s.solver.actives[mergedInputEdge.edge.Vertex.Digest()]
if !ok {
bklog.G(context.TODO()).
WithField("vertex_digest", mergedInputEdge.edge.Vertex.Digest()).
Error("merged input vertex not found during addJobs")
continue
}
mergedInputState.addJobs(srcState, memo)
}
}

func (s *state) combinedCacheManager() CacheManager {
s.mu.Lock()
cms := make([]CacheManager, 0, len(s.cache)+1)
@@ -470,16 +517,25 @@ func (jl *Solver) loadUnlocked(ctx context.Context, v, parent Vertex, j *Job, ca
if debugScheduler {
lg := bklog.G(ctx).
WithField("vertex_name", v.Name()).
WithField("vertex_digest", v.Digest())
WithField("vertex_digest", v.Digest()).
WithField("actives_digest_key", dgst)
if j != nil {
lg = lg.WithField("job", j.id)
}
lg.Debug("adding active vertex")
for i, inp := range v.Inputs() {
lg.WithField("input_index", i).
WithField("input_vertex_name", inp.Vertex.Name()).
WithField("input_vertex_digest", inp.Vertex.Digest()).
WithField("input_edge_index", inp.Index).
Debug("new active vertex input")
}
}
} else if debugScheduler {
lg := bklog.G(ctx).
WithField("vertex_name", v.Name()).
WithField("vertex_digest", v.Digest())
WithField("vertex_digest", v.Digest()).
WithField("actives_digest_key", dgst)
if j != nil {
lg = lg.WithField("job", j.id)
}
@@ -499,17 +555,6 @@ func (jl *Solver) loadUnlocked(ctx context.Context, v, parent Vertex, j *Job, ca
if _, ok := st.jobs[j]; !ok {
st.jobs[j] = struct{}{}
}
if debugScheduler {
jobIDs := make([]string, 0, len(st.jobs))
for j := range st.jobs {
jobIDs = append(jobIDs, j.id)
}
bklog.G(ctx).
WithField("vertex_name", v.Name()).
WithField("vertex_digest", v.Digest()).
WithField("jobs", jobIDs).
Debug("current jobs for vertex")
}
}
st.mu.Unlock()

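For readers new to the solver internals, the stripped-down sketch below shows the memoized recursion pattern that the new addJobs helper uses; the node type and its fields are illustrative stand-ins, not BuildKit's actual state and edge types.

```go
package main

import "fmt"

// node is a stand-in for the solver's per-vertex state: it tracks which
// jobs reference it and which nodes it depends on.
type node struct {
	name string
	jobs map[string]struct{}
	deps []*node
}

// addJobs copies every job attached to src onto n and all of n's
// dependencies, using memo to visit each node only once even when
// subgraphs are shared.
func (n *node) addJobs(src *node, memo map[*node]struct{}) {
	if _, ok := memo[n]; ok {
		return
	}
	memo[n] = struct{}{}

	for j := range src.jobs {
		n.jobs[j] = struct{}{}
	}
	for _, dep := range n.deps {
		dep.addJobs(src, memo)
	}
}

func main() {
	base := &node{name: "base", jobs: map[string]struct{}{}}
	target := &node{name: "target", jobs: map[string]struct{}{}, deps: []*node{base}}
	src := &node{name: "src", jobs: map[string]struct{}{"job-1": {}}}

	// Simulate an edge merge: src's jobs now also keep target (and base) alive.
	target.addJobs(src, map[*node]struct{}{})
	fmt.Println(len(target.jobs), len(base.jobs)) // 1 1
}
```

The memo map is what keeps the traversal linear when vertices share inputs and prevents repeated work on diamond-shaped graphs.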
72 changes: 60 additions & 12 deletions solver/scheduler.go
@@ -423,38 +423,77 @@ func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, erro
}

func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pipe.Receiver) {
log := bklog.G(context.TODO())

log.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest())
log := bklog.G(context.TODO()).
WithField("edge_vertex_name", e.edge.Vertex.Name()).
WithField("edge_vertex_digest", e.edge.Vertex.Digest()).
WithField("edge_index", e.edge.Index)

log.
WithField("edge_state", e.state).
WithField("req", len(inc)).
WithField("upt", len(updates)).
WithField("out", len(allPipes)).
Debug(">> unpark")

for i, dep := range e.deps {
des := edgeStatusInitial
if dep.req != nil {
des = dep.req.Request().(*edgeRequest).desiredState
}
log.Debugf(":: dep%d %s state=%s des=%s keys=%d hasslowcache=%v preprocessfunc=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil, e.preprocessFunc(dep) != nil)
log.
WithField("dep_index", i).
WithField("dep_vertex_name", e.edge.Vertex.Inputs()[i].Vertex.Name()).
WithField("dep_vertex_digest", e.edge.Vertex.Inputs()[i].Vertex.Digest()).
WithField("dep_state", dep.state).
WithField("dep_desired_state", des).
WithField("dep_keys", len(dep.keys)).
WithField("dep_has_slow_cache", e.slowCacheFunc(dep) != nil).
WithField("dep_preprocess_func", e.preprocessFunc(dep) != nil).
Debug(":: dep")
}

for i, in := range inc {
req := in.Request()
log.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled)
log.
WithField("incoming_index", i).
WithField("incoming_pointer", in).
WithField("incoming_desired_state", req.Payload.(*edgeRequest).desiredState).
WithField("incoming_canceled", req.Canceled).
Debug("> incoming")
}

for i, up := range updates {
if up == e.cacheMapReq {
log.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed)
log.
WithField("update_index", i).
WithField("update_pointer", up).
WithField("update_complete", up.Status().Completed).
Debug("> update cacheMapReq")
} else if up == e.execReq {
log.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed)
log.
WithField("update_index", i).
WithField("update_pointer", up).
WithField("update_complete", up.Status().Completed).
Debug("> update execReq")
} else {
st, ok := up.Status().Value.(*edgeState)
if ok {
index := -1
if dep, ok := e.depRequests[up]; ok {
index = int(dep.index)
}
log.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state)
log.
WithField("update_index", i).
WithField("update_pointer", up).
WithField("update_complete", up.Status().Completed).
WithField("update_input_index", index).
WithField("update_keys", len(st.keys)).
WithField("update_state", st.state).
Debugf("> update edgeState")
} else {
log.Debugf("> update-%d: unknown", i)
log.
WithField("update_index", i).
Debug("> update unknown")
}
}
}
@@ -463,7 +502,16 @@
func debugSchedulerPostUnpark(e *edge, inc []pipe.Sender) {
log := bklog.G(context.TODO())
for i, in := range inc {
log.Debugf("< incoming-%d: %p completed=%v", i, in, in.Status().Completed)
}
log.Debugf("<< unpark %s\n", e.edge.Vertex.Name())
log.
WithField("incoming_index", i).
WithField("incoming_pointer", in).
WithField("incoming_complete", in.Status().Completed).
Debug("< incoming")
}
log.
WithField("edge_vertex_name", e.edge.Vertex.Name()).
WithField("edge_vertex_digest", e.edge.Vertex.Digest()).
WithField("edge_index", e.edge.Index).
WithField("edge_state", e.state).
Debug("<< unpark")
}
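The scheduler changes above convert printf-style Debugf calls into structured WithField logging. The short logrus sketch below illustrates the same pattern in isolation (logrus is already a direct dependency in go.mod; the field values are made up) and why discrete fields are easier for log processors to filter than one formatted message.

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	edgeName, edgeState, reqs := "copy /src", "cache-slow", 2

	// Old style: one opaque formatted string; values can only be grepped.
	logrus.Debugf(">> unpark %s req=%d state=%s", edgeName, reqs, edgeState)

	// New style: each value is a discrete field, so tooling can filter on
	// edge_state or req without parsing the message text.
	logrus.WithField("edge_vertex_name", edgeName).
		WithField("edge_state", edgeState).
		WithField("req", reqs).
		Debug(">> unpark")
}
```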