add lots of debug logging to investigate flakiness #421 (Closed)
cosmicexplorer wants to merge 2 commits into twitter:master from cosmicexplorer:add-debug-logging-for-grpc-write-requests
@@ -8,10 +8,10 @@ import (
     "fmt"
     "io"
     "io/ioutil"
+    "math/rand"
     "net"
     "sync"
+    "time"

     "github.com/golang/protobuf/proto"
     log "github.com/sirupsen/logrus"
     remoteexecution "github.com/twitter/scoot/bazel/remoteexecution"

Review comment (on the import block): Could you keep this newline? we like to keep our imports separated by stdlib / 3rdparty / scoot stuff

Review comment (on the remoteexecution import): related to above comment, as long as you're in here, could you move this down to the scoot stuff below? thanks!
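For context, the grouping the reviewer is asking to preserve appears to be the common three-block Go import layout: standard library first, then third-party packages, then twitter/scoot packages, each block separated by a blank line. A minimal sketch of what the grouped imports could look like after addressing both comments; the package name and the trailing helper are illustrative only, added so the example compiles:

package cas

import (
    // stdlib
    "fmt"
    "math/rand"
    "time"

    // 3rd party
    log "github.com/sirupsen/logrus"

    // scoot stuff (remoteexecution moved down here per the review)
    remoteexecution "github.com/twitter/scoot/bazel/remoteexecution"
)

// logDigest exists only so that every import in this sketch is used.
func logDigest(d *remoteexecution.Digest) {
    log.Debugf("%s(%d) -- digest %s/%d", time.Now().UTC(), rand.Intn(200000), d.GetHash(), d.GetSizeBytes())
    fmt.Println("imports grouped as stdlib / 3rdparty / scoot")
}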
@@ -496,9 +496,12 @@ func (s *casServer) Read(req *bytestream.ReadRequest, ser bytestream.ByteStream_ReadServer) error {
 // store.Stores do not support partial Writes, and neither does our implementation.
 // We can support partial Write by keeping buffers for inflight requests in the casServer.
 // When the entire Write is buffered, we can Write to the Store and return a response with the result.
-// NOTE We also no not currently attempt any resolution between multiple client UUIDs writing the same resource
+// NOTE We also do not currently attempt any resolution between multiple client UUIDs writing the same resource
 func (s *casServer) Write(ser bytestream.ByteStream_WriteServer) error {
-    log.Debug("Received CAS Write request")
+    var intId = rand.Intn(200000)
+
+    log.Debugf("%s(%d) -- Received CAS Write request", time.Now().UTC(), intId)

     if !s.IsInitialized() {
         return status.Error(codes.Internal, "Server not initialized")
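The comment block above describes a possible future extension (partial Writes buffered per inflight request) that this PR does not implement. Purely as an illustration of that idea, a hypothetical holder for inflight buffers might look like the following sketch; the type and method names are invented here and do not exist in scoot:

package cas

import (
    "bytes"
    "sync"
)

// inflightWrites is a hypothetical cache of partially received writes,
// keyed by ByteStream resource name (which embeds the client's UUID).
type inflightWrites struct {
    mu      sync.Mutex
    buffers map[string]*bytes.Buffer
}

// appendChunk accumulates one WriteRequest's data. When finishWrite is set,
// it returns the completed buffer (ready to be flushed to the Store) and
// forgets the inflight entry; otherwise it reports that more chunks are expected.
func (iw *inflightWrites) appendChunk(resource string, data []byte, finishWrite bool) (*bytes.Buffer, bool) {
    iw.mu.Lock()
    defer iw.mu.Unlock()
    if iw.buffers == nil {
        iw.buffers = make(map[string]*bytes.Buffer)
    }
    buf, ok := iw.buffers[resource]
    if !ok {
        buf = &bytes.Buffer{}
        iw.buffers[resource] = buf
    }
    buf.Write(data)
    if finishWrite {
        delete(iw.buffers, resource)
        return buf, true
    }
    return nil, false
}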
@@ -529,7 +532,7 @@ func (s *casServer) Write(ser bytestream.ByteStream_WriteServer) error {
     for {
         wr, err := ser.Recv()
         if err != nil {
-            log.Errorf("Failed to Recv(): %v", err)
+            log.Errorf("%s(%d) -- Failed to Recv(): %v", time.Now().UTC(), intId, err)
             return status.Error(codes.Internal, fmt.Sprintf("Failed to Recv: %v", err))
         }

@@ -539,19 +542,19 @@ func (s *casServer) Write(ser bytestream.ByteStream_WriteServer) error {

             resource, err = ParseWriteResource(resourceName)
             if err != nil {
-                log.Errorf("Error parsing resource: %v", err)
-                return status.Error(codes.InvalidArgument, fmt.Sprintf("%v", err))
+                log.Errorf("%s(%d) -- Error parsing resource: %v", time.Now().UTC(), intId, err)
+                return status.Error(codes.InvalidArgument, fmt.Sprintf("%s(%d) -- %v", time.Now().UTC(), intId, err))
             }
-            log.Debugf("Using resource name: %s", resourceName)
+            log.Debugf("%s(%d) -- Using resource name: %s", time.Now().UTC(), intId, resourceName)

             // If the client is attempting to write empty/nil/size-0 data, just return as if we succeeded
             if resource.Digest.GetHash() == bazel.EmptySha {
-                log.Infof("Request to write empty sha - bypassing Store write and Closing")
+                log.Infof("%s(%d) -- Request to write empty sha - bypassing Store write and Closing", time.Now().UTC(), intId)
                 res := &bytestream.WriteResponse{CommittedSize: bazel.EmptySize}
                 err = ser.SendAndClose(res)
                 if err != nil {
-                    log.Errorf("Error during SendAndClose() for EmptySha: %s", err)
-                    return status.Error(codes.Internal, fmt.Sprintf("Failed to SendAndClose: %v", err))
+                    log.Errorf("%s(%d) -- Error during SendAndClose() for EmptySha: %s", time.Now().UTC(), intId, err)
+                    return status.Error(codes.Internal, fmt.Sprintf("%s(%d) -- Failed to SendAndClose: %v", time.Now().UTC(), intId, err))
                 }
                 return nil
             }
@@ -563,28 +566,28 @@ func (s *casServer) Write(ser bytestream.ByteStream_WriteServer) error {
             // Note that Store does not support `stat`, so we trust client-provided size to avoid reading the data
             storeName = bazel.DigestStoreName(resource.Digest)
             if exists, err := s.storeConfig.Store.Exists(storeName); err != nil {
-                log.Errorf("Error checking existence: %v", err)
-                return status.Error(codes.Internal, fmt.Sprintf("Store failed checking existence of %s: %v", storeName, err))
+                log.Errorf("%s(%d) -- Error checking existence: %v", time.Now().UTC(), intId, err)
+                return status.Error(codes.Internal, fmt.Sprintf("%s(%d) -- Store failed checking existence of %s: %v", time.Now().UTC(), intId, storeName, err))
             } else if exists {
-                log.Infof("Resource exists in store: %s. Using client digest size: %d", storeName, resource.Digest.GetSizeBytes())
+                log.Infof("%s(%d) -- Resource exists in store: %s. Using client digest size: %d", time.Now().UTC(), intId, storeName, resource.Digest.GetSizeBytes())
                 res := &bytestream.WriteResponse{CommittedSize: resource.Digest.GetSizeBytes()}
                 err = ser.SendAndClose(res)
                 if err != nil {
-                    log.Errorf("Error during SendAndClose() for Existing: %v", err)
-                    return status.Error(codes.Internal, fmt.Sprintf("Failed to SendAndClose WriteResponse: %v", err))
+                    log.Errorf("%s(%d) -- Error during SendAndClose() for Existing: %v", time.Now().UTC(), intId, err)
+                    return status.Error(codes.Internal, fmt.Sprintf("%s(%d) -- Failed to SendAndClose WriteResponse: %v", time.Now().UTC(), intId, err))
                 }
                 return nil
             }
         }

         // Validate subsequent WriteRequest fields
         if wr.GetResourceName() != "" && resourceName != wr.GetResourceName() {
-            log.Errorf("Invalid resource name in subsequent request: %s", wr.GetResourceName())
-            return status.Error(codes.InvalidArgument, fmt.Sprintf("ResourceName %s mismatch with previous %s", wr.GetResourceName(), resourceName))
+            log.Errorf("%s(%d) -- Invalid resource name in subsequent request: %s", time.Now().UTC(), intId, wr.GetResourceName())
+            return status.Error(codes.InvalidArgument, fmt.Sprintf("%s(%d) -- ResourceName %s mismatch with previous %s", time.Now().UTC(), intId, wr.GetResourceName(), resourceName))
         }
         if wr.GetWriteOffset() != committed {
-            log.Error("Invalid write offset")
-            return status.Error(codes.InvalidArgument, fmt.Sprintf("WriteOffset invalid: got %d after committing %d bytes", wr.GetWriteOffset(), committed))
+            log.Errorf("%s(%d) -- Invalid write offset", time.Now().UTC(), intId)
+            return status.Error(codes.InvalidArgument, fmt.Sprintf("%s(%d) -- WriteOffset invalid: got %d after committing %d bytes", time.Now().UTC(), intId, wr.GetWriteOffset(), committed))
         }

         buffer.Write(wr.GetData())
@@ -598,30 +601,30 @@ func (s *casServer) Write(ser bytestream.ByteStream_WriteServer) error {

     // Verify committed length with Digest size
     if committed != resource.Digest.GetSizeBytes() {
-        log.Errorf("Data length/digest mismatch: %d/%d", committed, resource.Digest.GetSizeBytes())
-        return status.Error(codes.InvalidArgument, fmt.Sprintf("Data to be written len: %d mismatch with request Digest size: %d", committed, resource.Digest.GetSizeBytes()))
+        log.Errorf("%s(%d) -- Data length/digest mismatch: %d/%d", time.Now().UTC(), intId, committed, resource.Digest.GetSizeBytes())
+        return status.Error(codes.InvalidArgument, fmt.Sprintf("%s(%d) -- Data to be written len: %d mismatch with request Digest size: %d", time.Now().UTC(), intId, committed, resource.Digest.GetSizeBytes()))
     }
     // Verify buffer SHA with Digest SHA
     if bufferHash := fmt.Sprintf("%x", sha256.Sum256(buffer.Bytes())); bufferHash != resource.Digest.GetHash() {
-        log.Errorf("Data hash/digest hash mismatch: %s/%s", bufferHash, resource.Digest.GetHash())
-        return status.Error(codes.InvalidArgument, fmt.Sprintf("Data to be written did not hash to given Digest"))
+        log.Errorf("%s(%d) -- Data hash/digest hash mismatch: %s/%s", time.Now().UTC(), intId, bufferHash, resource.Digest.GetHash())
+        return status.Error(codes.InvalidArgument, fmt.Sprintf("%s(%d) -- Data to be written did not hash to given Digest", time.Now().UTC(), intId))
     }

     // Write to underlying Store
     err = s.writeToStore(storeName, buffer)
     if err != nil {
-        log.Errorf("Store failed to Write: %v", err)
-        return status.Error(codes.Internal, fmt.Sprintf("Store failed writing to %s: %v", storeName, err))
+        log.Errorf("%s(%d) -- Store failed to Write: %v", time.Now().UTC(), intId, err)
+        return status.Error(codes.Internal, fmt.Sprintf("%s(%d) -- Store failed writing to %s: %v", time.Now().UTC(), intId, storeName, err))
     }

     res := &bytestream.WriteResponse{CommittedSize: committed}
     err = ser.SendAndClose(res)
     if err != nil {
-        log.Errorf("Error during SendAndClose(): %v", err)
-        return status.Error(codes.Internal, fmt.Sprintf("Failed to SendAndClose WriteResponse: %v", err))
+        log.Errorf("%s(%d) -- Error during SendAndClose(): %v", time.Now().UTC(), intId, err)
+        return status.Error(codes.Internal, fmt.Sprintf("%s(%d) -- Failed to SendAndClose WriteResponse: %v", time.Now().UTC(), intId, err))
     }

-    log.Infof("Finished handling Write request for %s, %d bytes", storeName, committed)
+    log.Infof("%s(%d) -- Finished handling Write request for %s, %d bytes", time.Now().UTC(), intId, storeName, committed)
     return nil
 }
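Taken together, the hunks apply one pattern throughout Write: draw a random id once per RPC and prefix it, together with a UTC timestamp, onto every log line and error message, so that log output from interleaved Write streams can be correlated while chasing the flakiness. A stripped-down sketch of that pattern, where the helper and handler below are illustrative and not the PR's code:

package cas

import (
    "fmt"
    "math/rand"
    "time"

    log "github.com/sirupsen/logrus"
)

// tag builds the "<timestamp>(<id>)" prefix used to tie together all log
// lines emitted while serving a single streaming request.
func tag(id int) string {
    return fmt.Sprintf("%s(%d)", time.Now().UTC(), id)
}

// handleWrite stands in for a streaming handler body.
func handleWrite() error {
    intId := rand.Intn(200000) // one id per request, reused on every log line

    log.Debugf("%s -- Received CAS Write request", tag(intId))
    // ... Recv() chunks, validate resource name and offset, write to the Store ...
    log.Infof("%s -- Finished handling Write request", tag(intId))
    return nil
}

As a design note, logrus already timestamps each entry and can carry the id as a structured field (log.WithFields(log.Fields{"writeId": intId})), which would avoid repeating time.Now().UTC() in every call; the inline prefix used in this PR keeps each change confined to the existing log and error call sites.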