add DeleteBulk support, add sufficient deadlines per rename() (#20185)

Deadlines per moveToTrash() allow a more granular timeout
approach for syscalls, instead of a single aggregate timeout.
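
As a rough sketch of the per-call deadline idea (illustrative only; the helper name, the channel-based wrapper, and the 30-second budget below are assumptions, not code from this PR):

package main

import (
	"context"
	"fmt"
	"os"
	"time"
)

// renameWithDeadline gives a single rename its own deadline, so one slow
// disk cannot consume the time budget of every other call.
func renameWithDeadline(ctx context.Context, src, dst string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	done := make(chan error, 1)
	go func() { done <- os.Rename(src, dst) }()

	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return fmt.Errorf("rename %s -> %s: %w", src, dst, ctx.Err())
	}
}

func main() {
	// Each rename gets its own 30s budget instead of sharing one aggregate timeout.
	for _, name := range []string{"obj-1", "obj-2", "obj-3"} {
		if err := renameWithDeadline(context.Background(), name, name+".trash", 30*time.Second); err != nil {
			fmt.Println("rename failed:", err)
		}
	}
}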

This PR also optimizes multipart state cleanup by consolidating
hundreds of multipart network rename() calls into a single
network call.
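
The batching idea can be sketched as follows (the interface and helper names here are hypothetical stand-ins, not MinIO's actual types; only the one-request-for-many-paths pattern is what the PR describes):

package main

import (
	"context"
	"fmt"
)

// diskAPI is a hypothetical stand-in for a storage client; it exists only
// to contrast the per-path and bulk call patterns.
type diskAPI interface {
	Delete(ctx context.Context, volume, path string) error                // one round trip per path
	DeleteBulk(ctx context.Context, volume string, paths ...string) error // one round trip total
}

// cleanupMultipart batches every stale multipart path into a single
// bulk request instead of issuing one network call per entry.
func cleanupMultipart(ctx context.Context, disk diskAPI, volume string, stale []string) error {
	return disk.DeleteBulk(ctx, volume, stale...)
}

type fakeDisk struct{}

func (fakeDisk) Delete(ctx context.Context, volume, path string) error { return nil }

func (fakeDisk) DeleteBulk(ctx context.Context, volume string, paths ...string) error {
	fmt.Printf("deleting %d paths from %q in one request\n", len(paths), volume)
	return nil
}

func main() {
	stale := []string{"upload-1", "upload-2", "upload-3"}
	_ = cleanupMultipart(context.Background(), fakeDisk{}, "multipart", stale)
}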
Harshavardhana
2024-07-29 18:56:40 -07:00
committed by GitHub
parent 673df6d517
commit 80ff907d08
12 changed files with 909 additions and 251 deletions


@@ -737,7 +737,9 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
 	}
 	dErrResp := &DeleteVersionsErrsResp{}
-	if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil {
+	decoder := msgpNewReader(reader)
+	defer readMsgpReaderPoolPut(decoder)
+	if err = dErrResp.DecodeMsg(decoder); err != nil {
 		for i := range errs {
 			errs[i] = toStorageErr(err)
 		}
@@ -745,7 +747,11 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
 	}
 	for i, dErr := range dErrResp.Errs {
-		errs[i] = toStorageErr(dErr)
+		if dErr != "" {
+			errs[i] = toStorageErr(errors.New(dErr))
+		} else {
+			errs[i] = nil
+		}
 	}
 	return errs
@@ -795,6 +801,26 @@ func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path st
 	return verifyResp, nil
 }
+func (client *storageRESTClient) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) {
+	values := make(url.Values)
+	values.Set(storageRESTVolume, volume)
+	req := &DeleteBulkReq{Paths: paths}
+	body, err := req.MarshalMsg(nil)
+	if err != nil {
+		return err
+	}
+	respBody, err := client.call(ctx, storageRESTMethodDeleteBulk, values, bytes.NewReader(body), int64(len(body)))
+	if err != nil {
+		return err
+	}
+	defer xhttp.DrainBody(respBody)
+	_, err = waitForHTTPResponse(respBody)
+	return toStorageErr(err)
+}
 func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) {
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
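
For context, a call site of the new client method might look roughly like this (a minimal sketch; the wrapper function, volume name, and path list are assumptions, not part of the diff, and how the client and context are obtained is omitted):

// cleanupStaleUploads is a hypothetical caller; only the DeleteBulk
// signature matches the method added in this commit.
func cleanupStaleUploads(ctx context.Context, client *storageRESTClient) error {
	stale := []string{
		"uploads/abc/part.1",
		"uploads/abc/part.2",
		"uploads/abc/xl.meta",
	}
	// One network round trip removes every listed path.
	return client.DeleteBulk(ctx, "multipart", stale...)
}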