Mirror of https://github.com/minio/minio.git, synced 2026-02-05 02:10:14 -05:00

Compare commits: RELEASE.20... → release
57 Commits
Commits (SHA1):

404d2ebe3f 46964eb764 bfab990c33 94018588fe 8b76ba8d5d 7eb7f65e48 c608c0688a 41a9d1d778
e21e80841e 98c792bbeb f687ba53bc e3da59c923 781b9b051c 438becfde8 16ef338649 3242847ec0
cf87303094 90d8ec6310 b383522743 6d42036dd4 b4d8bcf644 d7f32ad649 906d68c356 749e9c5771
410e84d273 75741dbf4a fad7b27f15 79564656eb 21cfc4aa49 e80239a661 6a2ed44095 8adfeb0d84
d23485e571 da70e6ddf6 922c7b57f5 726d80dbb7 23b03dadb8 7b3719c17b 9a6487319a 94ff624242
98ff91b484 4d86384dc7 27eb4ae3bc b5dcaaccb4 0843280dc3 61a1ea60c2 b92a220db1 be5910b87e
51a8619a79 d46c3c07a8 14d89eaae4 32b088a2ff eed3b66d98 add3cd4e44 60b0f2324e 0eb146e1b2
b379ca3bb0
Makefile (2 changes)
```diff
@@ -38,7 +38,7 @@ lint:
 check: test
 test: verifiers build
 	@echo "Running unit tests"
-	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
 
 test-race: verifiers build
 	@echo "Running unit tests under -race"
```
```diff
@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
 export SECRET_KEY="minio123"
 export ENABLE_HTTPS=0
 export GO111MODULE=on
+export GOGC=25
 
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
@@ -28,6 +28,8 @@ WORK_DIR="$PWD/.verify-$RANDOM"
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
 
+export GOGC=25
+
 function start_minio_3_node() {
     export MINIO_ROOT_USER=minio
     export MINIO_ROOT_PASSWORD=minio123
```
```diff
@@ -172,7 +172,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
     }
 
     if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
-        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+        switch err.(type) {
+        case BucketRemoteConnectionErr:
+            writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
+        default:
+            writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+        }
         return
     }
     targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
```
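The hunk above replaces a blanket `toAPIError` call with a type switch so a remote-connection failure maps to a replication-specific error code. A minimal, self-contained sketch of that pattern; the error type's shape and the string codes here are illustrative, not MinIO's actual definitions:

```go
package main

import (
	"errors"
	"fmt"
)

// BucketRemoteConnectionErr mimics a typed error that carries enough
// context to select a more specific API error code.
type BucketRemoteConnectionErr struct {
	Bucket string
	Err    error
}

func (e BucketRemoteConnectionErr) Error() string {
	return fmt.Sprintf("remote target connection failed for bucket %q: %v", e.Bucket, e.Err)
}

// apiErrorCode picks a response code by concrete error type, with a
// generic fallback for everything else.
func apiErrorCode(err error) string {
	switch err.(type) {
	case BucketRemoteConnectionErr:
		return "ReplicationRemoteConnectionError"
	default:
		return "InternalError"
	}
}

func main() {
	err := BucketRemoteConnectionErr{Bucket: "photos", Err: errors.New("dial tcp: timeout")}
	fmt.Println(apiErrorCode(err))                // ReplicationRemoteConnectionError
	fmt.Println(apiErrorCode(errors.New("boom"))) // InternalError
}
```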
```diff
@@ -19,6 +19,7 @@ package cmd
 import (
     "context"
     "encoding/json"
+    "errors"
     "io"
     "io/ioutil"
     "net/http"
```
```diff
@@ -157,6 +158,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
     if !implicitPerm {
         if !globalIAMSys.IsAllowed(iampolicy.Args{
             AccountName:     accessKey,
+            Groups:          cred.Groups,
             Action:          iampolicy.GetUserAdminAction,
             ConditionValues: getConditionValues(r, "", accessKey, claims),
             IsOwner:         owner,
@@ -397,6 +399,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
     }
     if !globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     parentUser,
+        Groups:          cred.Groups,
         Action:          iampolicy.CreateUserAdminAction,
         ConditionValues: getConditionValues(r, "", parentUser, claims),
         IsOwner:         owner,
@@ -409,6 +412,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
 
     if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     accessKey,
+        Groups:          cred.Groups,
         Action:          iampolicy.CreateUserAdminAction,
         ConditionValues: getConditionValues(r, "", accessKey, claims),
         IsOwner:         owner,
@@ -677,6 +681,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
         // https://github.com/golang/go/wiki/SliceTricks#filter-in-place
         if globalIAMSys.IsAllowed(iampolicy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          iampolicy.ListBucketAction,
             BucketName:      bucketName,
             ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -689,6 +694,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 
         if globalIAMSys.IsAllowed(iampolicy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          iampolicy.PutObjectAction,
             BucketName:      bucketName,
             ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
```
```diff
@@ -737,7 +743,19 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
     }
 
     accountName := cred.AccessKey
-    policies, err := globalIAMSys.PolicyDBGet(accountName, false)
+    var policies []string
+    switch globalIAMSys.usersSysType {
+    case MinIOUsersSysType:
+        policies, err = globalIAMSys.PolicyDBGet(accountName, false)
+    case LDAPUsersSysType:
+        parentUser := accountName
+        if cred.ParentUser != "" {
+            parentUser = cred.ParentUser
+        }
+        policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
+    default:
+        err = errors.New("should not happen!")
+    }
     if err != nil {
         logger.LogIf(ctx, err)
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
```
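The LDAP branch above resolves policies against the parent user and every group DN via a variadic lookup. A hedged sketch of how such a lookup can union per-user and per-group policy sets; the data and function name below are hypothetical stand-ins for the `PolicyDBGet(name, isGroup, groups...)` shape:

```go
package main

import "fmt"

var userPolicies = map[string][]string{
	"uid=alice,ou=people": {"readwrite"},
}

var groupPolicies = map[string][]string{
	"cn=devs,ou=groups": {"diagnostics"},
}

// policyDBGet unions the policies attached to a user with those attached
// to any of the supplied groups.
func policyDBGet(name string, groups ...string) []string {
	out := append([]string{}, userPolicies[name]...)
	for _, g := range groups {
		out = append(out, groupPolicies[g]...)
	}
	return out
}

func main() {
	fmt.Println(policyDBGet("uid=alice,ou=people", "cn=devs,ou=groups"))
	// [readwrite diagnostics]
}
```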
```diff
@@ -24,6 +24,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "math/rand"
     "net/http"
     "net/url"
     "os"
@@ -263,6 +264,7 @@ type ServerHTTPStats struct {
     CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
     TotalS3Requests   ServerHTTPAPIStats `json:"totalS3Requests"`
     TotalS3Errors     ServerHTTPAPIStats `json:"totalS3Errors"`
+    TotalS3Canceled   ServerHTTPAPIStats `json:"totalS3Canceled"`
 }
 
 // ServerInfoData holds storage, connections and other
```
```diff
@@ -1469,30 +1471,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
         return
     }
 
+    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
     setEventStreamHeaders(w)
-    reportCh := make(chan bandwidth.Report, 1)
+    reportCh := make(chan bandwidth.Report)
     keepAliveTicker := time.NewTicker(500 * time.Millisecond)
     defer keepAliveTicker.Stop()
     bucketsRequestedString := r.URL.Query().Get("buckets")
     bucketsRequested := strings.Split(bucketsRequestedString, ",")
     go func() {
+        defer close(reportCh)
         for {
-            reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
             select {
             case <-ctx.Done():
                 return
-            default:
-                time.Sleep(2 * time.Second)
+            case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
+                time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
             }
         }
     }()
     for {
         select {
-        case report := <-reportCh:
-            enc := json.NewEncoder(w)
-            err := enc.Encode(report)
-            if err != nil {
-                writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+        case report, ok := <-reportCh:
+            if !ok {
+                return
+            }
+            if err := json.NewEncoder(w).Encode(report); err != nil {
+                writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
                 return
             }
             w.(http.Flusher).Flush()
```
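The rewritten handler moves the channel send inside the `select`, so a blocked send can be abandoned the moment the request context is cancelled, and the consumer detects producer shutdown through the closed channel. A minimal standalone sketch of that producer/consumer shape:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// produce mirrors the handler's pattern: the send itself is a select
// case, so cancellation can interrupt even a blocked send, and closing
// the channel tells the consumer to stop.
func produce(ctx context.Context, ch chan<- int) {
	defer close(ch)
	for i := 0; ; i++ {
		select {
		case <-ctx.Done():
			return // never strands the goroutine on a blocked send
		case ch <- i:
			time.Sleep(10 * time.Millisecond)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond)
	defer cancel()

	ch := make(chan int)
	go produce(ctx, ch)
	for v := range ch { // exits cleanly once produce closes ch
		fmt.Println(v)
	}
}
```

The old form sent before checking `ctx.Done()`, which could leave the goroutine parked on the send forever once the client disconnected.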
```diff
@@ -1559,6 +1564,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
     objectAPI := newObjectLayerFn()
     if objectAPI != nil {
         mode = madmin.ItemOnline
+
         // Load data usage
         dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
         if err == nil {
```
```diff
@@ -69,7 +69,8 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
     }
 
     objLayer := newObjectLayerFn()
-    if objLayer != nil {
+    if objLayer != nil && !globalIsGateway {
+        // only need Disks information in server mode.
         storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
         props.State = string(madmin.ItemOnline)
         props.Disks = storageInfo.Disks
```
```diff
@@ -162,6 +162,7 @@ func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolic
     }
     if globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     cred.AccessKey,
+        Groups:          cred.Groups,
         Action:          iampolicy.Action(action),
         ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
         IsOwner:         owner,
@@ -272,7 +273,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
 // for authenticated requests validates IAM policies.
 // returns APIErrorCode if any to be replied to the client.
 func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
-    _, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
+    _, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
     return s3Err
 }
```
```diff
@@ -282,14 +283,13 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
 // for authenticated requests validates IAM policies.
 // returns APIErrorCode if any to be replied to the client.
 // Additionally returns the accessKey used in the request, and if this request is by an admin.
-func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
-    var cred auth.Credentials
+func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
     switch getRequestAuthType(r) {
     case authTypeUnknown, authTypeStreamingSigned:
-        return accessKey, owner, ErrSignatureVersionNotSupported
+        return cred, owner, ErrSignatureVersionNotSupported
     case authTypePresignedV2, authTypeSignedV2:
         if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
-            return accessKey, owner, s3Err
+            return cred, owner, s3Err
         }
         cred, owner, s3Err = getReqAccessKeyV2(r)
     case authTypeSigned, authTypePresigned:
@@ -299,18 +299,18 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
             region = ""
         }
         if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
-            return accessKey, owner, s3Err
+            return cred, owner, s3Err
         }
         cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
     }
     if s3Err != ErrNone {
-        return accessKey, owner, s3Err
+        return cred, owner, s3Err
     }
 
     var claims map[string]interface{}
     claims, s3Err = checkClaimsFromToken(r, cred)
     if s3Err != ErrNone {
-        return accessKey, owner, s3Err
+        return cred, owner, s3Err
     }
 
     // LocationConstraint is valid only for CreateBucketAction.
@@ -320,7 +320,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
         payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
         if err != nil {
             logger.LogIf(ctx, err, logger.Application)
-            return accessKey, owner, ErrMalformedXML
+            return cred, owner, ErrMalformedXML
         }
 
         // Populate payload to extract location constraint.
@@ -329,7 +329,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
         var s3Error APIErrorCode
         locationConstraint, s3Error = parseLocationConstraint(r)
         if s3Error != ErrNone {
-            return accessKey, owner, s3Error
+            return cred, owner, s3Error
         }
 
         // Populate payload again to handle it in HTTP handler.
@@ -350,7 +350,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
             ObjectName:      objectName,
         }) {
             // Request is allowed return the appropriate access key.
-            return cred.AccessKey, owner, ErrNone
+            return cred, owner, ErrNone
         }
 
         if action == policy.ListBucketVersionsAction {
@@ -365,15 +365,16 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
                 ObjectName:      objectName,
             }) {
                 // Request is allowed return the appropriate access key.
-                return cred.AccessKey, owner, ErrNone
+                return cred, owner, ErrNone
             }
         }
 
-        return cred.AccessKey, owner, ErrAccessDenied
+        return cred, owner, ErrAccessDenied
     }
 
     if globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     cred.AccessKey,
+        Groups:          cred.Groups,
         Action:          iampolicy.Action(action),
         BucketName:      bucketName,
         ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -382,7 +383,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
         Claims:          claims,
     }) {
         // Request is allowed return the appropriate access key.
-        return cred.AccessKey, owner, ErrNone
+        return cred, owner, ErrNone
     }
 
     if action == policy.ListBucketVersionsAction {
@@ -390,6 +391,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
         // verify as a fallback.
         if globalIAMSys.IsAllowed(iampolicy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          iampolicy.ListBucketAction,
             BucketName:      bucketName,
             ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -398,11 +400,11 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
             Claims:          claims,
         }) {
             // Request is allowed return the appropriate access key.
-            return cred.AccessKey, owner, ErrNone
+            return cred, owner, ErrNone
         }
     }
 
-    return cred.AccessKey, owner, ErrAccessDenied
+    return cred, owner, ErrAccessDenied
 }
 
 // Verify if request has valid AWS Signature Version '2'.
```
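The rename from `checkRequestAuthTypeToAccessKey` to `checkRequestAuthTypeCredential` returns the full credential instead of a bare access key, so call sites can read the group list without a second lookup. A simplified sketch of the new calling convention, with types reduced to essentials:

```go
package main

import "fmt"

type Credentials struct {
	AccessKey string
	Groups    []string
}

// checkAuth stands in for checkRequestAuthTypeCredential: it returns
// the whole credential so callers can use AccessKey and Groups together.
func checkAuth() (cred Credentials, owner bool, err error) {
	return Credentials{AccessKey: "minio", Groups: []string{"admins"}}, true, nil
}

func main() {
	cred, owner, err := checkAuth()
	if err != nil {
		panic(err)
	}
	// Previously only cred.AccessKey was available at this point.
	fmt.Println(cred.AccessKey, cred.Groups, owner)
}
```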
```diff
@@ -549,6 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
         if retMode == objectlock.RetGovernance && byPassSet {
             byPassSet = globalPolicySys.IsAllowed(policy.Args{
                 AccountName:     cred.AccessKey,
+                Groups:          cred.Groups,
                 Action:          policy.BypassGovernanceRetentionAction,
                 BucketName:      bucketName,
                 ConditionValues: conditions,
@@ -558,6 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
         }
         if globalPolicySys.IsAllowed(policy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          policy.PutObjectRetentionAction,
             BucketName:      bucketName,
             ConditionValues: conditions,
@@ -581,6 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
     if retMode == objectlock.RetGovernance && byPassSet {
         byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          iampolicy.BypassGovernanceRetentionAction,
             BucketName:      bucketName,
             ObjectName:      objectName,
@@ -591,6 +596,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
     }
     if globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     cred.AccessKey,
+        Groups:          cred.Groups,
         Action:          iampolicy.PutObjectRetentionAction,
         BucketName:      bucketName,
         ConditionValues: conditions,
@@ -646,6 +652,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
     if cred.AccessKey == "" {
         if globalPolicySys.IsAllowed(policy.Args{
             AccountName:     cred.AccessKey,
+            Groups:          cred.Groups,
             Action:          policy.Action(action),
             BucketName:      bucketName,
             ConditionValues: getConditionValues(r, "", "", nil),
@@ -659,6 +666,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
 
     if globalIAMSys.IsAllowed(iampolicy.Args{
         AccountName:     cred.AccessKey,
+        Groups:          cred.Groups,
         Action:          action,
         BucketName:      bucketName,
         ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
```
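Every `IsAllowed` call in this series now carries `Groups: cred.Groups`, so a policy attached to a group grants the same access as one attached directly to the user. A toy evaluator showing why the extra field matters; the policy store and naming scheme below are illustrative only:

```go
package main

import "fmt"

type Args struct {
	AccountName string
	Groups      []string
	Action      string
}

var allowed = map[string][]string{ // principal -> permitted actions
	"alice":    {"s3:GetObject"},
	"grp:devs": {"s3:PutObject"},
}

func isAllowed(a Args) bool {
	check := func(p string) bool {
		for _, act := range allowed[p] {
			if act == a.Action {
				return true
			}
		}
		return false
	}
	if check(a.AccountName) {
		return true
	}
	for _, g := range a.Groups {
		if check("grp:" + g) { // group policies only apply if Groups is populated
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isAllowed(Args{AccountName: "alice", Action: "s3:PutObject"}))                           // false
	fmt.Println(isAllowed(Args{AccountName: "alice", Groups: []string{"devs"}, Action: "s3:PutObject"})) // true
}
```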
```diff
@@ -37,7 +37,7 @@ func (err *errHashMismatch) Error() string {
 
 // Calculates bitrot in chunks and writes the hash into the stream.
 type streamingBitrotWriter struct {
-    iow       *io.PipeWriter
+    iow       io.WriteCloser
     h         hash.Hash
     shardSize int64
     canClose  chan struct{} // Needed to avoid race explained in Close() call.
@@ -71,9 +71,10 @@ func (b *streamingBitrotWriter) Close() error {
 }
 
 // Returns streaming bitrot writer implementation.
-func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
+func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
     r, w := io.Pipe()
     h := algo.New()
+
     bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
     go func() {
         totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
@@ -81,8 +82,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
             bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
             totalFileSize = bitrotSumsTotalSize + length
         }
-        err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
-        r.CloseWithError(err)
+        r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
         close(bw.canClose)
     }()
     return bw
```
```diff
@@ -96,9 +96,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
     return
 }
 
-func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
+func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
     if algo == HighwayHash256S {
-        return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
+        return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
     }
     return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
 }
```
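The streaming writer hashes each shard-sized chunk and interleaves checksums into the stream through an `io.Pipe`, with `canClose` guarding the race between `Close` and the background `CreateFile`. A compact, self-contained sketch of the hash-per-chunk idea, with no disk layer and SHA-256 standing in for the real algorithm:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
)

// chunkHashWriter writes sha256(chunk) followed by the chunk itself,
// the basic shape of a streaming bitrot-protected stream.
type chunkHashWriter struct {
	w io.Writer
}

func (c *chunkHashWriter) Write(p []byte) (int, error) {
	sum := sha256.Sum256(p)
	if _, err := c.w.Write(sum[:]); err != nil {
		return 0, err
	}
	return c.w.Write(p)
}

func main() {
	r, w := io.Pipe()
	go func() {
		cw := &chunkHashWriter{w: w}
		_, err := cw.Write([]byte("aaaaaaaaaa")) // one 10-byte shard
		w.CloseWithError(err)                    // propagate the writer's result
	}()
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored %d bytes (32-byte checksum + 10-byte shard)\n", len(out))
}
```

Checking per chunk rather than per file is what lets a read verify only the shards it actually touches.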
```diff
@@ -41,7 +41,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 
     disk.MakeVol(context.Background(), volume)
 
-    writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
+    writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)
 
     _, err = writer.Write([]byte("aaaaaaaaaa"))
     if err != nil {
```
```diff
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+    "bytes"
     "crypto/subtle"
     "encoding/base64"
     "encoding/xml"
@@ -291,7 +292,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 
     listBuckets := objectAPI.ListBuckets
 
-    accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
+    cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
     if s3Error != ErrNone && s3Error != ErrAccessDenied {
         writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
         return
@@ -343,10 +344,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
         // https://github.com/golang/go/wiki/SliceTricks#filter-in-place
         for _, bucketInfo := range bucketsInfo {
             if globalIAMSys.IsAllowed(iampolicy.Args{
-                AccountName:     accessKey,
+                AccountName:     cred.AccessKey,
+                Groups:          cred.Groups,
                 Action:          iampolicy.ListBucketAction,
                 BucketName:      bucketInfo.Name,
-                ConditionValues: getConditionValues(r, "", accessKey, claims),
+                ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
                 IsOwner:         owner,
                 ObjectName:      "",
                 Claims:          claims,
```
```diff
@@ -494,7 +496,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
             object.PurgeTransitioned = goi.TransitionStatus
         }
         if replicateDeletes {
-            delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
+            replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
                 ObjectName: object.ObjectName,
                 VersionID:  object.VersionID,
             }, goi, gerr)
@@ -509,9 +511,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
             }
             if object.VersionID != "" {
                 object.VersionPurgeStatus = Pending
-                if delMarker {
-                    object.DeleteMarkerVersionID = object.VersionID
-                }
             } else {
                 object.DeleteMarkerReplicationStatus = string(replication.Pending)
             }
@@ -555,13 +554,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
     })
     deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
     for i := range errs {
-        dindex := objectsToDelete[ObjectToDelete{
+        // DeleteMarkerVersionID is not used specifically to avoid
+        // lookup errors, since DeleteMarkerVersionID is only
+        // created during DeleteMarker creation when client didn't
+        // specify a versionID.
+        objToDel := ObjectToDelete{
             ObjectName:                    dObjects[i].ObjectName,
             VersionID:                     dObjects[i].VersionID,
             VersionPurgeStatus:            dObjects[i].VersionPurgeStatus,
             DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
             PurgeTransitioned:             dObjects[i].PurgeTransitioned,
-        }]
+        }
+        dindex := objectsToDelete[objToDel]
         if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
             if replicateDeletes {
                 dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
```
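The fix above works because `ObjectToDelete` is used as a comparable map key: a lookup succeeds only when every field matches, which is why the comment warns against populating `DeleteMarkerVersionID` on one side only. A tiny demonstration of that property (field names hypothetical):

```go
package main

import "fmt"

type key struct {
	Name      string
	VersionID string
	Extra     string // any field set on only one side breaks the lookup
}

func main() {
	index := map[key]int{
		{Name: "obj", VersionID: "v1"}: 7,
	}
	_, ok := index[key{Name: "obj", VersionID: "v1", Extra: "x"}]
	fmt.Println(ok) // false: all fields participate in equality
	_, ok = index[key{Name: "obj", VersionID: "v1"}]
	fmt.Println(ok) // true
}
```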
```diff
@@ -617,12 +621,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 
         eventName := event.ObjectRemovedDelete
         objInfo := ObjectInfo{
-            Name:      dobj.ObjectName,
-            VersionID: dobj.VersionID,
+            Name:         dobj.ObjectName,
+            VersionID:    dobj.VersionID,
+            DeleteMarker: dobj.DeleteMarker,
         }
 
-        if dobj.DeleteMarker {
-            objInfo.DeleteMarker = dobj.DeleteMarker
+        if objInfo.DeleteMarker {
             objInfo.VersionID = dobj.DeleteMarkerVersionID
             eventName = event.ObjectRemovedDeleteMarkerCreated
         }
@@ -925,10 +929,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 
     // Handle policy if it is set.
     if len(policyBytes) > 0 {
-
-        postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
+        postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
         if err != nil {
-            writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
+            errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
+            errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
+            writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
             return
         }
```
```diff
@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
         }
     }
 
+    authType := getRequestAuthType(r)
+    var signatureVersion string
+    switch authType {
+    case authTypeSignedV2, authTypePresignedV2:
+        signatureVersion = signV2Algorithm
+    case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
+        signatureVersion = signV4Algorithm
+    }
+
+    var authtype string
+    switch authType {
+    case authTypePresignedV2, authTypePresigned:
+        authtype = "REST-QUERY-STRING"
+    case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
+        authtype = "REST-HEADER"
+    case authTypePostPolicy:
+        authtype = "POST"
+    }
+
     args := map[string][]string{
-        "CurrentTime":     {currTime.Format(time.RFC3339)},
-        "EpochTime":       {strconv.FormatInt(currTime.Unix(), 10)},
-        "SecureTransport": {strconv.FormatBool(r.TLS != nil)},
-        "SourceIp":        {handlers.GetSourceIP(r)},
-        "UserAgent":       {r.UserAgent()},
-        "Referer":         {r.Referer()},
-        "principaltype":   {principalType},
-        "userid":          {username},
-        "username":        {username},
-        "versionid":       {vid},
+        "CurrentTime":      {currTime.Format(time.RFC3339)},
+        "EpochTime":        {strconv.FormatInt(currTime.Unix(), 10)},
+        "SecureTransport":  {strconv.FormatBool(r.TLS != nil)},
+        "SourceIp":         {handlers.GetSourceIP(r)},
+        "UserAgent":        {r.UserAgent()},
+        "Referer":          {r.Referer()},
+        "principaltype":    {principalType},
+        "userid":           {username},
+        "username":         {username},
+        "versionid":        {vid},
+        "signatureversion": {signatureVersion},
+        "authType":         {authtype},
     }
 
     if lc != "" {
```
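This hunk derives two new policy condition keys from how the request was authenticated. A self-contained sketch of classifying a request the same way; the detection below (a presigned-URL query parameter) is a simplification of the real signature inspection, and the condition map shape is illustrative:

```go
package main

import (
	"fmt"
	"net/http"
)

// conditionValues sketches how per-request facts become policy
// condition keys; the classification logic here is illustrative only.
func conditionValues(r *http.Request) map[string][]string {
	sigVersion, authType := "AWS4-HMAC-SHA256", "REST-HEADER"
	if r.URL.Query().Get("X-Amz-Signature") != "" {
		authType = "REST-QUERY-STRING" // presigned URL
	}
	return map[string][]string{
		"SecureTransport":  {fmt.Sprint(r.TLS != nil)},
		"UserAgent":        {r.UserAgent()},
		"signatureversion": {sigVersion},
		"authType":         {authType},
	}
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "https://play.min.io/b/o?X-Amz-Signature=abc", nil)
	fmt.Println(conditionValues(r)["authType"]) // [REST-QUERY-STRING]
}
```

With these keys in place, a bucket policy can, for example, deny presigned access while still allowing header-signed requests.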
```diff
@@ -175,10 +175,10 @@ func isStandardHeader(matchHeaderKey string) bool {
 }
 
 // returns whether object version is a deletemarker and if object qualifies for replication
-func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
+func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (replicate, sync bool) {
     rcfg, err := getReplicationConfig(ctx, bucket)
     if err != nil || rcfg == nil {
-        return false, false, sync
+        return false, sync
     }
     opts := replication.ObjectOpts{
         Name: dobj.ObjectName,
@@ -198,19 +198,19 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
             validReplStatus = true
         }
         if oi.DeleteMarker && (validReplStatus || replicate) {
-            return oi.DeleteMarker, true, sync
+            return true, sync
         }
         // can be the case that other cluster is down and duplicate `mc rm --vid`
         // is issued - this still needs to be replicated back to the other target
-        return oi.DeleteMarker, oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
+        return oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
     }
     tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
     // the target online status should not be used here while deciding
     // whether to replicate deletes as the target could be temporarily down
     if tgt == nil {
-        return oi.DeleteMarker, false, false
+        return false, false
     }
-    return oi.DeleteMarker, replicate, tgt.replicateSync
+    return replicate, tgt.replicateSync
 }
 
 // replicate deletes to the designated replication target if replication configuration
```
```diff
@@ -697,19 +697,25 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
         if totalNodesCount == 0 {
             totalNodesCount = 1 // For standalone erasure coding
         }
-        b := target.BandwidthLimit / int64(totalNodesCount)
 
         var headerSize int
         for k, v := range putOpts.Header() {
             headerSize += len(k) + len(v)
         }
 
-        // r takes over closing gr.
-        r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
+        opts := &bandwidth.MonitorReaderOptions{
+            Bucket:               objInfo.Bucket,
+            Object:               objInfo.Name,
+            HeaderSize:           headerSize,
+            BandwidthBytesPerSec: target.BandwidthLimit / int64(totalNodesCount),
+            ClusterBandwidth:     target.BandwidthLimit,
+        }
+
+        r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
         if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
             replicationStatus = replication.Failed
             logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
         }
+        defer r.Close()
     }
 
     objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
```
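The monitored-reader constructor trades a long positional parameter list for an options struct, which keeps call sites self-documenting and lets new knobs be added without breaking callers. A sketch of the pattern; the type and function names below mirror the diff but the implementation is a stub:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// MonitorReaderOptions mimics an options struct replacing eight
// positional arguments (field set illustrative).
type MonitorReaderOptions struct {
	Bucket               string
	Object               string
	HeaderSize           int
	BandwidthBytesPerSec int64
}

func newMonitoredReader(r io.Reader, opts *MonitorReaderOptions) io.Reader {
	fmt.Printf("monitoring %s/%s at %d B/s\n", opts.Bucket, opts.Object, opts.BandwidthBytesPerSec)
	return r // a real implementation would wrap r with throttling/accounting
}

func main() {
	r := newMonitoredReader(strings.NewReader("payload"), &MonitorReaderOptions{
		Bucket:               "photos",
		Object:               "img.jpg",
		BandwidthBytesPerSec: 1 << 20,
	})
	b, _ := io.ReadAll(r)
	fmt.Println(len(b))
}
```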
```diff
@@ -100,7 +100,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
         if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
             return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
         }
-        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
     }
     if tgt.Type == madmin.ReplicationService {
         if !globalIsErasure {
@@ -111,7 +111,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
         }
         vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
         if err != nil {
-            return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+            return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
         }
         if vcfg.Status != string(versioning.Enabled) {
             return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -124,7 +124,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
         if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
             return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
         }
-        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
     }
     if vcfg.Status != string(versioning.Enabled) {
         return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
```
```diff
@@ -23,21 +23,23 @@ const (
 
 // Top level common ENVs
 const (
-    EnvAccessKey       = "MINIO_ACCESS_KEY"
-    EnvSecretKey       = "MINIO_SECRET_KEY"
-    EnvRootUser        = "MINIO_ROOT_USER"
-    EnvRootPassword    = "MINIO_ROOT_PASSWORD"
-    EnvAccessKeyOld    = "MINIO_ACCESS_KEY_OLD"
-    EnvSecretKeyOld    = "MINIO_SECRET_KEY_OLD"
-    EnvRootUserOld     = "MINIO_ROOT_USER_OLD"
-    EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
-    EnvBrowser         = "MINIO_BROWSER"
-    EnvDomain          = "MINIO_DOMAIN"
-    EnvRegionName      = "MINIO_REGION_NAME"
-    EnvPublicIPs       = "MINIO_PUBLIC_IPS"
-    EnvFSOSync         = "MINIO_FS_OSYNC"
-    EnvArgs            = "MINIO_ARGS"
-    EnvDNSWebhook      = "MINIO_DNS_WEBHOOK_ENDPOINT"
+    EnvAccessKey             = "MINIO_ACCESS_KEY"
+    EnvSecretKey             = "MINIO_SECRET_KEY"
+    EnvRootUser              = "MINIO_ROOT_USER"
+    EnvRootPassword          = "MINIO_ROOT_PASSWORD"
+    EnvAccessKeyOld          = "MINIO_ACCESS_KEY_OLD"
+    EnvSecretKeyOld          = "MINIO_SECRET_KEY_OLD"
+    EnvRootUserOld           = "MINIO_ROOT_USER_OLD"
+    EnvRootPasswordOld       = "MINIO_ROOT_PASSWORD_OLD"
+    EnvBrowser               = "MINIO_BROWSER"
+    EnvDomain                = "MINIO_DOMAIN"
+    EnvRegionName            = "MINIO_REGION_NAME"
+    EnvPublicIPs             = "MINIO_PUBLIC_IPS"
+    EnvFSOSync               = "MINIO_FS_OSYNC"
+    EnvArgs                  = "MINIO_ARGS"
+    EnvDNSWebhook            = "MINIO_DNS_WEBHOOK_ENDPOINT"
+    EnvLogPosixTimes         = "MINIO_LOG_POSIX_TIMES"
+    EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"
 
     EnvUpdate = "MINIO_UPDATE"
```
```diff
@@ -81,8 +81,6 @@ var (
     errInvalidInternalIV            = Errorf("The internal encryption IV is malformed")
     errInvalidInternalSealAlgorithm = Errorf("The internal seal algorithm is invalid and not supported")
-
-    errMissingUpdatedKey = Errorf("The key update returned no error but also no sealed key")
 )
 
 var (
```
```diff
@@ -181,7 +181,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
 // named key referenced by keyID. It also binds the generated key
 // cryptographically to the provided context.
 func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
-    context := ctx.AppendTo(make([]byte, 0, 128))
+    context, err := ctx.MarshalText()
+    if err != nil {
+        return key, nil, err
+    }
 
     var plainKey []byte
     plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
@@ -203,7 +206,10 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
 // The context must be same context as the one provided while
 // generating the plaintext key / sealedKey.
 func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
-    context := ctx.AppendTo(make([]byte, 0, 128))
+    context, err := ctx.MarshalText()
+    if err != nil {
+        return key, err
+    }
 
     var plainKey []byte
     plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)
```
```diff
@@ -21,7 +21,6 @@ import (
     "crypto/rand"
     "crypto/sha256"
     "errors"
     "fmt"
-    "io"
     "sort"
 
@@ -33,74 +32,29 @@ import (
 // associated with a certain object.
 type Context map[string]string
 
-// WriteTo writes the context in a canonical from to w.
-// It returns the number of bytes and the first error
-// encounter during writing to w, if any.
-//
-// WriteTo sorts the context keys and writes the sorted
-// key-value pairs as canonical JSON object to w.
-// Sort order is based on the un-escaped keys.
-//
-// Note that neither keys nor values are escaped for JSON.
-func (c Context) WriteTo(w io.Writer) (n int64, err error) {
-    sortedKeys := make(sort.StringSlice, 0, len(c))
-    for k := range c {
-        sortedKeys = append(sortedKeys, k)
-    }
-    sort.Sort(sortedKeys)
-
-    escape := func(s string) string {
-        buf := bytes.NewBuffer(make([]byte, 0, len(s)))
-        EscapeStringJSON(buf, s)
-        return buf.String()
-    }
-
-    nn, err := io.WriteString(w, "{")
-    if err != nil {
-        return n + int64(nn), err
-    }
-    n += int64(nn)
-    for i, k := range sortedKeys {
-        s := fmt.Sprintf("\"%s\":\"%s\",", escape(k), escape(c[k]))
-        if i == len(sortedKeys)-1 {
-            s = s[:len(s)-1] // remove last ','
-        }
-
-        nn, err = io.WriteString(w, s)
-        if err != nil {
-            return n + int64(nn), err
-        }
-        n += int64(nn)
-    }
-    nn, err = io.WriteString(w, "}")
-    return n + int64(nn), err
-}
-
-// AppendTo appends the context in a canonical from to dst.
-//
-// AppendTo sorts the context keys and writes the sorted
-// key-value pairs as canonical JSON object to w.
-// Sort order is based on the un-escaped keys.
-//
-// Note that neither keys nor values are escaped for JSON.
-func (c Context) AppendTo(dst []byte) (output []byte) {
+// MarshalText returns a canonical text representation of
+// the Context.
+//
+// MarshalText sorts the context keys and writes the sorted
+// key-value pairs as canonical JSON object. The sort order
+// is based on the un-escaped keys.
+func (c Context) MarshalText() ([]byte, error) {
     if len(c) == 0 {
-        return append(dst, '{', '}')
+        return []byte{'{', '}'}, nil
     }
 
-    // out should not escape.
-    out := bytes.NewBuffer(dst)
-
-    // No need to copy+sort
+    // Pre-allocate a buffer - 128 bytes is an arbitrary
+    // heuristic value that seems like a good starting size.
+    var b = bytes.NewBuffer(make([]byte, 0, 128))
     if len(c) == 1 {
         for k, v := range c {
-            out.WriteString(`{"`)
-            EscapeStringJSON(out, k)
-            out.WriteString(`":"`)
-            EscapeStringJSON(out, v)
-            out.WriteString(`"}`)
+            b.WriteString(`{"`)
+            EscapeStringJSON(b, k)
+            b.WriteString(`":"`)
+            EscapeStringJSON(b, v)
+            b.WriteString(`"}`)
         }
-        return out.Bytes()
+        return b.Bytes(), nil
     }
 
     sortedKeys := make([]string, 0, len(c))
```
```diff
@@ -109,19 +63,19 @@ func (c Context) AppendTo(dst []byte) (output []byte) {
     }
     sort.Strings(sortedKeys)
 
-    out.WriteByte('{')
+    b.WriteByte('{')
     for i, k := range sortedKeys {
-        out.WriteByte('"')
-        EscapeStringJSON(out, k)
-        out.WriteString(`":"`)
-        EscapeStringJSON(out, c[k])
-        out.WriteByte('"')
+        b.WriteByte('"')
+        EscapeStringJSON(b, k)
+        b.WriteString(`":"`)
+        EscapeStringJSON(b, c[k])
+        b.WriteByte('"')
         if i < len(sortedKeys)-1 {
-            out.WriteByte(',')
+            b.WriteByte(',')
         }
     }
-    out.WriteByte('}')
-    return out.Bytes()
+    b.WriteByte('}')
+    return b.Bytes(), nil
 }
 
 // KMS represents an active and authenticted connection
```
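`MarshalText() ([]byte, error)` matches the standard `encoding.TextMarshaler` interface, so the KMS context now serializes canonically through any stdlib machinery that honors that interface. A small self-contained check of the idea, with the escaping simplified to `%q` for brevity:

```go
package main

import (
	"encoding"
	"fmt"
	"sort"
	"strings"
)

type Context map[string]string

// MarshalText emits keys in sorted order so equal maps always produce
// byte-identical output (real code escapes strings for JSON).
func (c Context) MarshalText() ([]byte, error) {
	keys := make([]string, 0, len(c))
	for k := range c {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, fmt.Sprintf("%q:%q", k, c[k]))
	}
	return []byte("{" + strings.Join(parts, ",") + "}"), nil
}

var _ encoding.TextMarshaler = Context{} // compile-time interface check

func main() {
	b, _ := Context{"b": "2", "a": "1"}.MarshalText()
	fmt.Println(string(b)) // {"a":"1","b":"2"} regardless of map iteration order
}
```

Determinism matters here because the serialized context is bound cryptographically to the sealed key: any byte difference would make unsealing fail.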
```diff
@@ -225,9 +179,11 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
     if context == nil {
         context = Context{}
     }
+    ctxBytes, _ := context.MarshalText()
 
     mac := hmac.New(sha256.New, kms.masterKey[:])
     mac.Write([]byte(keyID))
-    mac.Write(context.AppendTo(make([]byte, 0, 128)))
+    mac.Write(ctxBytes)
     mac.Sum(key[:0])
     return key
 }
```
```diff
@@ -18,7 +18,6 @@ import (
     "bytes"
     "fmt"
     "path"
-    "strings"
     "testing"
 )
 
@@ -61,7 +60,7 @@ func TestMasterKeyKMS(t *testing.T) {
     }
 }
 
-var contextWriteToTests = []struct {
+var contextMarshalTextTests = []struct {
     Context      Context
     ExpectedJSON string
 }{
@@ -76,43 +75,29 @@ var contextWriteToTests = []struct {
     6: {Context: Context{"a": "<>&"}, ExpectedJSON: `{"a":"\u003c\u003e\u0026"}`},
 }
 
-func TestContextWriteTo(t *testing.T) {
-    for i, test := range contextWriteToTests {
-        var jsonContext strings.Builder
-        if _, err := test.Context.WriteTo(&jsonContext); err != nil {
-            t.Errorf("Test %d: Failed to encode context: %v", i, err)
-            continue
+func TestContextMarshalText(t *testing.T) {
+    for i, test := range contextMarshalTextTests {
+        text, err := test.Context.MarshalText()
+        if err != nil {
+            t.Fatalf("Test %d: Failed to encode context: %v", i, err)
         }
-        if s := jsonContext.String(); s != test.ExpectedJSON {
-            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
+        if string(text) != test.ExpectedJSON {
+            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, string(text), test.ExpectedJSON)
         }
     }
 }
 
-func TestContextAppendTo(t *testing.T) {
-    for i, test := range contextWriteToTests {
-        dst := make([]byte, 0, 1024)
-        dst = test.Context.AppendTo(dst)
-        if s := string(dst); s != test.ExpectedJSON {
-            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
-        }
-        // Append one more
-        dst = test.Context.AppendTo(dst)
-        if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
-            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
-        }
-    }
-}
-
-func BenchmarkContext_AppendTo(b *testing.B) {
+func BenchmarkContext(b *testing.B) {
     tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%<>&"}}
     for _, test := range tests {
        b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
-            dst := make([]byte, 0, 1024)
            b.ReportAllocs()
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
-                dst = test.AppendTo(dst[:0])
+                _, err := test.MarshalText()
+                if err != nil {
+                    b.Fatal(err)
+                }
            }
        })
    }
```
```diff
@@ -223,7 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
 // named key referenced by keyID. It also binds the generated key
 // cryptographically to the provided context.
 func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
-    context := ctx.AppendTo(make([]byte, 0, 128))
+    context, err := ctx.MarshalText()
+    if err != nil {
+        return key, sealedKey, err
+    }
 
     payload := map[string]interface{}{
         "context": base64.StdEncoding.EncodeToString(context),
@@ -258,7 +261,10 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
 // The context must be same context as the one provided while
 // generating the plaintext key / sealedKey.
 func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
-    context := ctx.AppendTo(make([]byte, 0, 128))
+    context, err := ctx.MarshalText()
+    if err != nil {
+        return key, err
+    }
 
     payload := map[string]interface{}{
         "ciphertext": string(sealedKey),
@@ -282,29 +288,3 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
     copy(key[:], plainKey)
     return key, nil
 }
-
-// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
-// has been changed by the KMS operator - i.e. the master key has been rotated.
-// If the master key hasn't changed since the sealedKey has been created / updated
-// it may return the same sealedKey as rotatedKey.
-//
-// The context must be same context as the one provided while
-// generating the plaintext key / sealedKey.
-func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
-    context := ctx.AppendTo(make([]byte, 0, 128))
-
-    payload := map[string]interface{}{
-        "ciphertext": string(sealedKey),
-        "context":    base64.StdEncoding.EncodeToString(context),
-    }
-    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
-    if err != nil {
-        return nil, Errorf("crypto: client error %w", err)
-    }
-    ciphertext, ok := s.Data["ciphertext"]
-    if !ok {
-        return nil, errMissingUpdatedKey
-    }
-    rotatedKey = []byte(ciphertext.(string))
-    return rotatedKey, nil
-}
```
```diff
@@ -797,42 +797,39 @@ type actionMeta struct {
 
 var applyActionsLogPrefix = color.Green("applyActions:")
 
-// applyActions will apply lifecycle checks on to a scanned item.
-// The resulting size on disk will always be returned.
-// The metadata will be compared to consensus on the object layer before any changes are applied.
-// If no metadata is supplied, -1 is returned if no action is taken.
-func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
+func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
+    if i.debug {
+        if meta.oi.VersionID != "" {
+            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
+        } else {
+            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
+        }
+    }
+    healOpts := madmin.HealOpts{Remove: healDeleteDangling}
+    if meta.bitRotScan {
+        healOpts.ScanMode = madmin.HealDeepScan
+    }
+    res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
+    if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
+        return 0
+    }
+    if err != nil && !errors.Is(err, NotImplemented{}) {
+        logger.LogIf(ctx, err)
+        return 0
+    }
+    return res.ObjectSize
+}
+
+func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
     size, err := meta.oi.GetActualSize()
     if i.debug {
         logger.LogIf(ctx, err)
     }
-    if i.heal {
-        if i.debug {
-            if meta.oi.VersionID != "" {
-                console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
-            } else {
-                console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
-            }
-        }
-        healOpts := madmin.HealOpts{Remove: healDeleteDangling}
-        if meta.bitRotScan {
-            healOpts.ScanMode = madmin.HealDeepScan
-        }
-        res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
-        if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
-            return 0
-        }
-        if err != nil && !errors.Is(err, NotImplemented{}) {
-            logger.LogIf(ctx, err)
-            return 0
-        }
-        size = res.ObjectSize
-    }
     if i.lifeCycle == nil {
         if i.debug {
             console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
         }
-        return size
+        return false, size
     }
@@ -866,7 +863,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
         if i.debug {
             console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
         }
-        return size
+        return false, size
     }
 
     obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
@@ -878,19 +875,18 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
             if !obj.DeleteMarker { // if this is not a delete marker log and return
                 // Do nothing - heal in the future.
                 logger.LogIf(ctx, err)
-                return size
+                return false, size
             }
         case ObjectNotFound, VersionNotFound:
             // object not found or version not found return 0
-            return 0
+            return false, 0
         default:
             // All other errors proceed.
             logger.LogIf(ctx, err)
-            return size
+            return false, size
         }
     }
 
-    var applied bool
     action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
     if action != lifecycle.NoneAction {
         applied = applyLifecycleAction(ctx, action, o, obj)
@@ -899,9 +895,26 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
     if applied {
         switch action {
         case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
-        default: // for all lifecycle actions that remove data
-            return 0
+            return true, size
         }
+        // For all other lifecycle actions that remove data
+        return true, 0
     }
 
-    return size
+    return false, size
 }
+
+// applyActions will apply lifecycle checks on to a scanned item.
+// The resulting size on disk will always be returned.
+// The metadata will be compared to consensus on the object layer before any changes are applied.
+// If no metadata is supplied, -1 is returned if no action is taken.
+func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) int64 {
+    applied, size := i.applyLifecycle(ctx, o, meta)
+    // For instance, an applied lifecycle means we remove/transitioned an object
+    // from the current deployment, which means we don't have to call healing
+    // routine even if we are asked to do via heal flag.
+    if !applied && i.heal {
+        size = i.applyHealing(ctx, o, meta)
+    }
+    return size
+}
```
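After the split, `applyActions` is just a composition: lifecycle runs first, and healing is attempted only when lifecycle did not already remove or transition the object. A skeletal sketch of that control flow with toy stand-ins for the two phases:

```go
package main

import "fmt"

// applyLifecycle reports whether a lifecycle action removed/transitioned
// the object, plus the resulting on-disk size (toy values).
func applyLifecycle(name string) (applied bool, size int64) {
	if name == "expired" {
		return true, 0 // object removed by lifecycle
	}
	return false, 10 // untouched, current size
}

// applyHealing returns the size reported by the heal operation.
func applyHealing(name string) int64 { return 42 }

// applyActions mirrors the refactored composition: healing is skipped
// when lifecycle already acted on the object.
func applyActions(name string, heal bool) int64 {
	applied, size := applyLifecycle(name)
	if !applied && heal {
		size = applyHealing(name)
	}
	return size
}

func main() {
	fmt.Println(applyActions("expired", true)) // 0: lifecycle won, healing skipped
	fmt.Println(applyActions("live", true))    // 42: healed
}
```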
```diff
@@ -522,7 +522,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
         dataUsageBucket,
         name,
         NewPutObjReader(r),
-        ObjectOptions{NoLock: true})
+        ObjectOptions{})
     if isErrBucketNotFound(err) {
         return nil
     }
```
```diff
@@ -144,12 +144,12 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs
             if err == errVolumeNotEmpty {
                 // Attempt to delete bucket again.
                 if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
-                    _ = cleanupDir(ctx, storageDisks[index], bucket, "")
+                    _ = storageDisks[index].Delete(ctx, bucket, "", true)
 
                     _ = storageDisks[index].DeleteVol(ctx, bucket, false)
 
                     // Cleanup all the previously incomplete multiparts.
-                    _ = cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
+                    _ = storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true)
                 }
             }
         }
@@ -170,8 +170,7 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
         if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
             return err
         }
-        err := cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
-        if err != nil && err != errVolumeNotFound {
+        if err := storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true); err != errFileNotFound {
             return err
         }
         return nil
```
```diff
@@ -108,7 +108,7 @@ func TestErasureDecode(t *testing.T) {
         buffer := make([]byte, test.blocksize, 2*test.blocksize)
         writers := make([]io.Writer, len(disks))
         for i, disk := range disks {
-            writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
+            writers[i] = newBitrotWriter(disk, "testbucket", "object",
+                erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize(), false)
         }
         n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
         closeBitrotWriters(writers)
@@ -234,7 +235,8 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
         if disk == nil {
             continue
         }
-        writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, "testbucket", "object",
+            erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
     }
 
     // 10000 iterations with random offsets and lengths.
@@ -304,7 +306,8 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
         if disk == nil {
             continue
         }
-        writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, "testbucket", "object",
+            erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
     }
 
     content := make([]byte, size)
```
```diff
@@ -108,7 +108,7 @@ func TestErasureEncode(t *testing.T) {
         if disk == OfflineDisk {
             continue
         }
-        writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
     }
     n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
     closeBitrotWriters(writers)
@@ -132,14 +132,14 @@ func TestErasureEncode(t *testing.T) {
         if disk == nil {
             continue
         }
-        writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
     }
     for j := range disks[:test.offDisks] {
         switch w := writers[j].(type) {
         case *wholeBitrotWriter:
             w.disk = badDisk{nil}
         case *streamingBitrotWriter:
-            w.iow.CloseWithError(errFaultyDisk)
+            w.iow.(*io.PipeWriter).CloseWithError(errFaultyDisk)
         }
     }
     if test.offDisks > 0 {
```
```diff
@@ -196,7 +196,8 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
             continue
         }
         disk.Delete(context.Background(), "testbucket", "object", false)
-        writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, "testbucket", "object",
+            erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
     }
     _, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
     closeBitrotWriters(writers)
```
```diff
@@ -87,7 +87,8 @@ func TestErasureHeal(t *testing.T) {
         buffer := make([]byte, test.blocksize, 2*test.blocksize)
         writers := make([]io.Writer, len(disks))
         for i, disk := range disks {
-            writers[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
+            writers[i] = newBitrotWriter(disk, "testbucket", "testobject",
+                erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
         }
         _, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
         closeBitrotWriters(writers)
@@ -130,7 +131,8 @@ func TestErasureHeal(t *testing.T) {
             continue
         }
         os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
-        staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
+        staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject",
+            erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
     }
 
     // test case setup is complete - now call Heal()
```
```diff
@@ -422,7 +422,8 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
             continue
         }
         partPath := pathJoin(tmpID, dataDir, fmt.Sprintf("part.%d", partNumber))
-        writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
+        writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
+            tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize(), true)
     }
     err = erasure.Heal(ctx, readers, writers, partSize)
     closeBitrotReaders(readers)
```
```diff
@@ -19,6 +19,7 @@ package cmd
 import (
     "context"
     "errors"
+    "fmt"
     "hash/crc32"
 
     "github.com/minio/minio/cmd/logger"
@@ -134,7 +135,8 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
                 errFileVersionNotFound,
                 errDiskNotFound,
             }...) {
-                logger.LogOnceIf(ctx, err, disks[index].String())
+                logger.LogOnceIf(ctx, fmt.Errorf("Drive %s returned an error (%w)", disks[index], err),
+                    disks[index].String())
             }
         }
         return err
```
@@ -276,7 +276,6 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
|
||||
// disks. `uploads.json` carries metadata regarding on-going multipart
|
||||
// operation(s) on the object.
|
||||
func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
|
||||
|
||||
onlineDisks := er.getDisks()
|
||||
parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
|
||||
if parityBlocks <= 0 {
|
||||
@@ -317,7 +316,12 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
|
||||
// Delete the tmp path later in case we fail to commit (ignore
|
||||
// returned errors) - this will be a no-op in case of a commit
|
||||
// success.
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
|
||||
var online int
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
var partsMetadata = make([]FileInfo, len(onlineDisks))
|
||||
for i := range onlineDisks {
|
||||
@@ -339,6 +343,8 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
|
||||
return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
}
|
||||
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
// Return success.
|
||||
return uploadID, nil
|
||||
}
|
||||
@@ -441,7 +447,12 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
tmpPartPath := pathJoin(tmpPart, partSuffix)
|
||||
|
||||
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tmpPart, writeQuorum)
|
||||
var online int
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tmpPart, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
|
||||
if err != nil {
|
||||
@@ -476,7 +487,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath,
|
||||
erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
|
||||
n, err := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
|
||||
@@ -562,6 +574,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
}
|
||||
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
// Return success.
|
||||
return PartInfo{
|
||||
PartNumber: partID,
|
||||
|
||||
@@ -42,6 +42,15 @@ var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform
|
||||
|
||||
/// Object Operations
|
||||
|
||||
func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
|
||||
for _, onlineDisk := range onlineDisks {
|
||||
if onlineDisk != nil && onlineDisk.IsOnline() {
|
||||
online++
|
||||
}
|
||||
}
|
||||
return online
|
||||
}
|
||||
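The hunks above replace an unconditional deferred delete of the temporary object with a cleanup that runs only when some disks dropped offline during the write, using the new countOnlineDisks helper. A minimal, self-contained sketch of the same pattern (the Disk interface and names here are illustrative stand-ins, not MinIO's API):

package main

import "fmt"

// Disk is a stand-in for MinIO's StorageAPI; only IsOnline matters here.
type Disk interface {
	IsOnline() bool
}

type fakeDisk struct{ online bool }

func (d fakeDisk) IsOnline() bool { return d.online }

// countOnline mirrors countOnlineDisks from the diff: nil slots and
// offline disks are not counted.
func countOnline(disks []Disk) (online int) {
	for _, d := range disks {
		if d != nil && d.IsOnline() {
			online++
		}
	}
	return online
}

// write shows the deferred-cleanup pattern: the temporary object is
// deleted only if the write ended with fewer online disks than slots,
// instead of unconditionally on every return.
func write(disks []Disk, cleanup func()) error {
	var online int
	defer func() {
		if online != len(disks) {
			cleanup() // a disk dropped out; remove the partial temp object
		}
	}()

	// ... erasure-encode and write shards here ...

	online = countOnline(disks)
	return nil
}

func main() {
	disks := []Disk{fakeDisk{true}, fakeDisk{false}}
	_ = write(disks, func() { fmt.Println("cleaning up temp object") })
}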

// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
@@ -116,8 +125,13 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d

    tempObj := mustGetUUID()

    var online int
    // Cleanup in case of xl.meta writing failure
    defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
    defer func() {
        if online != len(onlineDisks) {
            er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
        }
    }()

    // Write unique `xl.meta` for each disk.
    if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
@@ -129,6 +143,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
        return oi, toObjectErr(err, srcBucket, srcObject)
    }

    online = countOnlineDisks(onlineDisks)

    return fi.ToObjectInfo(srcBucket, srcObject), nil
}

@@ -641,11 +657,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
        writeQuorum++
    }

    // Delete temporary object in the event of failure.
    // If PutObject succeeded there would be no temporary
    // object to delete.
    defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)

    // Validate input data size and it can never be less than zero.
    if data.Size() < -1 {
        logger.LogIf(ctx, errInvalidArgument, logger.Application)
@@ -713,12 +724,23 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
    partName := "part.1"
    tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)

    // Delete temporary object in the event of failure.
    // If PutObject succeeded there would be no temporary
    // object to delete.
    var online int
    defer func() {
        if online != len(onlineDisks) {
            er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
        }
    }()

    writers := make([]io.Writer, len(onlineDisks))
    for i, disk := range onlineDisks {
        if disk == nil {
            continue
        }
        writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
        writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj,
            erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
    }

    n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
@@ -805,6 +827,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
            break
        }
    }
    online = countOnlineDisks(onlineDisks)

    return fi.ToObjectInfo(bucket, object), nil
}
@@ -858,8 +881,8 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
    var err error
    defer ObjectPathUpdated(pathJoin(bucket, object))

    tmpObj := mustGetUUID()
    disks := er.getDisks()
    tmpObj := mustGetUUID()
    if bucket == minioMetaTmpBucket {
        tmpObj = object
    } else {
@@ -880,7 +903,7 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
            if disks[index] == nil {
                return errDiskNotFound
            }
            return cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj)
            return disks[index].Delete(ctx, minioMetaTmpBucket, tmpObj, true)
        }, index)
    }


@@ -1563,10 +1563,15 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
        bucket: bucket,
    }

    path := baseDirFromPrefix(prefix)
    if path == "" {
        path = prefix
    }

    if err := listPathRaw(ctx, listPathRawOptions{
        disks:     disks,
        bucket:    bucket,
        path:      baseDirFromPrefix(prefix),
        path:      path,
        recursive: true,
        forwardTo: "",
        minDisks:  1,
@@ -1618,7 +1623,7 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver
    }
}

// GetMetrics - no op
// GetMetrics - returns metrics of local disks
func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
    logger.LogIf(ctx, NotImplemented{})
    return &BackendMetrics{}, NotImplemented{}

@@ -37,6 +37,7 @@ import (
    "github.com/minio/minio/pkg/bpool"
    "github.com/minio/minio/pkg/console"
    "github.com/minio/minio/pkg/dsync"
    "github.com/minio/minio/pkg/env"
    "github.com/minio/minio/pkg/madmin"
    "github.com/minio/minio/pkg/sync/errgroup"
)
@@ -44,10 +45,7 @@ import (
// setsDsyncLockers is encapsulated type for Close()
type setsDsyncLockers [][]dsync.NetLocker

// Information of a new disk connection
type diskConnectInfo struct {
    setIndex int
}
const envMinioDeleteCleanupInterval = "MINIO_DELETE_CLEANUP_INTERVAL"

// erasureSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
@@ -86,7 +84,9 @@ type erasureSets struct {

    poolIndex int

    disksConnectEvent chan diskConnectInfo
    // A channel to send the set index to the MRF when
    // any disk belonging to that set is connected
    setReconnectEvent chan int

    // Distribution algorithm of choice.
    distributionAlgo string
@@ -196,6 +196,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
// and re-arranges the disks in proper position.
func (s *erasureSets) connectDisks() {
    var wg sync.WaitGroup
    var setsJustConnected = make([]bool, s.setCount)
    diskMap := s.getDiskMap()
    for _, endpoint := range s.endpoints {
        diskPath := endpoint.String()
@@ -249,20 +250,30 @@ func (s *erasureSets) connectDisks() {
            }
            disk.SetDiskLoc(s.poolIndex, setIndex, diskIndex)
            s.endpointStrings[setIndex*s.setDriveCount+diskIndex] = disk.String()
            setsJustConnected[setIndex] = true
            s.erasureDisksMu.Unlock()
            go func(setIndex int) {
                idler := time.NewTimer(100 * time.Millisecond)
                defer idler.Stop()

                // Send a new disk connect event with a timeout
                select {
                case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
                case <-idler.C:
                }
            }(setIndex)
        }(endpoint)
    }

    wg.Wait()

    go func() {
        idler := time.NewTimer(100 * time.Millisecond)
        defer idler.Stop()

        for setIndex, justConnected := range setsJustConnected {
            if !justConnected {
                continue
            }

            // Send a new set connect event with a timeout
            idler.Reset(100 * time.Millisecond)
            select {
            case s.setReconnectEvent <- setIndex:
            case <-idler.C:
            }
        }
    }()
}

// monitorAndConnectEndpoints this is a monitoring loop to keep track of disconnected
@@ -351,7 +362,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
        setDriveCount:      setDriveCount,
        defaultParityCount: defaultParityCount,
        format:             format,
        disksConnectEvent:  make(chan diskConnectInfo),
        setReconnectEvent:  make(chan int),
        distributionAlgo:   format.Erasure.DistributionAlgo,
        deploymentID:       uuid.MustParse(format.ID),
        mrfOperations:      make(map[healSource]int),
@@ -413,15 +424,20 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
            getDisks:              s.GetDisks(i),
            getLockers:            s.GetLockers(i),
            getEndpoints:          s.GetEndpoints(i),
            deletedCleanupSleeper: newDynamicSleeper(10, 10*time.Second),
            deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second),
            nsMutex:               mutex,
            bp:                    bp,
            mrfOpCh:               make(chan partialOperation, 10000),
        }
    }

    // cleanup ".trash/" folder every 30 minutes with sufficient sleep cycles.
    const deletedObjectsCleanupInterval = 10 * time.Minute
    // cleanup ".trash/" folder every 5m minutes with sufficient sleep cycles, between each
    // deletes a dynamic sleeper is used with a factor of 10 ratio with max delay between
    // deletes to be 2 seconds.
    deletedObjectsCleanupInterval, err := time.ParseDuration(env.Get(envMinioDeleteCleanupInterval, "5m"))
    if err != nil {
        return nil, err
    }
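The `.trash/` cleanup interval moves from a compile-time constant to the MINIO_DELETE_CLEANUP_INTERVAL environment variable with a "5m" default. A small sketch of the same env-with-default idiom, using the standard library instead of MinIO's pkg/env:

package main

import (
	"fmt"
	"os"
	"time"
)

// envDuration reads a time.Duration from the environment, falling back
// to def when the variable is unset. Parse errors are returned so the
// caller can fail startup, as newErasureSets does in the diff.
func envDuration(key, def string) (time.Duration, error) {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return time.ParseDuration(v)
}

func main() {
	d, err := envDuration("MINIO_DELETE_CLEANUP_INTERVAL", "5m")
	if err != nil {
		fmt.Println("invalid interval:", err)
		return
	}
	fmt.Println("cleanup every", d)
}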

    // start cleanup stale uploads go-routine.
    go s.cleanupStaleUploads(ctx, GlobalStaleUploadsCleanupInterval, GlobalStaleUploadsExpiry)
@@ -653,12 +669,12 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
        }
    }
    select {
    case _, ok := <-s.disksConnectEvent:
    case _, ok := <-s.setReconnectEvent:
        if ok {
            close(s.disksConnectEvent)
            close(s.setReconnectEvent)
        }
    default:
        close(s.disksConnectEvent)
        close(s.setReconnectEvent)
    }
    return nil
}
@@ -1347,47 +1363,25 @@ func (s *erasureSets) maintainMRFList() {
            bucket:    fOp.bucket,
            object:    fOp.object,
            versionID: fOp.versionID,
            opts:      &madmin.HealOpts{Remove: true},
        }] = fOp.failedSet
        s.mrfMU.Unlock()
    }
}

func toSourceChTimed(t *time.Timer, sourceCh chan healSource, u healSource) {
    t.Reset(100 * time.Millisecond)

    // No defer, as we don't know which
    // case will be selected

    select {
    case sourceCh <- u:
    case <-t.C:
        return
    }

    // We still need to check the return value
    // of Stop, because t could have fired
    // between the send on sourceCh and this line.
    if !t.Stop() {
        <-t.C
    }
}
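toSourceChTimed above is deleted in favor of a plain blocking send, but its comments describe a real time.Timer subtlety worth keeping in mind: after a successful send, Stop can return false because the timer already fired, and the channel must then be drained before the timer is reused. A standalone sketch of that idiom:

package main

import (
	"fmt"
	"time"
)

// sendTimed tries to send v on ch, giving up after d. The timer is
// reused across calls, so it must be left fully stopped and drained.
func sendTimed(t *time.Timer, ch chan int, v int, d time.Duration) bool {
	t.Reset(d)
	select {
	case ch <- v:
		// The timer may have fired between the send and this line;
		// if Stop reports false, drain t.C so a later Reset is safe.
		if !t.Stop() {
			<-t.C
		}
		return true
	case <-t.C:
		return false
	}
}

func main() {
	t := time.NewTimer(time.Millisecond)
	defer t.Stop()
	ch := make(chan int, 1)
	fmt.Println(sendTimed(t, ch, 42, time.Millisecond)) // true: buffered send succeeds
	fmt.Println(sendTimed(t, ch, 43, time.Millisecond)) // false: channel is full
}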

// healMRFRoutine monitors new disks connection, sweep the MRF list
// to find objects related to the new disk that needs to be healed.
func (s *erasureSets) healMRFRoutine() {
    // Wait until background heal state is initialized
    bgSeq := mustGetHealSequence(GlobalContext)

    idler := time.NewTimer(100 * time.Millisecond)
    defer idler.Stop()

    for e := range s.disksConnectEvent {
    for setIndex := range s.setReconnectEvent {
        // Get the list of objects related the er.set
        // to which the connected disk belongs.
        var mrfOperations []healSource
        s.mrfMU.Lock()
        for k, v := range s.mrfOperations {
            if v == e.setIndex {
            if v == setIndex {
                mrfOperations = append(mrfOperations, k)
            }
        }
@@ -1395,8 +1389,10 @@ func (s *erasureSets) healMRFRoutine() {

        // Heal objects
        for _, u := range mrfOperations {
            waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)

            // Send an object to background heal
            toSourceChTimed(idler, bgSeq.sourceCh, u)
            bgSeq.sourceCh <- u

            s.mrfMU.Lock()
            delete(s.mrfOperations, u)

@@ -204,6 +204,16 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
                di.HealInfo = &hd
            }
        }
        di.Metrics = &madmin.DiskMetrics{
            APILatencies: make(map[string]string),
            APICalls:     make(map[string]uint64),
        }
        for k, v := range info.Metrics.APILatencies {
            di.Metrics.APILatencies[k] = v
        }
        for k, v := range info.Metrics.APICalls {
            di.Metrics.APICalls[k] = v
        }
        if info.Total > 0 {
            di.Utilization = float64(info.Used / info.Total * 100)
        }

@@ -233,10 +233,15 @@ func extractReqParams(r *http.Request) map[string]string {
    region := globalServerRegion
    cred := getReqAccessCred(r, region)

    principalID := cred.AccessKey
    if cred.ParentUser != "" {
        principalID = cred.ParentUser
    }

    // Success.
    m := map[string]string{
        "region":          region,
        "accessKey":       cred.AccessKey,
        "principalId":     principalID,
        "sourceIPAddress": handlers.GetSourceIP(r),
        // Add more fields here.
    }

@@ -141,6 +141,7 @@ type HTTPStats struct {
    currentS3Requests HTTPAPIStats
    totalS3Requests   HTTPAPIStats
    totalS3Errors     HTTPAPIStats
    totalS3Canceled   HTTPAPIStats
}

func (st *HTTPStats) addRequestsInQueue(i int32) {
@@ -160,6 +161,9 @@ func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
    serverStats.TotalS3Errors = ServerHTTPAPIStats{
        APIStats: st.totalS3Errors.Load(),
    }
    serverStats.TotalS3Canceled = ServerHTTPAPIStats{
        APIStats: st.totalS3Canceled.Load(),
    }
    return serverStats
}

@@ -172,8 +176,15 @@ func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.Response
        !strings.HasSuffix(r.URL.Path, prometheusMetricsV2ClusterPath) ||
        !strings.HasSuffix(r.URL.Path, prometheusMetricsV2NodePath) {
        st.totalS3Requests.Inc(api)
        if !successReq && w.StatusCode != 0 {
            st.totalS3Errors.Inc(api)
        if !successReq {
            switch w.StatusCode {
            case 0:
            case 499:
                // 499 is a good error, shall be counted at canceled.
                st.totalS3Canceled.Inc(api)
            default:
                st.totalS3Errors.Inc(api)
            }
        }
    }

122
cmd/iam.go
@@ -577,15 +577,6 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
    // Hold the lock for migration only.
    txnLk := objAPI.NewNSLock(minioMetaBucket, minioConfigPrefix+"/iam.lock")

    // Initializing IAM sub-system needs a retry mechanism for
    // the following reasons:
    //  - Read quorum is lost just after the initialization
    //    of the object layer.
    //  - Write quorum not met when upgrading configuration
    //    version is needed, migration is needed etc.
    rquorum := InsufficientReadQuorum{}
    wquorum := InsufficientWriteQuorum{}

    // allocate dynamic timeout once before the loop
    iamLockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)

@@ -620,12 +611,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
        // Migrate IAM configuration, if necessary.
        if err := sys.doIAMConfigMigration(ctx); err != nil {
            txnLk.Unlock()
            if errors.Is(err, errDiskNotFound) ||
                errors.Is(err, errConfigNotFound) ||
                errors.Is(err, context.DeadlineExceeded) ||
                errors.As(err, &rquorum) ||
                errors.As(err, &wquorum) ||
                isErrBucketNotFound(err) {
            if configRetriableErrors(err) {
                logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err)
                continue
            }
@@ -641,12 +627,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {

    for {
        if err := sys.store.loadAll(ctx, sys); err != nil {
            if errors.Is(err, errDiskNotFound) ||
                errors.Is(err, errConfigNotFound) ||
                errors.Is(err, context.DeadlineExceeded) ||
                errors.As(err, &rquorum) ||
                errors.As(err, &wquorum) ||
                isErrBucketNotFound(err) {
            if configRetriableErrors(err) {
                logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err)
                time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
                continue
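Both retry loops above now funnel their error checks through a single configRetriableErrors helper and sleep with random jitter before retrying, so restarting peers don't hammer the object layer in lockstep. The jitter idiom in isolation (the retriable set and delays here are illustrative; the diff uses a 5s base):

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

var errQuorum = errors.New("quorum not met")

// retriable is a stand-in for configRetriableErrors in the diff.
func retriable(err error) bool {
	return errors.Is(err, errQuorum)
}

// load fakes loadAll: it fails until the third attempt.
func load(attempt int) error {
	if attempt < 3 {
		return errQuorum
	}
	return nil
}

func main() {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for attempt := 1; ; attempt++ {
		err := load(attempt)
		if err == nil {
			fmt.Println("initialized after", attempt, "attempts")
			return
		}
		if !retriable(err) {
			fmt.Println("fatal:", err)
			return
		}
		// Sleep a random fraction of the base delay (shortened here
		// so the example runs quickly) before trying again.
		time.Sleep(time.Duration(r.Float64() * float64(100*time.Millisecond)))
	}
}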
@@ -1621,6 +1602,10 @@ func (sys *IAMSys) PolicyDBSet(name, policy string, isGroup bool) error {
    sys.store.lock()
    defer sys.store.unlock()

    if sys.usersSysType == LDAPUsersSysType {
        return sys.policyDBSet(name, policy, stsUser, isGroup)
    }

    return sys.policyDBSet(name, policy, regularUser, isGroup)
}

@@ -1676,10 +1661,9 @@ func (sys *IAMSys) policyDBSet(name, policyName string, userType IAMUserType, is
    return nil
}

// PolicyDBGet - gets policy set on a user or group. Since a user may
// be a member of multiple groups, this function returns an array of
// applicable policies
func (sys *IAMSys) PolicyDBGet(name string, isGroup bool) ([]string, error) {
// PolicyDBGet - gets policy set on a user or group. If a list of groups is
// given, policies associated with them are included as well.
func (sys *IAMSys) PolicyDBGet(name string, isGroup bool, groups ...string) ([]string, error) {
    if !sys.Initialized() {
        return nil, errServerNotInitialized
    }
@@ -1691,40 +1675,68 @@ func (sys *IAMSys) PolicyDBGet(name string, isGroup bool) ([]string, error) {
    sys.store.rlock()
    defer sys.store.runlock()

    return sys.policyDBGet(name, isGroup)
    policies, err := sys.policyDBGet(name, isGroup)
    if err != nil {
        return nil, err
    }

    if !isGroup {
        for _, group := range groups {
            ps, err := sys.policyDBGet(group, true)
            if err != nil {
                return nil, err
            }
            policies = append(policies, ps...)
        }
    }

    return policies, nil
}
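PolicyDBGet now accepts the caller's group list and folds each group's policies into the result. A sketch of just the merge logic with plain maps (the real method also handles locking, LDAP mode, and user validation):

package main

import "fmt"

var (
	userPolicies  = map[string][]string{"alice": {"readwrite"}}
	groupPolicies = map[string][]string{"devs": {"diagnostics"}}
)

// policyDBGet mirrors the new signature: for a user, policies attached
// to any supplied groups are appended to the user's own policies.
func policyDBGet(name string, isGroup bool, groups ...string) []string {
	if isGroup {
		return groupPolicies[name]
	}
	policies := append([]string{}, userPolicies[name]...)
	for _, g := range groups {
		policies = append(policies, groupPolicies[g]...)
	}
	return policies
}

func main() {
	fmt.Println(policyDBGet("alice", false, "devs")) // [readwrite diagnostics]
}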

// This call assumes that caller has the sys.RLock()
func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
// This call assumes that caller has the sys.RLock().
//
// If a group is passed, it returns policies associated with the group.
//
// If a user is passed, it returns policies of the user along with any groups
// that the server knows the user is a member of.
//
// In LDAP users mode, the server does not store any group membership
// information in IAM (i.e sys.iam*Map) - this info is stored only in the STS
// generated credentials. Thus we skip looking up group memberships, user map,
// and group map and check the appropriate policy maps directly.
func (sys *IAMSys) policyDBGet(name string, isGroup bool) (policies []string, err error) {
    if isGroup {
        g, ok := sys.iamGroupsMap[name]
        if !ok {
            return nil, errNoSuchGroup
        if sys.usersSysType == MinIOUsersSysType {
            g, ok := sys.iamGroupsMap[name]
            if !ok {
                return nil, errNoSuchGroup
            }

            // Group is disabled, so we return no policy - this
            // ensures the request is denied.
            if g.Status == statusDisabled {
                return nil, nil
            }
        }

        // Group is disabled, so we return no policy - this
        // ensures the request is denied.
        if g.Status == statusDisabled {
        return sys.iamGroupPolicyMap[name].toSlice(), nil
    }

    var u auth.Credentials
    var ok bool
    if sys.usersSysType == MinIOUsersSysType {
        // When looking for a user's policies, we also check if the user
        // and the groups they are member of are enabled.

        u, ok = sys.iamUsersMap[name]
        if !ok {
            return nil, errNoSuchUser
        }
        if !u.IsValid() {
            return nil, nil
        }

        mp := sys.iamGroupPolicyMap[name]
        return mp.toSlice(), nil
    }

    // When looking for a user's policies, we also check if the
    // user and the groups they are member of are enabled.
    u, ok := sys.iamUsersMap[name]
    if !ok {
        return nil, errNoSuchUser
    }

    if !u.IsValid() {
        return nil, nil
    }

    var policies []string

    mp, ok := sys.iamUserPolicyMap[name]
    if !ok {
        if u.ParentUser != "" {
@@ -1742,8 +1754,7 @@ func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
            continue
        }

        p := sys.iamGroupPolicyMap[group]
        policies = append(policies, p.toSlice()...)
        policies = append(policies, sys.iamGroupPolicyMap[group].toSlice()...)
    }

    return policies, nil
@@ -1773,8 +1784,9 @@ func (sys *IAMSys) IsAllowedServiceAccount(args iampolicy.Args, parent string) b
    }

    // Check policy for this service account.
    svcPolicies, err := sys.PolicyDBGet(args.AccountName, false)
    svcPolicies, err := sys.PolicyDBGet(parent, false, args.Groups...)
    if err != nil {
        logger.LogIf(GlobalContext, err)
        return false
    }

@@ -1872,7 +1884,7 @@ func (sys *IAMSys) IsAllowedLDAPSTS(args iampolicy.Args, parentUser string) bool
    }

    // Check policy for this LDAP user.
    ldapPolicies, err := sys.PolicyDBGet(args.AccountName, false)
    ldapPolicies, err := sys.PolicyDBGet(parentUser, false, args.Groups...)
    if err != nil {
        return false
    }
@@ -2057,7 +2069,7 @@ func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool {
    }

    // Continue with the assumption of a regular user
    policies, err := sys.PolicyDBGet(args.AccountName, false)
    policies, err := sys.PolicyDBGet(args.AccountName, false, args.Groups...)
    if err != nil {
        return false
    }

@@ -81,6 +81,15 @@ type MapClaims struct {
    jwtgo.MapClaims
}

// GetAccessKey will return the access key.
// If nil an empty string will be returned.
func (c *MapClaims) GetAccessKey() string {
    if c == nil {
        return ""
    }
    return c.AccessKey
}
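GetAccessKey is deliberately safe to call on a nil *MapClaims: a pointer-receiver method can check the receiver itself, sparing every call site a nil guard. A minimal demonstration of the idiom:

package main

import "fmt"

type MapClaims struct {
	AccessKey string
}

// GetAccessKey returns the access key, or "" when the receiver is nil,
// mirroring the method added in the diff.
func (c *MapClaims) GetAccessKey() string {
	if c == nil {
		return ""
	}
	return c.AccessKey
}

func main() {
	var claims *MapClaims
	fmt.Printf("%q\n", claims.GetAccessKey()) // "" rather than a panic
}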

// NewStandardClaims - initializes standard claims
func NewStandardClaims() *StandardClaims {
    return &StandardClaims{}

@@ -49,7 +49,7 @@ type ResponseWriter struct {
}

// NewResponseWriter - returns a wrapped response writer to trap
// http status codes for auditiing purposes.
// http status codes for auditing purposes.
func NewResponseWriter(w http.ResponseWriter) *ResponseWriter {
    return &ResponseWriter{
        ResponseWriter: w,

@@ -25,7 +25,6 @@ import (
    "sort"
    "strconv"
    "strings"
    "sync/atomic"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/logger"
@@ -57,11 +56,6 @@ type WalkDirOptions struct {
// WalkDir will traverse a directory and return all entries found.
// On success a sorted meta cache stream will be returned.
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
    atomic.AddInt32(&s.activeIOCount, 1)
    defer func() {
        atomic.AddInt32(&s.activeIOCount, -1)
    }()

    // Verify if volume is valid and it exists.
    volumeDir, err := s.getVolDir(opts.Bucket)
    if err != nil {
@@ -266,6 +260,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
}

func (p *xlStorageDiskIDCheck) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
    defer p.updateStorageMetrics(storageMetricWalkDir)()
    if err := p.checkDiskStale(); err != nil {
        return err
    }

@@ -74,6 +74,7 @@ type MetricName string
const (
    total         MetricName = "total"
    errorsTotal   MetricName = "error_total"
    canceledTotal MetricName = "canceled_total"
    healTotal     MetricName = "heal_total"
    hitsTotal     MetricName = "hits_total"
    inflightTotal MetricName = "inflight_total"
@@ -106,18 +107,19 @@ const (
    versionInfo MetricName = "version_info"

    sizeDistribution = "size_distribution"
    ttfbDistribution = "ttbf_seconds_distribution"
    ttfbDistribution = "ttfb_seconds_distribution"

    lastActivityTime = "last_activity_nano_seconds"
    startTime        = "starttime_seconds"
    upTime           = "uptime_seconds"
)

const (
    serverName = "server"
)

// GaugeMetricType for the types of metrics supported
type GaugeMetricType string
// MetricType for the types of metrics supported
type MetricType string

const (
    gaugeMetric = "gaugeMetric"
@@ -131,7 +133,7 @@ type MetricDescription struct {
    Subsystem MetricSubsystem `json:"Subsystem"`
    Name      MetricName      `json:"MetricName"`
    Help      string          `json:"Help"`
    Type      GaugeMetricType `json:"Type"`
    Type      MetricType      `json:"Type"`
}

// Metric captures the details for a metric
@@ -144,10 +146,66 @@ type Metric struct {
    Histogram map[string]uint64 `json:"Histogram"`
}

func (m *Metric) copyMetric() Metric {
    metric := Metric{
        Description:          m.Description,
        Value:                m.Value,
        HistogramBucketLabel: m.HistogramBucketLabel,
        StaticLabels:         make(map[string]string),
        VariableLabels:       make(map[string]string),
        Histogram:            make(map[string]uint64),
    }
    for k, v := range m.StaticLabels {
        metric.StaticLabels[k] = v
    }
    for k, v := range m.VariableLabels {
        metric.VariableLabels[k] = v
    }
    for k, v := range m.Histogram {
        metric.Histogram[k] = v
    }
    return metric
}
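copyMetric exists because cached []Metric values are handed out to concurrent readers; returning the cached structs directly would share the label and histogram maps. A cut-down sketch of why the deep copy matters (a hypothetical one-field metric type, not MinIO's):

package main

import "fmt"

type metric struct {
	Labels map[string]string
}

func (m metric) copy() metric {
	out := metric{Labels: make(map[string]string, len(m.Labels))}
	for k, v := range m.Labels {
		out.Labels[k] = v
	}
	return out
}

func main() {
	cached := metric{Labels: map[string]string{"bucket": "a"}}

	shared := cached // struct copy still shares the underlying map
	shared.Labels["bucket"] = "b"
	fmt.Println(cached.Labels["bucket"]) // "b" - the cache was mutated

	cached.Labels["bucket"] = "a"
	owned := cached.copy() // deep copy, as copyMetric does
	owned.Labels["bucket"] = "c"
	fmt.Println(cached.Labels["bucket"]) // still "a"
}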

// MetricsGroup are a group of metrics that are initialized together.
type MetricsGroup struct {
    Metrics       []Metric
    initialize    func(ctx context.Context, m *MetricsGroup)
    id            string
    cacheInterval time.Duration
    cachedRead    func(ctx context.Context, mg *MetricsGroup) []Metric
    read          func(ctx context.Context) []Metric
}

var metricsGroupCache = make(map[string]*timedValue)
var cacheLock sync.Mutex

func cachedRead(ctx context.Context, mg *MetricsGroup) (metrics []Metric) {
    cacheLock.Lock()
    defer cacheLock.Unlock()
    v, ok := metricsGroupCache[mg.id]
    if !ok {
        interval := mg.cacheInterval
        if interval == 0 {
            interval = 30 * time.Second
        }
        v = &timedValue{}
        v.Once.Do(func() {
            v.Update = func() (interface{}, error) {
                c := mg.read(ctx)
                return c, nil
            }
            v.TTL = interval
        })
        metricsGroupCache[mg.id] = v
    }
    c, err := v.Get()
    if err != nil {
        return []Metric{}
    }
    m := c.([]Metric)
    for i := range m {
        metrics = append(metrics, m[i].copyMetric())
    }
    return metrics
}
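cachedRead memoizes each group's read function behind a TTL (30s unless the group overrides cacheInterval) so that frequent Prometheus scrapes don't recompute expensive metrics. The timedValue type is MinIO's; a self-contained equivalent of the caching shape:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlValue caches the result of update for ttl; a simplified stand-in
// for the timedValue used by cachedRead in the diff.
type ttlValue struct {
	mu     sync.Mutex
	ttl    time.Duration
	update func() []string
	last   time.Time
	cached []string
}

func (v *ttlValue) get() []string {
	v.mu.Lock()
	defer v.mu.Unlock()
	if time.Since(v.last) > v.ttl {
		v.cached = v.update()
		v.last = time.Now()
	}
	return v.cached
}

func main() {
	calls := 0
	v := &ttlValue{ttl: 30 * time.Second, update: func() []string {
		calls++
		return []string{"metric_a", "metric_b"}
	}}
	v.get()
	v.get() // served from cache; update ran once
	fmt.Println(calls) // 1
}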

// MetricsGenerator are functions that generate metric groups.
@@ -417,7 +475,7 @@ func getS3RequestsInQueueMD() MetricDescription {
        Subsystem: requestsSubsystem,
        Name:      waitingTotal,
        Help:      "Number of S3 requests in the waiting queue",
        Type:      counterMetric,
        Type:      gaugeMetric,
    }
}
func getS3RequestsTotalMD() MetricDescription {
@@ -438,6 +496,15 @@ func getS3RequestsErrorsMD() MetricDescription {
        Type:      counterMetric,
    }
}
func getS3RequestsCanceledMD() MetricDescription {
    return MetricDescription{
        Namespace: s3MetricNamespace,
        Subsystem: requestsSubsystem,
        Name:      canceledTotal,
        Help:      "Total number S3 requests that were canceled from the client while processing",
        Type:      counterMetric,
    }
}
func getCacheHitsTotalMD() MetricDescription {
    return MetricDescription{
        Namespace: minioNamespace,
@@ -519,6 +586,7 @@ func getHealObjectsHealTotalMD() MetricDescription {
        Type:      gaugeMetric,
    }
}

func getHealObjectsFailTotalMD() MetricDescription {
    return MetricDescription{
        Namespace: healMetricNamespace,
@@ -668,14 +736,28 @@ func getMinIOProcessStartTimeMD() MetricDescription {
        Namespace: nodeMetricNamespace,
        Subsystem: processSubsystem,
        Name:      startTime,
        Help:      "Start time for MinIO process per node in seconds.",
        Help:      "Start time for MinIO process per node, time in seconds since Unix epoc.",
        Type:      gaugeMetric,
    }
}
func getMinIOProcessUptimeMD() MetricDescription {
    return MetricDescription{
        Namespace: nodeMetricNamespace,
        Subsystem: processSubsystem,
        Name:      upTime,
        Help:      "Uptime for MinIO process per node in seconds.",
        Type:      gaugeMetric,
    }
}
func getMinioProcMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "MinioProcMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            if runtime.GOOS == "windows" {
                return nil
            }
            metrics = make([]Metric, 0, 20)
            p, err := procfs.Self()
            if err != nil {
                logger.LogOnceIf(ctx, err, nodeMetricNamespace)
@@ -708,70 +790,79 @@ func getMinioProcMetrics() MetricsGroup {
                return
            }

            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioFDOpenMD(),
                    Value:       float64(openFDs),
                },
            )
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioFDLimitMD(),
                    Value:       float64(l.OpenFiles),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinIOProcessSysCallRMD(),
                    Value:       float64(io.SyscR),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinIOProcessSysCallWMD(),
                    Value:       float64(io.SyscW),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioProcessIOReadBytesMD(),
                    Value:       float64(io.ReadBytes),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioProcessIOWriteBytesMD(),
                    Value:       float64(io.WriteBytes),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioProcessIOReadCachedBytesMD(),
                    Value:       float64(io.RChar),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinioProcessIOWriteCachedBytesMD(),
                    Value:       float64(io.WChar),
                })
            metrics.Metrics = append(metrics.Metrics,
            metrics = append(metrics,
                Metric{
                    Description: getMinIOProcessStartTimeMD(),
                    Value:       startTime,
                })
            metrics = append(metrics,
                Metric{
                    Description: getMinIOProcessUptimeMD(),
                    Value:       time.Since(globalBootTime).Seconds(),
                })
            return
        },
    }
}
func getGoMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
            metrics.Metrics = append(metrics.Metrics, Metric{
        id:         "GoMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            metrics = append(metrics, Metric{
                Description: getMinIOGORoutineCountMD(),
                Value:       float64(runtime.NumGoroutine()),
            })
            return
        },
    }
}
func getS3TTFBMetric() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "s3TTFBMetric",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {

            // Read prometheus metric on this channel
            ch := make(chan prometheus.Metric)
@@ -800,7 +891,7 @@ func getS3TTFBMetric() MetricsGroup {
                        VariableLabels: labels,
                        Value:          float64(b.GetCumulativeCount()),
                    }
                    metrics.Metrics = append(metrics.Metrics, metric)
                    metrics = append(metrics, metric)
                }
            }

@@ -809,53 +900,54 @@ func getS3TTFBMetric() MetricsGroup {
            httpRequestsDuration.Collect(ch)
            close(ch)
            wg.Wait()
            return
        },
    }
}

func getMinioVersionMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(_ context.Context, m *MetricsGroup) {
            m.Metrics = append(m.Metrics, Metric{
        id:         "MinioVersionMetrics",
        cachedRead: cachedRead,
        read: func(_ context.Context) (metrics []Metric) {
            metrics = append(metrics, Metric{
                Description:    getMinIOCommitMD(),
                VariableLabels: map[string]string{"commit": CommitID},
            })
            m.Metrics = append(m.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description:    getMinIOVersionMD(),
                VariableLabels: map[string]string{"version": Version},
            })
            return
        },
    }
}

func getNodeHealthMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{
            {
                Description: getNodeOnlineTotalMD(),
            }, {
                Description: getNodeOfflineTotalMD(),
            },
        },
        initialize: func(_ context.Context, m *MetricsGroup) {
        id:         "NodeHealthMetrics",
        cachedRead: cachedRead,
        read: func(_ context.Context) (metrics []Metric) {
            nodesUp, nodesDown := GetPeerOnlineCount()
            for i := range m.Metrics {
                switch {
                case m.Metrics[i].Description.Name == onlineTotal:
                    m.Metrics[i].Value = float64(nodesUp)
                case m.Metrics[i].Description.Name == offlineTotal:
                    m.Metrics[i].Value = float64(nodesDown)
                }
            }
            metrics = append(metrics, Metric{
                Description: getNodeOnlineTotalMD(),
                Value:       float64(nodesUp),
            })
            metrics = append(metrics, Metric{
                Description: getNodeOfflineTotalMD(),
                Value:       float64(nodesDown),
            })
            return
        },
    }
}

func getMinioHealingMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(_ context.Context, m *MetricsGroup) {
        id:         "minioHealingMetrics",
        cachedRead: cachedRead,
        read: func(_ context.Context) (metrics []Metric) {
            metrics = make([]Metric, 0, 5)
            if !globalIsErasure {
                return
            }
@@ -867,19 +959,20 @@ func getMinioHealingMetrics() MetricsGroup {
            if !bgSeq.lastHealActivity.IsZero() {
                dur = time.Since(bgSeq.lastHealActivity)
            }
            m.Metrics = append(m.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getHealLastActivityTimeMD(),
                Value:       float64(dur),
            })
            m.Metrics = append(m.Metrics, getObjectsScanned(bgSeq)...)
            m.Metrics = append(m.Metrics, getScannedItems(bgSeq)...)
            m.Metrics = append(m.Metrics, getFailedItems(bgSeq)...)
            metrics = append(metrics, getObjectsScanned(bgSeq)...)
            metrics = append(metrics, getScannedItems(bgSeq)...)
            metrics = append(metrics, getFailedItems(bgSeq)...)
            return
        },
    }
}

func getFailedItems(seq *healSequence) (m []Metric) {
    m = make([]Metric, 0)
    m = make([]Metric, 0, 1)
    for k, v := range seq.gethealFailedItemsMap() {
        s := strings.Split(k, ",")
        m = append(m, Metric{
@@ -895,8 +988,9 @@ func getFailedItems(seq *healSequence) (m []Metric) {
}

func getScannedItems(seq *healSequence) (m []Metric) {
    m = make([]Metric, 0)
    for k, v := range seq.getHealedItemsMap() {
    items := seq.getHealedItemsMap()
    m = make([]Metric, 0, len(items))
    for k, v := range items {
        m = append(m, Metric{
            Description:    getHealObjectsHealTotalMD(),
            VariableLabels: map[string]string{"type": string(k)},
@@ -907,7 +1001,8 @@ func getScannedItems(seq *healSequence) (m []Metric) {
}

func getObjectsScanned(seq *healSequence) (m []Metric) {
    m = make([]Metric, 0)
    items := seq.getHealedItemsMap()
    m = make([]Metric, 0, len(items))
    for k, v := range seq.getScannedItemsMap() {
        m = append(m, Metric{
            Description:    getHealObjectsTotalMD(),
@@ -919,118 +1014,139 @@ func getObjectsScanned(seq *healSequence) (m []Metric) {
}
func getCacheMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, m *MetricsGroup) {
        id:         "CacheMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            metrics = make([]Metric, 0, 20)
            cacheObjLayer := newCachedObjectLayerFn()
            // Service not initialized yet
            if cacheObjLayer == nil {
                return
            }
            m.Metrics = append(m.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getCacheHitsTotalMD(),
                Value:       float64(cacheObjLayer.CacheStats().getHits()),
            })
            m.Metrics = append(m.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getCacheHitsMissedTotalMD(),
                Value:       float64(cacheObjLayer.CacheStats().getMisses()),
            })
            m.Metrics = append(m.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getCacheSentBytesMD(),
                Value:       float64(cacheObjLayer.CacheStats().getBytesServed()),
            })
            for _, cdStats := range cacheObjLayer.CacheStats().GetDiskStats() {
                m.Metrics = append(m.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getCacheUsagePercentMD(),
                    Value:          float64(cdStats.UsagePercent),
                    VariableLabels: map[string]string{"disk": cdStats.Dir},
                })
                m.Metrics = append(m.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getCacheUsageInfoMD(),
                    Value:          float64(cdStats.UsageState),
                    VariableLabels: map[string]string{"disk": cdStats.Dir, "level": cdStats.GetUsageLevelString()},
                })
                m.Metrics = append(m.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getCacheUsedBytesMD(),
                    Value:          float64(cdStats.UsageSize),
                    VariableLabels: map[string]string{"disk": cdStats.Dir},
                })
                m.Metrics = append(m.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getCacheTotalBytesMD(),
                    Value:          float64(cdStats.TotalCapacity),
                    VariableLabels: map[string]string{"disk": cdStats.Dir},
                })
            }
            return
        },
    }
}

func getHTTPMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "httpMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            httpStats := globalHTTPStats.toServerHTTPStats()
            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = make([]Metric, 0, 3+
                len(httpStats.CurrentS3Requests.APIStats)+
                len(httpStats.TotalS3Requests.APIStats)+
                len(httpStats.TotalS3Errors.APIStats))
            metrics = append(metrics, Metric{
                Description: getS3RequestsInQueueMD(),
                Value:       float64(httpStats.S3RequestsInQueue),
            })
            for api, value := range httpStats.CurrentS3Requests.APIStats {
                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getS3RequestsInFlightMD(),
                    Value:          float64(value),
                    VariableLabels: map[string]string{"api": api},
                })
            }
            for api, value := range httpStats.TotalS3Requests.APIStats {
                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getS3RequestsTotalMD(),
                    Value:          float64(value),
                    VariableLabels: map[string]string{"api": api},
                })
            }
            for api, value := range httpStats.TotalS3Errors.APIStats {
                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getS3RequestsErrorsMD(),
                    Value:          float64(value),
                    VariableLabels: map[string]string{"api": api},
                })
            }
            for api, value := range httpStats.TotalS3Canceled.APIStats {
                metrics = append(metrics, Metric{
                    Description:    getS3RequestsCanceledMD(),
                    Value:          float64(value),
                    VariableLabels: map[string]string{"api": api},
                })
            }
            return
        },
    }
}

func getNetworkMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
            metrics.Metrics = append(metrics.Metrics, Metric{
        id:         "networkMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            metrics = make([]Metric, 0, 10)
            metrics = append(metrics, Metric{
                Description: getInternodeFailedRequests(),
                Value:       float64(loadAndResetRPCNetworkErrsCounter()),
            })
            connStats := globalConnStats.toServerConnStats()
            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getInterNodeSentBytesMD(),
                Value:       float64(connStats.TotalOutputBytes),
            })
            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getInterNodeReceivedBytesMD(),
                Value:       float64(connStats.TotalInputBytes),
            })
            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getS3SentBytesMD(),
                Value:       float64(connStats.S3OutputBytes),
            })
            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getS3ReceivedBytesMD(),
                Value:       float64(connStats.S3InputBytes),
            })
            return
        },
    }
}

func getBucketUsageMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "BucketUsageMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            metrics = make([]Metric, 0, 50)
            objLayer := newObjectLayerFn()
            // Service not initialized yet
            if objLayer == nil {
@@ -1052,42 +1168,42 @@ func getBucketUsageMetrics() MetricsGroup {
            }

            for bucket, usage := range dataUsageInfo.BucketsUsage {
                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getBucketUsageTotalBytesMD(),
                    Value:          float64(usage.Size),
                    VariableLabels: map[string]string{"bucket": bucket},
                })

                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getBucketUsageObjectsTotalMD(),
                    Value:          float64(usage.ObjectsCount),
                    VariableLabels: map[string]string{"bucket": bucket},
                })

                if usage.hasReplicationUsage() {
                    metrics.Metrics = append(metrics.Metrics, Metric{
                    metrics = append(metrics, Metric{
                        Description:    getBucketRepPendingBytesMD(),
                        Value:          float64(usage.ReplicationPendingSize),
                        VariableLabels: map[string]string{"bucket": bucket},
                    })
                    metrics.Metrics = append(metrics.Metrics, Metric{
                    metrics = append(metrics, Metric{
                        Description:    getBucketRepFailedBytesMD(),
                        Value:          float64(usage.ReplicationFailedSize),
                        VariableLabels: map[string]string{"bucket": bucket},
                    })
                    metrics.Metrics = append(metrics.Metrics, Metric{
                    metrics = append(metrics, Metric{
                        Description:    getBucketRepSentBytesMD(),
                        Value:          float64(usage.ReplicatedSize),
                        VariableLabels: map[string]string{"bucket": bucket},
                    })
                    metrics.Metrics = append(metrics.Metrics, Metric{
                    metrics = append(metrics, Metric{
                        Description:    getBucketRepReceivedBytesMD(),
                        Value:          float64(usage.ReplicaSize),
                        VariableLabels: map[string]string{"bucket": bucket},
                    })
                }

                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:          getBucketObjectDistributionMD(),
                    Histogram:            usage.ObjectSizesHistogram,
                    HistogramBucketLabel: "range",
@@ -1095,13 +1211,15 @@ func getBucketUsageMetrics() MetricsGroup {
                })

            }
            return
        },
    }
}
func getLocalStorageMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "localStorageMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            objLayer := newObjectLayerFn()
            // Service not initialized yet
            if objLayer == nil {
@@ -1112,33 +1230,36 @@ func getLocalStorageMetrics() MetricsGroup {
                return
            }

            metrics = make([]Metric, 0, 50)
            storageInfo, _ := objLayer.LocalStorageInfo(ctx)
            for _, disk := range storageInfo.Disks {
                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getNodeDiskUsedBytesMD(),
                    Value:          float64(disk.UsedSpace),
                    VariableLabels: map[string]string{"disk": disk.DrivePath},
                })

                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getNodeDiskFreeBytesMD(),
                    Value:          float64(disk.AvailableSpace),
                    VariableLabels: map[string]string{"disk": disk.DrivePath},
                })

                metrics.Metrics = append(metrics.Metrics, Metric{
                metrics = append(metrics, Metric{
                    Description:    getNodeDiskTotalBytesMD(),
                    Value:          float64(disk.TotalSpace),
                    VariableLabels: map[string]string{"disk": disk.DrivePath},
                })
            }
            return
        },
    }
}
func getClusterStorageMetrics() MetricsGroup {
    return MetricsGroup{
        Metrics: []Metric{},
        initialize: func(ctx context.Context, metrics *MetricsGroup) {
        id:         "ClusterStorageMetrics",
        cachedRead: cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            objLayer := newObjectLayerFn()
            // Service not initialized yet
            if objLayer == nil {
@@ -1150,44 +1271,46 @@ func getClusterStorageMetrics() MetricsGroup {
            }

            // Fetch disk space info, ignore errors
            metrics = make([]Metric, 0, 10)
            storageInfo, _ := objLayer.StorageInfo(ctx)
            onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
            totalDisks := onlineDisks.Merge(offlineDisks)

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterCapacityTotalBytesMD(),
                Value:       float64(GetTotalCapacity(storageInfo.Disks)),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterCapacityFreeBytesMD(),
                Value:       float64(GetTotalCapacityFree(storageInfo.Disks)),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterCapacityUsageBytesMD(),
                Value:       GetTotalUsableCapacity(storageInfo.Disks, storageInfo),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterCapacityUsageFreeBytesMD(),
                Value:       GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterDisksOfflineTotalMD(),
                Value:       float64(offlineDisks.Sum()),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterDisksOnlineTotalMD(),
                Value:       float64(onlineDisks.Sum()),
            })

            metrics.Metrics = append(metrics.Metrics, Metric{
            metrics = append(metrics, Metric{
                Description: getClusterDisksTotalMD(),
                Value:       float64(totalDisks.Sum()),
            })
            return
        },
    }
}
@@ -1312,9 +1435,9 @@ func (c *minioCollectorV2) Describe(ch chan<- *prometheus.Desc) {
func populateAndPublish(generatorFn func() []MetricsGenerator, publish func(m Metric) bool) {
    generators := generatorFn()
    for _, g := range generators {
        metrics := g()
        metrics.initialize(GlobalContext, &metrics)
        for _, metric := range metrics.Metrics {
        metricsGroup := g()
        metrics := metricsGroup.cachedRead(GlobalContext, &metricsGroup)
        for _, metric := range metrics {
            if !publish(metric) {
                return
            }
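populateAndPublish now pulls metrics through cachedRead instead of calling initialize on a group's shared Metrics slice; every reader gets a fresh slice, so state can't accumulate or race across scrapes. A toy contrast of the two shapes (types here are hypothetical, not MinIO's):

package main

import "fmt"

type metric struct{ value float64 }

// Old shape: the group owns one slice and initialize mutates it in
// place, so repeated scrapes share and grow the same state.
type mutableGroup struct {
	metrics    []metric
	initialize func(*mutableGroup)
}

// New shape: read returns a fresh slice per call; nothing is shared.
type readGroup struct {
	read func() []metric
}

func main() {
	mg := mutableGroup{}
	mg.initialize = func(g *mutableGroup) {
		g.metrics = append(g.metrics, metric{1}) // grows on every scrape
	}
	mg.initialize(&mg)
	mg.initialize(&mg)
	fmt.Println(len(mg.metrics)) // 2 - state leaked across scrapes

	rg := readGroup{read: func() []metric { return []metric{{1}} }}
	fmt.Println(len(rg.read()), len(rg.read())) // 1 1
}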

@@ -372,6 +372,18 @@ func httpMetricsPrometheus(ch chan<- prometheus.Metric) {
            api,
        )
    }

    for api, value := range httpStats.TotalS3Canceled.APIStats {
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(
                prometheus.BuildFQName(s3Namespace, "canceled", "total"),
                "Total number of client canceled s3 request in current MinIO server instance",
                []string{"api"}, nil),
            prometheus.CounterValue,
            float64(value),
            api,
        )
    }
}

// collects network metrics for MinIO server in Prometheus specific format

@@ -1368,7 +1368,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
        AwsRegion:    args.ReqParams["region"],
        EventTime:    eventTime.Format(event.AMZTimeFormat),
        EventName:    args.EventName,
        UserIdentity: event.Identity{PrincipalID: args.ReqParams["accessKey"]},
        UserIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
        RequestParameters: args.ReqParams,
        ResponseElements:  respElements,
        S3: event.Metadata{
@@ -1376,7 +1376,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
            ConfigurationID: "Config",
            Bucket: event.Bucket{
                Name:          args.BucketName,
                OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["accessKey"]},
                OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
                ARN:           policy.ResourceARNPrefix + args.BucketName,
            },
            Object: event.Object{

@@ -18,12 +18,10 @@ package cmd

import (
    "context"
    "errors"
    "strings"
    "sync"

    humanize "github.com/dustin/go-humanize"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/sync/errgroup"
)

@@ -69,7 +67,7 @@ func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err
        if err != nil {
            return nil, err
        }
        return &xlStorageDiskIDCheck{storage: storage}, nil
        return newXLStorageDiskIDCheck(storage), nil
    }

    return newStorageRESTClient(endpoint, false), nil
@@ -82,78 +80,12 @@ func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
        if err != nil {
            return nil, err
        }
        return &xlStorageDiskIDCheck{storage: storage}, nil
        return newXLStorageDiskIDCheck(storage), nil
    }

    return newStorageRESTClient(endpoint, true), nil
}

// Cleanup a directory recursively.
func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) error {
    var delFunc func(string) error
    // Function to delete entries recursively.
    delFunc = func(entryPath string) error {
        if !HasSuffix(entryPath, SlashSeparator) {
            // Delete the file entry.
            err := storage.Delete(ctx, volume, entryPath, false)
            if !IsErrIgnored(err, []error{
                errDiskNotFound,
                errUnformattedDisk,
                errFileNotFound,
            }...) {
                logger.LogIf(ctx, err)
            }
            return err
        }

        // If it's a directory, list and call delFunc() for each entry.
        entries, err := storage.ListDir(ctx, volume, entryPath, -1)
        // If entryPath prefix never existed, safe to ignore
        if errors.Is(err, errFileNotFound) {
            return nil
        } else if err != nil { // For any other errors fail.
            if !IsErrIgnored(err, []error{
                errDiskNotFound,
                errUnformattedDisk,
                errFileNotFound,
            }...) {
                logger.LogIf(ctx, err)
            }
            return err
        } // else on success..

        // Entry path is empty, just delete it.
        if len(entries) == 0 {
            err = storage.Delete(ctx, volume, entryPath, false)
            if !IsErrIgnored(err, []error{
                errDiskNotFound,
                errUnformattedDisk,
                errFileNotFound,
            }...) {
                logger.LogIf(ctx, err)
            }
            return err
        }

        // Recurse and delete all other entries.
        for _, entry := range entries {
            if err = delFunc(pathJoin(entryPath, entry)); err != nil {
                return err
            }
        }
        return nil
    }

    err := delFunc(retainSlash(pathJoin(dirPath)))
    if IsErrIgnored(err, []error{
        errVolumeNotFound,
        errVolumeAccessDenied,
    }...) {
        return nil
    }
    return err
}
|
||||
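The removed cleanupDir relied on Go's recursive-closure idiom: the function variable is declared before the closure is assigned, so the body can call itself. A standalone sketch of that idiom (listChildren is a hypothetical stand-in, not a MinIO helper):

	var walk func(path string) error
	walk = func(path string) error {
		children, err := listChildren(path) // hypothetical lister
		if err != nil {
			return err
		}
		for _, child := range children {
			if err := walk(child); err != nil { // the closure recurses into itself
				return err
			}
		}
		return nil
	}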
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)

@@ -426,7 +426,7 @@ func (e BucketRemoteTargetNotFound) Error() string {
type BucketRemoteConnectionErr GenericError

func (e BucketRemoteConnectionErr) Error() string {
	return "Remote service endpoint or target bucket not available: " + e.Bucket
	return fmt.Sprintf("Remote service endpoint or target bucket not available: %s \n\t%s", e.Bucket, e.Err.Error())
}

// BucketRemoteAlreadyExists remote already exists for this target type.

@@ -2371,8 +2371,20 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
	}

	etag := partInfo.ETag
	if isEncrypted {
		etag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
	switch kind, encrypted := crypto.IsEncrypted(mi.UserDefined); {
	case encrypted:
		switch kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			etag = tryDecryptETag(objectEncryptionKey[:], etag, false)
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))

			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
				etag = etag[len(etag)-32:]
			}
		}
	}

	// We must not use the http.Header().Set method here because some (broken)
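The SSE-C branch above trims the decrypted tag to its last 32 characters unless the value already looks like a multipart ETag ("<md5>-<partcount>", i.e. exactly one dash). A small self-contained sketch of that trimming rule, with invented inputs:

	trim := func(etag string) string {
		if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
			return etag[len(etag)-32:] // keep only the trailing 32 hex chars
		}
		return etag
	}
	fmt.Println(trim("prefix1234567890abcdef1234567890abcdef")) // -> "1234567890abcdef1234567890abcdef"
	fmt.Println(trim("9bb58f26192e4ba00f01e2e7b136bbd8-5"))     // multipart-style tag, left unchanged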
@@ -2817,7 +2829,8 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
			VersionID: opts.VersionID,
		})
	}
	_, replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)

	replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
	if replicateDel {
		if opts.VersionID != "" {
			opts.VersionPurgeStatus = Pending
@@ -2825,6 +2838,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
			opts.DeleteMarkerReplicationStatus = string(replication.Pending)
		}
	}

	vID := opts.VersionID
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		// check if replica has permission to be deleted.
161
cmd/os-instrumented.go
Normal file
@@ -0,0 +1,161 @@
/*
 * MinIO Cloud Storage, (C) 2021 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/env"
)

var (
	logTime   bool = false
	threshold time.Duration
)

func init() {
	logTime = env.IsSet(config.EnvLogPosixTimes)
	t, _ := env.GetInt(
		config.EnvLogPosixThresholdInMS,
		100,
	)
	threshold = time.Duration(t) * time.Millisecond
}

func reportTime(name *strings.Builder, startTime time.Time) {
	delta := time.Since(startTime)
	if delta > threshold {
		name.WriteString(" ")
		name.WriteString(delta.String())
		fmt.Println(name.String())
	}
}

// RemoveAll captures time taken to call the underlying os.RemoveAll
func RemoveAll(dirPath string) error {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.RemoveAll: ")
		s.WriteString(dirPath)
		defer reportTime(&s, startTime)
	}
	return os.RemoveAll(dirPath)
}

// MkdirAll captures time taken to call os.MkdirAll
func MkdirAll(dirPath string, mode os.FileMode) error {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.MkdirAll: ")
		s.WriteString(dirPath)
		defer reportTime(&s, startTime)
	}
	return os.MkdirAll(dirPath, mode)
}

// Rename captures time taken to call os.Rename
func Rename(src, dst string) error {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.Rename: ")
		s.WriteString(src)
		s.WriteString(" to ")
		s.WriteString(dst)
		defer reportTime(&s, startTime)
	}
	return os.Rename(src, dst)
}

// OpenFile captures time taken to call os.OpenFile
func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.OpenFile: ")
		s.WriteString(name)
		defer reportTime(&s, startTime)
	}
	return os.OpenFile(name, flag, perm)
}

// Open captures time taken to call os.Open
func Open(name string) (*os.File, error) {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.Open: ")
		s.WriteString(name)
		defer reportTime(&s, startTime)
	}
	return os.Open(name)
}

// OpenFileDirectIO captures time taken to call disk.OpenFileDirectIO
func OpenFileDirectIO(name string, flag int, perm os.FileMode) (*os.File, error) {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("disk.OpenFileDirectIO: ")
		s.WriteString(name)
		defer reportTime(&s, startTime)
	}
	return disk.OpenFileDirectIO(name, flag, perm)
}

// Lstat captures time taken to call os.Lstat
func Lstat(name string) (os.FileInfo, error) {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.Lstat: ")
		s.WriteString(name)
		defer reportTime(&s, startTime)
	}
	return os.Lstat(name)
}

// Remove captures time taken to call os.Remove
func Remove(deletePath string) error {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.Remove: ")
		s.WriteString(deletePath)
		defer reportTime(&s, startTime)
	}
	return os.Remove(deletePath)
}

// Stat captures time taken to call os.Stat
func Stat(name string) (os.FileInfo, error) {
	if logTime {
		startTime := time.Now()
		var s strings.Builder
		s.WriteString("os.Stat: ")
		s.WriteString(name)
		defer reportTime(&s, startTime)
	}
	return os.Stat(name)
}
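All of these wrappers share one shape: start a timer, defer a reporter, call through to the real os function. A generic, self-contained rendering of that shape (illustrative only, not part of the commit):

	package main

	import (
		"fmt"
		"os"
		"time"
	)

	// timed reports the duration of fn whenever it exceeds threshold.
	func timed(label string, threshold time.Duration, fn func() error) error {
		start := time.Now()
		err := fn()
		if d := time.Since(start); d > threshold {
			fmt.Println(label, d) // e.g. "os.Mkdir: /tmp/example 152.4ms"
		}
		return err
	}

	func main() {
		_ = timed("os.Mkdir: /tmp/example", 100*time.Millisecond, func() error {
			return os.Mkdir("/tmp/example", 0o755)
		})
	}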
@@ -33,7 +33,7 @@ func readDir(dirPath string) (entries []string, err error) {
// the directory itself, if the dirPath doesn't exist this function doesn't return
// an error.
func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
	d, err := os.Open(dirPath)
	d, err := Open(dirPath)
	if err != nil {
		if osErrToFileErr(err) == errFileNotFound {
			return nil
@@ -58,7 +58,7 @@ func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error)
	}
	for _, fi := range fis {
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			fi, err = os.Stat(pathJoin(dirPath, fi.Name()))
			fi, err = Stat(pathJoin(dirPath, fi.Name()))
			if err != nil {
				// It got deleted in the meantime, not found
				// or returns too many symlinks ignore this
@@ -86,7 +86,7 @@ func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error)

// Return N entries at the directory dirPath. If count is -1, return all entries
func readDirN(dirPath string, count int) (entries []string, err error) {
	d, err := os.Open(dirPath)
	d, err := Open(dirPath)
	if err != nil {
		return nil, osErrToFileErr(err)
	}
@@ -117,7 +117,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
	}
	for _, fi := range fis {
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			fi, err = os.Stat(pathJoin(dirPath, fi.Name()))
			fi, err = Stat(pathJoin(dirPath, fi.Name()))
			if err != nil {
				// It got deleted in the meantime, not found
				// or returns too many symlinks ignore this
@@ -57,7 +57,7 @@ func reliableRemoveAll(dirPath string) (err error) {
	i := 0
	for {
		// Removes all the directories and files.
		if err = os.RemoveAll(dirPath); err != nil {
		if err = RemoveAll(dirPath); err != nil {
			// Retry only for the first retryable error.
			if isSysErrNotEmpty(err) && i == 0 {
				i++
@@ -101,7 +101,7 @@ func reliableMkdirAll(dirPath string, mode os.FileMode) (err error) {
	i := 0
	for {
		// Creates all the parent directories, with mode 0777 mkdir honors system umask.
		if err = os.MkdirAll(dirPath, mode); err != nil {
		if err = MkdirAll(dirPath, mode); err != nil {
			// Retry only for the first retryable error.
			if osIsNotExist(err) && i == 0 {
				i++
@@ -166,7 +166,7 @@ func reliableRename(srcFilePath, dstFilePath string) (err error) {
	i := 0
	for {
		// After a successful parent directory create attempt a renameAll.
		if err = os.Rename(srcFilePath, dstFilePath); err != nil {
		if err = Rename(srcFilePath, dstFilePath); err != nil {
			// Retry only for the first retryable error.
			if osIsNotExist(err) && i == 0 {
				i++
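All three reliable* helpers retry an operation exactly once when the first failure matches a known-transient class. A compact, self-contained rendering of that retry-once loop (op and transient are stand-ins for the concrete operation and its error classifier):

	// retryOnce runs op, retrying a single time if the first error is transient.
	func retryOnce(op func() error, transient func(error) bool) error {
		for i := 0; ; i++ {
			err := op()
			if err == nil || i > 0 || !transient(err) {
				return err
			}
		}
	}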
@@ -17,14 +17,19 @@
package cmd

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/bcicen/jstream"
	"github.com/minio/minio-go/v7/pkg/set"
)

// startWithConds - map which indicates if a given condition supports starts-with policy operator
@@ -110,8 +115,45 @@ type PostPolicyForm struct {
	}
}

// implemented to ensure that duplicate keys in JSON
// are merged together into a single JSON key, also
// to remove any extraneous JSON bodies.
//
// Go stdlib doesn't support parsing JSON with duplicate
// keys, so we need to use this technique to merge the
// keys.
func sanitizePolicy(r io.Reader) (io.Reader, error) {
	var buf bytes.Buffer
	e := json.NewEncoder(&buf)
	d := jstream.NewDecoder(r, 0).ObjectAsKVS()
	sset := set.NewStringSet()
	for mv := range d.Stream() {
		var kvs jstream.KVS
		if mv.ValueType == jstream.Object {
			// This is a JSON object type (that preserves key order)
			kvs = mv.Value.(jstream.KVS)
			for _, kv := range kvs {
				if sset.Contains(kv.Key) {
					// Reject duplicate conditions or expiration.
					return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key)
				}
				sset.Add(kv.Key)
			}
			e.Encode(kvs)
		}
	}
	return &buf, d.Err()
}

// parsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) {
	reader, err := sanitizePolicy(r)
	if err != nil {
		return PostPolicyForm{}, err
	}

	d := json.NewDecoder(reader)

	// Convert po into interfaces and
	// perform strict type conversion using reflection.
	var rawPolicy struct {
@@ -119,9 +161,9 @@ func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
		Conditions []interface{} `json:"conditions"`
	}

	err := json.Unmarshal([]byte(policy), &rawPolicy)
	if err != nil {
		return ppf, err
	d.DisallowUnknownFields()
	if err := d.Decode(&rawPolicy); err != nil {
		return PostPolicyForm{}, err
	}

	parsedPolicy := PostPolicyForm{}
@@ -129,7 +171,7 @@ func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
	// Parse expiry time.
	parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if err != nil {
		return ppf, err
		return PostPolicyForm{}, err
	}

	// Parse conditions.
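The motivation for sanitizePolicy is that encoding/json silently keeps the last occurrence of a duplicated key, so an attacker could append a second "conditions" array that overrides the signed one. A self-contained demonstration of that stdlib behavior (inputs invented for illustration):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		doc := `{"conditions":[["eq","$bucket","good"]],"conditions":[["eq","$bucket","evil"]]}`
		var p struct {
			Conditions []interface{} `json:"conditions"`
		}
		_ = json.Unmarshal([]byte(doc), &p) // no error: later key wins
		fmt.Println(p.Conditions)           // prints the second, attacker-controlled array
	}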
@@ -17,14 +17,66 @@
package cmd

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"net/http"
	"strings"
	"testing"

	minio "github.com/minio/minio-go/v7"
)

func TestParsePostPolicyForm(t *testing.T) {
	testCases := []struct {
		policy  string
		success bool
	}{
		// missing expiration, will fail.
		{
			policy:  `{"conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
			success: false,
		},
		// invalid json.
		{
			policy:  `{"conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]`,
			success: false,
		},
		// duplicate 'expiration' reject
		{
			policy: `{"expiration":"2021-03-22T09:16:21.310Z","expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","evil"],["eq","$key","hello.txt"],["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
		},
		// duplicate '$bucket' reject
		{
			policy:  `{"expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","good"],["eq","$key","hello.txt"]],"conditions":[["eq","$bucket","evil"],["eq","$key","hello.txt"],["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
			success: false,
		},
		// duplicate conditions, reject
		{
			policy:  `{"expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
			success: false,
		},
		// no duplicates, shall be parsed properly.
		{
			policy:  `{"expiration":"2021-03-27T20:35:28.458Z","conditions":[["eq","$bucket","testbucket"],["eq","$key","wtf.txt"],["eq","$x-amz-date","20210320T203528Z"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210320/us-east-1/s3/aws4_request"]]}`,
			success: true,
		},
	}

	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			_, err := parsePostPolicyForm(strings.NewReader(testCase.policy))
			if testCase.success && err != nil {
				t.Errorf("Expected success but failed with %s", err)
			}
			if !testCase.success && err == nil {
				t.Errorf("Expected failed but succeeded")
			}
		})
	}
}

// Test Post Policy parsing and checking conditions
func TestPostPolicyForm(t *testing.T) {
	pp := minio.NewPostPolicy()
@@ -94,7 +146,7 @@ func TestPostPolicyForm(t *testing.T) {
		t.Fatal(err)
	}

	postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
	postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
	if err != nil {
		t.Fatal(err)
	}
@@ -237,6 +237,28 @@ func newAllSubsystems() {
	globalBucketTargetSys = NewBucketTargetSys()
}

func configRetriableErrors(err error) bool {
	// Initializing sub-systems needs a retry mechanism for
	// the following reasons:
	//  - Read quorum is lost just after the initialization
	//    of the object layer.
	//  - Write quorum not met when upgrading configuration
	//    version is needed, migration is needed etc.
	rquorum := InsufficientReadQuorum{}
	wquorum := InsufficientWriteQuorum{}

	// One of these retriable errors shall be retried.
	return errors.Is(err, errDiskNotFound) ||
		errors.Is(err, errConfigNotFound) ||
		errors.Is(err, context.DeadlineExceeded) ||
		errors.Is(err, errErasureWriteQuorum) ||
		errors.Is(err, errErasureReadQuorum) ||
		errors.As(err, &rquorum) ||
		errors.As(err, &wquorum) ||
		isErrBucketNotFound(err) ||
		errors.Is(err, os.ErrDeadlineExceeded)
}

func initServer(ctx context.Context, newObject ObjectLayer) error {
	// Once the config is fully loaded, initialize the new object layer.
	setObjectLayer(newObject)
@@ -252,15 +274,6 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
	// Migrating to encrypted backend should happen before initialization of any
	// sub-systems, make sure that we do not move the above codeblock elsewhere.

	// Initializing sub-systems needs a retry mechanism for
	// the following reasons:
	//  - Read quorum is lost just after the initialization
	//    of the object layer.
	//  - Write quorum not met when upgrading configuration
	//    version is needed, migration is needed etc.
	rquorum := InsufficientReadQuorum{}
	wquorum := InsufficientWriteQuorum{}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)
@@ -307,15 +320,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {

		txnLk.Unlock() // Unlock the transaction lock and allow other nodes to acquire the lock if possible.

		// One of these retriable errors shall be retried.
		if errors.Is(err, errDiskNotFound) ||
			errors.Is(err, errConfigNotFound) ||
			errors.Is(err, context.DeadlineExceeded) ||
			errors.Is(err, errErasureWriteQuorum) ||
			errors.Is(err, errErasureReadQuorum) ||
			errors.As(err, &rquorum) ||
			errors.As(err, &wquorum) ||
			isErrBucketNotFound(err) {
		if configRetriableErrors(err) {
			logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
			time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
			continue
@@ -333,8 +338,6 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
	// you want to add extra context to your error. This
	// ensures top level retry works accordingly.
	// List buckets to heal, and be re-used for loading configs.
	rquorum := InsufficientReadQuorum{}
	wquorum := InsufficientWriteQuorum{}

	buckets, err := newObject.ListBuckets(ctx)
	if err != nil {
@@ -368,14 +371,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {

	// Initialize config system.
	if err = globalConfigSys.Init(newObject); err != nil {
		if errors.Is(err, errDiskNotFound) ||
			errors.Is(err, errConfigNotFound) ||
			errors.Is(err, context.DeadlineExceeded) ||
			errors.Is(err, errErasureWriteQuorum) ||
			errors.Is(err, errErasureReadQuorum) ||
			errors.As(err, &rquorum) ||
			errors.As(err, &wquorum) ||
			isErrBucketNotFound(err) {
		if configRetriableErrors(err) {
			return fmt.Errorf("Unable to initialize config system: %w", err)
		}
		// Any other config errors we simply print a message and proceed forward.
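The consolidation above hinges on the difference between errors.Is (sentinel comparison through wrap chains) and errors.As (concrete-type match). A compact, self-contained illustration of both, with error types invented for the example:

	package main

	import (
		"errors"
		"fmt"
	)

	var errDiskNotFound = errors.New("disk not found") // sentinel-style error

	type insufficientQuorum struct{ have, want int } // typed error

	func (e insufficientQuorum) Error() string {
		return fmt.Sprintf("quorum %d/%d", e.have, e.want)
	}

	func retriable(err error) bool {
		var q insufficientQuorum
		return errors.Is(err, errDiskNotFound) || errors.As(err, &q)
	}

	func main() {
		fmt.Println(retriable(fmt.Errorf("read: %w", errDiskNotFound))) // true: wrapped sentinel
		fmt.Println(retriable(insufficientQuorum{1, 2}))                // true: type match
		fmt.Println(retriable(errors.New("other")))                     // false
	}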
@@ -37,9 +37,18 @@ type DiskInfo struct {
	Endpoint  string
	MountPath string
	ID        string
	Metrics   DiskMetrics
	Error     string // carries the error over the network
}

// DiskMetrics has the information about XL Storage APIs
// the number of calls of each API and the moving average of
// the duration of each API.
type DiskMetrics struct {
	APILatencies map[string]string `json:"apiLatencies,omitempty"`
	APICalls     map[string]uint64 `json:"apiCalls,omitempty"`
}

// VolsInfo is a collection of volume(bucket) information
type VolsInfo []VolInfo
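Given the json tags above, a populated DiskMetrics would serialize roughly as follows (a fragment assuming encoding/json and fmt are imported; values invented for illustration):

	m := DiskMetrics{
		APILatencies: map[string]string{"ReadFile": "1.2ms"},
		APICalls:     map[string]uint64{"ReadFile": 42},
	}
	b, _ := json.Marshal(m)
	fmt.Println(string(b))
	// {"apiLatencies":{"ReadFile":"1.2ms"},"apiCalls":{"ReadFile":42}}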
@@ -14,8 +14,8 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {
		err = msgp.WrapError(err)
		return
	}
	if zb0001 != 11 {
		err = msgp.ArrayError{Wanted: 11, Got: zb0001}
	if zb0001 != 12 {
		err = msgp.ArrayError{Wanted: 12, Got: zb0001}
		return
	}
	z.Total, err = dc.ReadUint64()
@@ -68,6 +68,11 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {
		err = msgp.WrapError(err, "ID")
		return
	}
	err = z.Metrics.DecodeMsg(dc)
	if err != nil {
		err = msgp.WrapError(err, "Metrics")
		return
	}
	z.Error, err = dc.ReadString()
	if err != nil {
		err = msgp.WrapError(err, "Error")
@@ -78,8 +83,8 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
	// array header, size 11
	err = en.Append(0x9b)
	// array header, size 12
	err = en.Append(0x9c)
	if err != nil {
		return
	}
@@ -133,6 +138,11 @@ func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "ID")
		return
	}
	err = z.Metrics.EncodeMsg(en)
	if err != nil {
		err = msgp.WrapError(err, "Metrics")
		return
	}
	err = en.WriteString(z.Error)
	if err != nil {
		err = msgp.WrapError(err, "Error")
@@ -144,8 +154,8 @@ func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *DiskInfo) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// array header, size 11
	o = append(o, 0x9b)
	// array header, size 12
	o = append(o, 0x9c)
	o = msgp.AppendUint64(o, z.Total)
	o = msgp.AppendUint64(o, z.Free)
	o = msgp.AppendUint64(o, z.Used)
@@ -156,6 +166,11 @@ func (z *DiskInfo) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.AppendString(o, z.Endpoint)
	o = msgp.AppendString(o, z.MountPath)
	o = msgp.AppendString(o, z.ID)
	o, err = z.Metrics.MarshalMsg(o)
	if err != nil {
		err = msgp.WrapError(err, "Metrics")
		return
	}
	o = msgp.AppendString(o, z.Error)
	return
}
@@ -168,8 +183,8 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
		err = msgp.WrapError(err)
		return
	}
	if zb0001 != 11 {
		err = msgp.ArrayError{Wanted: 11, Got: zb0001}
	if zb0001 != 12 {
		err = msgp.ArrayError{Wanted: 12, Got: zb0001}
		return
	}
	z.Total, bts, err = msgp.ReadUint64Bytes(bts)
@@ -222,6 +237,11 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
		err = msgp.WrapError(err, "ID")
		return
	}
	bts, err = z.Metrics.UnmarshalMsg(bts)
	if err != nil {
		err = msgp.WrapError(err, "Metrics")
		return
	}
	z.Error, bts, err = msgp.ReadStringBytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "Error")
@@ -233,7 +253,276 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskInfo) Msgsize() (s int) {
	s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.StringPrefixSize + len(z.FSType) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.Endpoint) + msgp.StringPrefixSize + len(z.MountPath) + msgp.StringPrefixSize + len(z.ID) + msgp.StringPrefixSize + len(z.Error)
	s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.StringPrefixSize + len(z.FSType) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.Endpoint) + msgp.StringPrefixSize + len(z.MountPath) + msgp.StringPrefixSize + len(z.ID) + z.Metrics.Msgsize() + msgp.StringPrefixSize + len(z.Error)
	return
}

// DecodeMsg implements msgp.Decodable
func (z *DiskMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "APILatencies":
			var zb0002 uint32
			zb0002, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "APILatencies")
				return
			}
			if z.APILatencies == nil {
				z.APILatencies = make(map[string]string, zb0002)
			} else if len(z.APILatencies) > 0 {
				for key := range z.APILatencies {
					delete(z.APILatencies, key)
				}
			}
			for zb0002 > 0 {
				zb0002--
				var za0001 string
				var za0002 string
				za0001, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APILatencies")
					return
				}
				za0002, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APILatencies", za0001)
					return
				}
				z.APILatencies[za0001] = za0002
			}
		case "APICalls":
			var zb0003 uint32
			zb0003, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "APICalls")
				return
			}
			if z.APICalls == nil {
				z.APICalls = make(map[string]uint64, zb0003)
			} else if len(z.APICalls) > 0 {
				for key := range z.APICalls {
					delete(z.APICalls, key)
				}
			}
			for zb0003 > 0 {
				zb0003--
				var za0003 string
				var za0004 uint64
				za0003, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APICalls")
					return
				}
				za0004, err = dc.ReadUint64()
				if err != nil {
					err = msgp.WrapError(err, "APICalls", za0003)
					return
				}
				z.APICalls[za0003] = za0004
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *DiskMetrics) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "APILatencies"
	err = en.Append(0x82, 0xac, 0x41, 0x50, 0x49, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
	if err != nil {
		return
	}
	err = en.WriteMapHeader(uint32(len(z.APILatencies)))
	if err != nil {
		err = msgp.WrapError(err, "APILatencies")
		return
	}
	for za0001, za0002 := range z.APILatencies {
		err = en.WriteString(za0001)
		if err != nil {
			err = msgp.WrapError(err, "APILatencies")
			return
		}
		err = en.WriteString(za0002)
		if err != nil {
			err = msgp.WrapError(err, "APILatencies", za0001)
			return
		}
	}
	// write "APICalls"
	err = en.Append(0xa8, 0x41, 0x50, 0x49, 0x43, 0x61, 0x6c, 0x6c, 0x73)
	if err != nil {
		return
	}
	err = en.WriteMapHeader(uint32(len(z.APICalls)))
	if err != nil {
		err = msgp.WrapError(err, "APICalls")
		return
	}
	for za0003, za0004 := range z.APICalls {
		err = en.WriteString(za0003)
		if err != nil {
			err = msgp.WrapError(err, "APICalls")
			return
		}
		err = en.WriteUint64(za0004)
		if err != nil {
			err = msgp.WrapError(err, "APICalls", za0003)
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *DiskMetrics) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "APILatencies"
	o = append(o, 0x82, 0xac, 0x41, 0x50, 0x49, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.APILatencies)))
	for za0001, za0002 := range z.APILatencies {
		o = msgp.AppendString(o, za0001)
		o = msgp.AppendString(o, za0002)
	}
	// string "APICalls"
	o = append(o, 0xa8, 0x41, 0x50, 0x49, 0x43, 0x61, 0x6c, 0x6c, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.APICalls)))
	for za0003, za0004 := range z.APICalls {
		o = msgp.AppendString(o, za0003)
		o = msgp.AppendUint64(o, za0004)
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *DiskMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "APILatencies":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "APILatencies")
				return
			}
			if z.APILatencies == nil {
				z.APILatencies = make(map[string]string, zb0002)
			} else if len(z.APILatencies) > 0 {
				for key := range z.APILatencies {
					delete(z.APILatencies, key)
				}
			}
			for zb0002 > 0 {
				var za0001 string
				var za0002 string
				zb0002--
				za0001, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APILatencies")
					return
				}
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APILatencies", za0001)
					return
				}
				z.APILatencies[za0001] = za0002
			}
		case "APICalls":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "APICalls")
				return
			}
			if z.APICalls == nil {
				z.APICalls = make(map[string]uint64, zb0003)
			} else if len(z.APICalls) > 0 {
				for key := range z.APICalls {
					delete(z.APICalls, key)
				}
			}
			for zb0003 > 0 {
				var za0003 string
				var za0004 uint64
				zb0003--
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APICalls")
					return
				}
				za0004, bts, err = msgp.ReadUint64Bytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APICalls", za0003)
					return
				}
				z.APICalls[za0003] = za0004
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskMetrics) Msgsize() (s int) {
	s = 1 + 13 + msgp.MapHeaderSize
	if z.APILatencies != nil {
		for za0001, za0002 := range z.APILatencies {
			_ = za0002
			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
		}
	}
	s += 9 + msgp.MapHeaderSize
	if z.APICalls != nil {
		for za0003, za0004 := range z.APICalls {
			_ = za0004
			s += msgp.StringPrefixSize + len(za0003) + msgp.Uint64Size
		}
	}
	return
}

@@ -122,6 +122,119 @@ func BenchmarkDecodeDiskInfo(b *testing.B) {
	}
}

func TestMarshalUnmarshalDiskMetrics(t *testing.T) {
	v := DiskMetrics{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeDiskMetrics(t *testing.T) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeDiskMetrics Msgsize() is inaccurate")
	}

	vn := DiskMetrics{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalFileInfo(t *testing.T) {
	v := FileInfo{}
	bts, err := v.MarshalMsg(nil)
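These generated tests exercise the standard tinylib/msgp round trip; the same pattern in miniature, using the DiskMetrics type defined above (error handling elided for brevity):

	v := DiskMetrics{APICalls: map[string]uint64{"StatVol": 7}}
	bts, err := v.MarshalMsg(nil) // appends the MessagePack encoding to a nil slice
	if err != nil {
		// handle the encode error
	}
	var out DiskMetrics
	if _, err := out.UnmarshalMsg(bts); err != nil {
		// handle the decode error
	}
	// out.APICalls["StatVol"] == 7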
@@ -337,6 +337,10 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path st
	values.Set(storageRESTFilePath, path)
	values.Set(storageRESTLength, strconv.Itoa(int(size)))
	respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, ioutil.NopCloser(reader), size)
	if err != nil {
		return err
	}
	_, err = waitForHTTPResponse(respBody)
	defer http.DrainBody(respBody)
	return err
}

@@ -287,10 +287,9 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
		s.writeErrorResponse(w, err)
		return
	}
	err = s.storage.CreateFile(r.Context(), volume, filePath, int64(fileSize), r.Body)
	if err != nil {
		s.writeErrorResponse(w, err)
	}

	done := keepHTTPResponseAlive(w)
	done(s.storage.CreateFile(r.Context(), volume, filePath, int64(fileSize), r.Body))
}

// DeleteVersion delete updated metadata.
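With this change the handler streams its status through keepHTTPResponseAlive while the client blocks in waitForHTTPResponse, so a long-running CreateFile cannot trip intermediate proxy idle timeouts. A simplified stand-in for that trickle pattern (this is not MinIO's actual wire format, which is not shown in this diff; assumes net/http and time are imported):

	// keepAlive writes a heartbeat byte periodically until done(err) is called.
	func keepAlive(w http.ResponseWriter) func(error) {
		ch := make(chan error)
		go func() {
			tick := time.NewTicker(10 * time.Second)
			defer tick.Stop()
			for {
				select {
				case <-tick.C:
					w.Write([]byte{' '}) // heartbeat keeps the connection warm
					if f, ok := w.(http.Flusher); ok {
						f.Flush()
					}
				case err := <-ch:
					if err != nil {
						w.Write([]byte(err.Error())) // then the real outcome
					}
					return
				}
			}
		}()
		return func(err error) { ch <- err }
	}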
46
cmd/storagemetric_string.go
Normal file
@@ -0,0 +1,46 @@
// Code generated by "stringer -type=storageMetric -trimprefix=storageMetric xl-storage-disk-id-check.go"; DO NOT EDIT.

package cmd

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[storageMetricMakeVolBulk-0]
	_ = x[storageMetricMakeVol-1]
	_ = x[storageMetricListVols-2]
	_ = x[storageMetricStatVol-3]
	_ = x[storageMetricDeleteVol-4]
	_ = x[storageMetricWalkDir-5]
	_ = x[storageMetricListDir-6]
	_ = x[storageMetricReadFile-7]
	_ = x[storageMetricAppendFile-8]
	_ = x[storageMetricCreateFile-9]
	_ = x[storageMetricReadFileStream-10]
	_ = x[storageMetricRenameFile-11]
	_ = x[storageMetricRenameData-12]
	_ = x[storageMetricCheckParts-13]
	_ = x[storageMetricCheckFile-14]
	_ = x[storageMetricDelete-15]
	_ = x[storageMetricDeleteVersions-16]
	_ = x[storageMetricVerifyFile-17]
	_ = x[storageMetricWriteAll-18]
	_ = x[storageMetricDeleteVersion-19]
	_ = x[storageMetricWriteMetadata-20]
	_ = x[storageMetricReadVersion-21]
	_ = x[storageMetricReadAll-22]
	_ = x[metricLast-23]
}

const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataReadVersionReadAllmetricLast"

var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 212, 219, 229}

func (i storageMetric) String() string {
	if i >= storageMetric(len(_storageMetric_index)-1) {
		return "storageMetric(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _storageMetric_name[_storageMetric_index[i]:_storageMetric_index[i+1]]
}
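The generated String() is what turns the numeric metric indices into the readable map keys seen in DiskMetrics. For example (a fragment assuming fmt is imported):

	fmt.Println(storageMetricReadFile.String()) // "ReadFile"
	fmt.Println(storageMetric(99).String())     // "storageMetric(99)" for out-of-range values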
@@ -498,20 +498,11 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *
	}

	// Check if this user or their groups have a policy applied.
	globalIAMSys.Lock()
	found := false
	if _, ok := globalIAMSys.iamUserPolicyMap[ldapUserDN]; ok {
		found = true
	}
	for _, groupDistName := range groupDistNames {
		if _, ok := globalIAMSys.iamGroupPolicyMap[groupDistName]; ok {
			found = true
			break
		}
	}
	globalIAMSys.Unlock()
	if !found {
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("expecting a policy to be set for user `%s` or one of their groups: `%s` - rejecting this request", ldapUserDN, strings.Join(groupDistNames, "`,`")))
	ldapPolicies, _ := globalIAMSys.PolicyDBGet(ldapUserDN, false, groupDistNames...)
	if len(ldapPolicies) == 0 {
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue,
			fmt.Errorf("expecting a policy to be set for user `%s` or one of their groups: `%s` - rejecting this request",
				ldapUserDN, strings.Join(groupDistNames, "`,`")))
		return
	}
@@ -226,7 +226,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
	reply.UIVersion = Version

	reqParams := extractReqParams(r)
	reqParams["accessKey"] = claims.AccessKey
	reqParams["accessKey"] = claims.GetAccessKey()

	sendEvent(eventArgs{
		EventName: event.BucketCreated,
@@ -723,7 +723,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
	)

	reqParams := extractReqParams(r)
	reqParams["accessKey"] = claims.AccessKey
	reqParams["accessKey"] = claims.GetAccessKey()
	sourceIP := handlers.GetSourceIP(r)

next:
@@ -767,7 +767,7 @@ next:
			}
			if hasReplicationRules(ctx, args.BucketName, []ObjectToDelete{{ObjectName: objectName}}) || hasLifecycleConfig {
				goi, gerr = getObjectInfoFn(ctx, args.BucketName, objectName, opts)
				if _, replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{
				if replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{
					ObjectName: objectName,
					VersionID:  goi.VersionID,
				}, goi, gerr); replicateDel {
@@ -903,7 +903,7 @@ next:
					}
				}
			}
			_, replicateDel, _ := checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ObjectName: obj.Name, VersionID: obj.VersionID}, obj, nil)
			replicateDel, _ := checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ObjectName: obj.Name, VersionID: obj.VersionID}, obj, nil)
			// since versioned delete is not available on web browser, yet - this is a simple DeleteMarker replication
			objToDel := ObjectToDelete{ObjectName: obj.Name}
			if replicateDel {
@@ -1340,7 +1340,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
	}

	reqParams := extractReqParams(r)
	reqParams["accessKey"] = claims.AccessKey
	reqParams["accessKey"] = claims.GetAccessKey()

	// Notify object created event.
	sendEvent(eventArgs{
@@ -1529,7 +1529,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
	}

	reqParams := extractReqParams(r)
	reqParams["accessKey"] = claims.AccessKey
	reqParams["accessKey"] = claims.GetAccessKey()

	// Notify object accessed via a GET request.
	sendEvent(eventArgs{
@@ -1684,7 +1684,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
	defer archive.Close()

	reqParams := extractReqParams(r)
	reqParams["accessKey"] = claims.AccessKey
	reqParams["accessKey"] = claims.GetAccessKey()
	respElements := extractRespElements(w)

	for i, object := range args.Objects {
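The repeated claims.AccessKey to claims.GetAccessKey() swap reads like a move from direct field access to a nil-safe accessor. A hypothetical shape for such a getter, not taken from this diff:

	// Hypothetical accessor: returns "" instead of panicking on a nil receiver.
	func (c *Claims) GetAccessKey() string {
		if c == nil {
			return ""
		}
		return c.AccessKey
	}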
@@ -19,12 +19,104 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
ewma "github.com/VividCortex/ewma"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=storageMetric -trimprefix=storageMetric $GOFILE
|
||||
|
||||
type storageMetric uint8
|
||||
|
||||
const (
|
||||
storageMetricMakeVolBulk storageMetric = iota
|
||||
storageMetricMakeVol
|
||||
storageMetricListVols
|
||||
storageMetricStatVol
|
||||
storageMetricDeleteVol
|
||||
storageMetricWalkDir
|
||||
storageMetricListDir
|
||||
storageMetricReadFile
|
||||
storageMetricAppendFile
|
||||
storageMetricCreateFile
|
||||
storageMetricReadFileStream
|
||||
storageMetricRenameFile
|
||||
storageMetricRenameData
|
||||
storageMetricCheckParts
|
||||
storageMetricCheckFile
|
||||
storageMetricDelete
|
||||
storageMetricDeleteVersions
|
||||
storageMetricVerifyFile
|
||||
storageMetricWriteAll
|
||||
storageMetricDeleteVersion
|
||||
storageMetricWriteMetadata
|
||||
storageMetricReadVersion
|
||||
storageMetricReadAll
|
||||
|
||||
// .... add more
|
||||
|
||||
metricLast
|
||||
)
|
||||
|
||||
// Detects change in underlying disk.
|
||||
type xlStorageDiskIDCheck struct {
|
||||
storage *xlStorage
|
||||
storage StorageAPI
|
||||
diskID string
|
||||
|
||||
apiCalls [metricLast]uint64
|
||||
apiLatencies [metricLast]ewma.MovingAverage
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics {
|
||||
diskMetric := DiskMetrics{
|
||||
APILatencies: make(map[string]string),
|
||||
APICalls: make(map[string]uint64),
|
||||
}
|
||||
for i, v := range p.apiLatencies {
|
||||
diskMetric.APILatencies[storageMetric(i).String()] = time.Duration(v.Value()).String()
|
||||
}
|
||||
for i := range p.apiCalls {
|
||||
diskMetric.APICalls[storageMetric(i).String()] = atomic.LoadUint64(&p.apiCalls[i])
|
||||
}
|
||||
return diskMetric
|
||||
}
|
||||
|
||||
type lockedSimpleEWMA struct {
|
||||
sync.RWMutex
|
||||
*ewma.SimpleEWMA
|
||||
}
|
||||
|
||||
func (e *lockedSimpleEWMA) Add(value float64) {
|
||||
e.Lock()
|
||||
defer e.Unlock()
|
||||
e.SimpleEWMA.Add(value)
|
||||
}
|
||||
|
||||
func (e *lockedSimpleEWMA) Set(value float64) {
|
||||
e.Lock()
|
||||
defer e.Unlock()
|
||||
|
||||
e.SimpleEWMA.Set(value)
|
||||
}
|
||||
|
||||
func (e *lockedSimpleEWMA) Value() float64 {
|
||||
e.RLock()
|
||||
defer e.RUnlock()
|
||||
return e.SimpleEWMA.Value()
|
||||
}
|
||||
|
||||
func newXLStorageDiskIDCheck(storage *xlStorage) *xlStorageDiskIDCheck {
|
||||
xl := xlStorageDiskIDCheck{
|
||||
storage: storage,
|
||||
}
|
||||
for i := range xl.apiLatencies[:] {
|
||||
xl.apiLatencies[i] = &lockedSimpleEWMA{
|
||||
SimpleEWMA: new(ewma.SimpleEWMA),
|
||||
}
|
||||
}
|
||||
return &xl
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) String() string {
|
||||
@@ -117,6 +209,8 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context) (info DiskInfo, err
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
info.Metrics = p.getMetrics()
|
||||
// check cached diskID against backend
|
||||
// only if its non-empty.
|
||||
if p.diskID != "" {
|
||||
@@ -128,6 +222,8 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context) (info DiskInfo, err
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...string) (err error) {
|
||||
defer p.updateStorageMetrics(storageMetricMakeVolBulk)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -141,6 +237,8 @@ func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...strin
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err error) {
|
||||
defer p.updateStorageMetrics(storageMetricMakeVol)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -154,6 +252,8 @@ func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) ([]VolInfo, error) {
|
||||
defer p.updateStorageMetrics(storageMetricListVols)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
@@ -167,6 +267,8 @@ func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) ([]VolInfo, error)
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
|
||||
defer p.updateStorageMetrics(storageMetricStatVol)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return VolInfo{}, ctx.Err()
|
||||
@@ -180,6 +282,8 @@ func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
|
||||
defer p.updateStorageMetrics(storageMetricDeleteVol)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -193,6 +297,8 @@ func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, for
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error) {
|
||||
defer p.updateStorageMetrics(storageMetricListDir)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
@@ -207,6 +313,8 @@ func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath stri
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
|
||||
defer p.updateStorageMetrics(storageMetricReadFile)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
@@ -221,6 +329,8 @@ func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
|
||||
defer p.updateStorageMetrics(storageMetricAppendFile)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -235,6 +345,8 @@ func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, pa
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
|
||||
defer p.updateStorageMetrics(storageMetricCreateFile)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -249,6 +361,8 @@ func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path stri
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
|
||||
defer p.updateStorageMetrics(storageMetricReadFileStream)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
@@ -263,6 +377,8 @@ func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path
|
||||
}
|
||||
|
||||
func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error {
|
||||
defer p.updateStorageMetrics(storageMetricRenameFile)()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
@@ -277,6 +393,8 @@ func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPat
}

func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath, dataDir, dstVolume, dstPath string) error {
defer p.updateStorageMetrics(storageMetricRenameData)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -291,6 +409,8 @@ func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPat
}

func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) {
defer p.updateStorageMetrics(storageMetricCheckParts)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -305,6 +425,8 @@ func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, pa
}

func (p *xlStorageDiskIDCheck) CheckFile(ctx context.Context, volume string, path string) (err error) {
defer p.updateStorageMetrics(storageMetricCheckFile)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -319,6 +441,8 @@ func (p *xlStorageDiskIDCheck) CheckFile(ctx context.Context, volume string, pat
}

func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
defer p.updateStorageMetrics(storageMetricDelete)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -333,6 +457,18 @@ func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path s
}

func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) (errs []error) {
defer p.updateStorageMetrics(storageMetricDeleteVersions)()

select {
case <-ctx.Done():
errs = make([]error, len(versions))
for i := range errs {
errs[i] = ctx.Err()
}
return errs
default:
}

if err := p.checkDiskStale(); err != nil {
errs = make([]error, len(versions))
for i := range errs {
@@ -344,6 +480,8 @@ func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string
}

func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error {
defer p.updateStorageMetrics(storageMetricVerifyFile)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -358,6 +496,8 @@ func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path stri
}

func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
defer p.updateStorageMetrics(storageMetricWriteAll)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -372,6 +512,8 @@ func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path
}

func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) (err error) {
defer p.updateStorageMetrics(storageMetricDeleteVersion)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -386,6 +528,8 @@ func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path s
}

func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
defer p.updateStorageMetrics(storageMetricWriteMetadata)()

select {
case <-ctx.Done():
return ctx.Err()
@@ -400,6 +544,8 @@ func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path s
}

func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
defer p.updateStorageMetrics(storageMetricReadVersion)()

select {
case <-ctx.Done():
return fi, ctx.Err()
@@ -414,6 +560,8 @@ func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, ve
}

func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
defer p.updateStorageMetrics(storageMetricReadAll)()

select {
case <-ctx.Done():
return nil, ctx.Err()
@@ -426,3 +574,12 @@ func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path

return p.storage.ReadAll(ctx, volume, path)
}

// Update storage metrics
func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric) func() {
startTime := time.Now()
return func() {
atomic.AddUint64(&p.apiCalls[s], 1)
p.apiLatencies[s].Add(float64(time.Since(startTime)))
}
}
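Every wrapper above follows the same pattern: a single deferred call both counts the API invocation and records how long it ran. A minimal, self-contained sketch of that defer-a-closure pattern (the `tracker` type, fixed-size metric array, and cumulative-nanosecond latency sink are assumptions for illustration, not MinIO's actual types):

```go
package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

type tracker struct {
    calls     [8]uint64
    latencyNS [8]uint64 // cumulative nanoseconds; enough to derive an average
}

// update returns a closure so that a single `defer t.update(i)()` line both
// counts the call and measures how long the surrounding function ran.
func (t *tracker) update(i int) func() {
    startTime := time.Now()
    return func() {
        atomic.AddUint64(&t.calls[i], 1)
        atomic.AddUint64(&t.latencyNS[i], uint64(time.Since(startTime)))
    }
}

func main() {
    var t tracker
    work := func() {
        defer t.update(0)() // trailing (): capture start now, record at exit
        time.Sleep(10 * time.Millisecond)
    }
    work()
    fmt.Println(atomic.LoadUint64(&t.calls[0]), "call(s),",
        time.Duration(atomic.LoadUint64(&t.latencyNS[0])), "total")
}
```

The trailing `()` matters: `update` executes immediately to capture the start time, while the closure it returns is what `defer` actually runs at function exit.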
@@ -33,7 +33,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/dustin/go-humanize"
@@ -72,6 +71,13 @@ const (
xlStorageFormatFile = "xl.meta"
)

var alignedBuf []byte

func init() {
alignedBuf = disk.AlignedBlock(4096)
_, _ = rand.Read(alignedBuf)
}

// isValidVolname verifies a volname name in accordance with object
// layer requirements.
func isValidVolname(volname string) bool {
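The `init` hunk above pre-fills a 4 KiB aligned random block that `newXLStorage` later writes with O_DIRECT to decide whether a drive supports direct I/O at all. A hedged, Linux-only sketch of that probe idea (the manual alignment helper and probe file name are illustrative, not the `disk` package's API):

```go
package main

import (
    "fmt"
    "os"
    "syscall"
    "unsafe"
)

// alignedBlock returns a size-byte slice whose base address is 4096-aligned,
// which is what O_DIRECT typically requires on Linux.
func alignedBlock(size int) []byte {
    buf := make([]byte, size+4096)
    off := 0
    if r := uintptr(unsafe.Pointer(&buf[0])) % 4096; r != 0 {
        off = int(4096 - r)
    }
    return buf[off : off+size]
}

func main() {
    // O_DIRECT is a Linux flag; this probe is not portable as written.
    f, err := os.OpenFile("probe.tmp", os.O_CREATE|os.O_WRONLY|syscall.O_DIRECT, 0o666)
    if err != nil {
        fmt.Println("open failed:", err)
        return
    }
    defer os.Remove("probe.tmp")
    defer f.Close()
    if _, err := f.Write(alignedBlock(4096)); err != nil {
        fmt.Println("drive does not appear to support O_DIRECT writes:", err)
        return
    }
    fmt.Println("O_DIRECT write probe succeeded")
}
```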
@@ -89,8 +95,6 @@ func isValidVolname(volname string) bool {

// xlStorage - implements StorageAPI interface.
type xlStorage struct {
activeIOCount int32

diskPath string
endpoint Endpoint

@@ -101,8 +105,6 @@ type xlStorage struct {

rootDisk bool

readODirectSupported bool

diskID string

// Indexes, will be -1 until assigned a set.
@@ -170,7 +172,7 @@ func getValidPath(path string) (string, error) {
return path, err
}

fi, err := os.Lstat(path)
fi, err := Lstat(path)
if err != nil && !osIsNotExist(err) {
return path, err
}
@@ -227,17 +229,17 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
// is ephemeral and we should treat it
// as root disk from the baremetal
// terminology.
rootDisk, err = disk.IsRootDisk(path, "/")
rootDisk, err = disk.IsRootDisk(path, SlashSeparator)
if err != nil {
return nil, err
}
if !rootDisk {
// No root disk was found, it's possible that
// path is referenced at "/data" which has
// path is referenced at "/etc/hosts" which has
// different device ID that points to the original
// "/" on the host system, fall back to that instead
// to verify if the device ID is the same.
rootDisk, err = disk.IsRootDisk(path, "/data")
rootDisk, err = disk.IsRootDisk(path, "/etc/hosts")
if err != nil {
return nil, err
}
@@ -245,7 +247,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {

} else {
// On baremetal setups "/" is always the root disk.
rootDisk, err = disk.IsRootDisk(path, "/")
rootDisk, err = disk.IsRootDisk(path, SlashSeparator)
if err != nil {
return nil, err
}
@@ -267,13 +269,12 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
return &b
},
},
globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
ctx: GlobalContext,
rootDisk: rootDisk,
readODirectSupported: true,
poolIndex: -1,
setIndex: -1,
diskIndex: -1,
globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
ctx: GlobalContext,
rootDisk: rootDisk,
poolIndex: -1,
setIndex: -1,
diskIndex: -1,
}

// Create all necessary bucket folders if possible.
@@ -285,25 +286,17 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
var rnd [8]byte
_, _ = rand.Read(rnd[:])
tmpFile := ".writable-check-" + hex.EncodeToString(rnd[:]) + ".tmp"
if err = p.CreateFile(GlobalContext, minioMetaTmpBucket, tmpFile, 1, strings.NewReader("0")); err != nil {
return p, err
}
defer os.Remove(pathJoin(p.diskPath, minioMetaTmpBucket, tmpFile))

volumeDir, err := p.getVolDir(minioMetaTmpBucket)
filePath := pathJoin(p.diskPath, minioMetaTmpBucket, tmpFile)
w, err := disk.OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
if err != nil {
return p, err
}

// Check if backend is readable, and optionally supports O_DIRECT.
if _, err = p.readAllData(volumeDir, pathJoin(volumeDir, tmpFile), true); err != nil {
if err != errUnsupportedDisk {
return p, err
}
// error is unsupported disk, turn-off directIO for reads
logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive %s does not support O_DIRECT for reads, proceeding to use the drive without O_DIRECT", ep), ep.String())
p.readODirectSupported = false
if _, err = w.Write(alignedBuf[:]); err != nil {
w.Close()
return p, err
}
w.Close()
defer Remove(filePath)

// Success.
return p, nil
@@ -354,6 +347,8 @@ func (s *xlStorage) IsLocal() bool {

// Retrieve location indexes.
func (s *xlStorage) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
s.RLock()
defer s.RUnlock()
// If unset, see if we can locate it.
if s.poolIndex < 0 || s.setIndex < 0 || s.diskIndex < 0 {
return getXLDiskLoc(s.diskID)
@@ -458,11 +453,6 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache) (dataUs
// DiskInfo provides current information about disk space usage,
// total free inodes and underlying filesystem.
func (s *xlStorage) DiskInfo(context.Context) (info DiskInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

s.diskInfoCache.Once.Do(func() {
s.diskInfoCache.TTL = time.Second
s.diskInfoCache.Update = func() (interface{}, error) {
@@ -537,11 +527,11 @@ func (s *xlStorage) GetDiskID() (string, error) {
s.Unlock()

formatFile := pathJoin(s.diskPath, minioMetaBucket, formatConfigFile)
fi, err := os.Lstat(formatFile)
fi, err := Lstat(formatFile)
if err != nil {
// If the disk is still not initialized.
if osIsNotExist(err) {
_, err = os.Lstat(s.diskPath)
_, err = Lstat(s.diskPath)
if err == nil {
// Disk is present but missing `format.json`
return "", errUnformattedDisk
@@ -572,7 +562,7 @@ func (s *xlStorage) GetDiskID() (string, error) {
if err != nil {
// If the disk is still not initialized.
if osIsNotExist(err) {
_, err = os.Lstat(s.diskPath)
_, err = Lstat(s.diskPath)
if err == nil {
// Disk is present but missing `format.json`
return "", errUnformattedDisk
@@ -630,17 +620,12 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {
return errInvalidArgument
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

if _, err := os.Lstat(volumeDir); err != nil {
if _, err := Lstat(volumeDir); err != nil {
// Volume does not exist, we proceed to create.
if osIsNotExist(err) {
// Make a volume entry, with mode 0777 mkdir honors system umask.
@@ -660,11 +645,6 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {

// ListVols - list volumes.
func (s *xlStorage) ListVols(context.Context) (volsInfo []VolInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

return listVols(s.diskPath)
}

@@ -692,11 +672,6 @@ func listVols(dirPath string) ([]VolInfo, error) {

// StatVol - get volume info.
func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@@ -704,7 +679,7 @@ func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, er
}
// Stat a volume entry.
var st os.FileInfo
st, err = os.Lstat(volumeDir)
st, err = Lstat(volumeDir)
if err != nil {
switch {
case osIsNotExist(err):
@@ -728,11 +703,6 @@ func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, er

// DeleteVol - delete a volume.
func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@@ -740,9 +710,9 @@ func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bo
}

if forceDelete {
err = os.RemoveAll(volumeDir)
err = RemoveAll(volumeDir)
} else {
err = os.Remove(volumeDir)
err = Remove(volumeDir)
}

if err != nil {
@@ -768,7 +738,7 @@ func (s *xlStorage) isLeaf(volume string, leafPath string) bool {
return false
}

_, err = os.Lstat(pathJoin(volumeDir, leafPath, xlStorageFormatFile))
_, err = Lstat(pathJoin(volumeDir, leafPath, xlStorageFormatFile))
if err == nil {
return true
}
@@ -782,23 +752,9 @@ func (s *xlStorage) isLeaf(volume string, leafPath string) bool {
return false
}

func (s *xlStorage) isLeafDir(volume, leafPath string) bool {
volumeDir, err := s.getVolDir(volume)
if err != nil {
return false
}

return isDirEmpty(pathJoin(volumeDir, leafPath))
}

// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@@ -813,7 +769,7 @@ func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count i
}
if err != nil {
if err == errFileNotFound {
if _, verr := os.Lstat(volumeDir); verr != nil {
if _, verr := Lstat(volumeDir); verr != nil {
if osIsNotExist(verr) {
return nil, errVolumeNotFound
} else if isSysErrIO(verr) {
@@ -831,6 +787,7 @@ func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count i
// or multiple objects.
func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) []error {
errs := make([]error, len(versions))

for i, version := range versions {
if err := s.DeleteVersion(ctx, volume, version.Name, version, false); err != nil {
errs[i] = err
@@ -869,11 +826,6 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
return errFileNotFound
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@@ -938,11 +890,6 @@ func (s *xlStorage) WriteMetadata(ctx context.Context, volume, path string, fi F
return err
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

var xlMeta xlMetaV2
if !isXL2V1Format(buf) {
xlMeta, err = newXLMetaV2(fi)
@@ -979,11 +926,6 @@ func (s *xlStorage) renameLegacyMetadata(volumeDir, path string) (err error) {
return errFileNotFound
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

// Validate file path length, before reading.
filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
@@ -1003,7 +945,7 @@ func (s *xlStorage) renameLegacyMetadata(volumeDir, path string) (err error) {
}
}()

if err = os.Rename(srcFilePath, dstFilePath); err != nil {
if err = Rename(srcFilePath, dstFilePath); err != nil {
switch {
case isSysErrNotDir(err):
return errFileNotFound
@@ -1079,7 +1021,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID str
// - object has maximum of 1 parts
if fi.TransitionStatus == "" && fi.DataDir != "" && fi.Size <= smallFileThreshold && len(fi.Parts) == 1 {
// Enable O_DIRECT optionally only if drive supports it.
requireDirectIO := globalStorageClass.GetDMA() == storageclass.DMAReadWrite && s.readODirectSupported
requireDirectIO := globalStorageClass.GetDMA() == storageclass.DMAReadWrite
partPath := fmt.Sprintf("part.%d", fi.Parts[0].Number)
fi.Data, err = s.readAllData(volumeDir, pathJoin(volumeDir, path, fi.DataDir, partPath), requireDirectIO)
if err != nil {
@@ -1096,13 +1038,13 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
if requireDirectIO {
f, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
} else {
f, err = os.OpenFile(filePath, readMode, 0)
f, err = OpenFile(filePath, readMode, 0)
}
if err != nil {
if osIsNotExist(err) {
// Check if the object doesn't exist because its bucket
// is missing in order to return the correct error.
_, err = os.Lstat(volumeDir)
_, err = Lstat(volumeDir)
if err != nil && osIsNotExist(err) {
return nil, errVolumeNotFound
}
@@ -1119,7 +1061,7 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
} else if isSysErrTooManyFiles(err) {
return nil, errTooManyOpenFiles
} else if isSysErrInvalidArg(err) {
st, _ := os.Lstat(filePath)
st, _ := Lstat(filePath)
if st != nil && st.IsDir() {
// Linux returns InvalidArg for directory O_DIRECT
// we need to keep this fallback code to return correct
@@ -1131,20 +1073,10 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
return nil, err
}

atomic.AddInt32(&s.activeIOCount, 1)
or := &odirectReader{f, nil, nil, true, true, s, nil}
rd := struct {
io.Reader
io.Closer
}{Reader: or, Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return or.Close()
})}
defer rd.Close() // activeIOCount is decremented in Close()
defer or.Close()

buf, err = ioutil.ReadAll(rd)
buf, err = ioutil.ReadAll(or)
if err != nil {
err = osErrToFileErr(err)
}
@@ -1169,7 +1101,7 @@ func (s *xlStorage) ReadAll(ctx context.Context, volume string, path string) (bu
return nil, err
}

requireDirectIO := globalStorageClass.GetDMA() == storageclass.DMAReadWrite && s.readODirectSupported
requireDirectIO := globalStorageClass.GetDMA() == storageclass.DMAReadWrite
return s.readAllData(volumeDir, filePath, requireDirectIO)
}

@@ -1191,11 +1123,6 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
return 0, errInvalidArgument
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return 0, err
@@ -1204,7 +1131,7 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
var n int

// Stat a volume entry.
_, err = os.Lstat(volumeDir)
_, err = Lstat(volumeDir)
if err != nil {
if osIsNotExist(err) {
return 0, errVolumeNotFound
@@ -1221,7 +1148,7 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
}

// Open the file for reading.
file, err := os.Open(filePath)
file, err := Open(filePath)
if err != nil {
switch {
case osIsNotExist(err):
@@ -1282,24 +1209,14 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
return int64(len(buffer)), nil
}

func (s *xlStorage) openFile(volume, path string, mode int) (f *os.File, err error) {
volumeDir, err := s.getVolDir(volume)
if err != nil {
return nil, err
}

filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
return nil, err
}

func (s *xlStorage) openFile(filePath string, mode int) (f *os.File, err error) {
// Create top level directories if they don't exist.
// with mode 0777 mkdir honors system umask.
if err = mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
return nil, err
return nil, osErrToFileErr(err)
}

w, err := os.OpenFile(filePath, mode|writeMode, 0666)
w, err := OpenFile(filePath, mode|writeMode, 0666)
if err != nil {
// File path cannot be verified since one of the parents is a file.
switch {
@@ -1346,8 +1263,17 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
o.buf = *o.bufp
n, err = o.f.Read(o.buf)
if err != nil && err != io.EOF {
o.err = err
return n, err
if isSysErrInvalidArg(err) {
if err = disk.DisableDirectIO(o.f); err != nil {
o.err = err
return n, err
}
n, err = o.f.Read(o.buf)
}
if err != nil && err != io.EOF {
o.err = err
return n, err
}
}
if n == 0 {
// err is io.EOF
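The `Read` hunk above adds a recovery path: when an O_DIRECT read fails with EINVAL, direct I/O is switched off on the already-open descriptor and the read is retried once. A sketch of that fcntl dance, assuming `golang.org/x/sys/unix` is available (MinIO's own helper is `disk.DisableDirectIO`; this standalone version is illustrative):

```go
package main

import (
    "errors"
    "fmt"
    "os"

    "golang.org/x/sys/unix"
)

// disableDirectIO clears O_DIRECT on an open descriptor so that subsequent
// reads go through the page cache instead of requiring aligned buffers.
func disableDirectIO(f *os.File) error {
    flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
    if err != nil {
        return err
    }
    _, err = unix.FcntlInt(f.Fd(), unix.F_SETFL, flags&^unix.O_DIRECT)
    return err
}

func main() {
    f, err := os.OpenFile("data.bin", os.O_RDONLY|unix.O_DIRECT, 0)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer f.Close()

    buf := make([]byte, 8192) // a plain Go slice: no alignment guarantee
    n, err := f.Read(buf)
    if err != nil && errors.Is(err, unix.EINVAL) {
        // EINVAL usually means O_DIRECT's alignment rules were violated:
        // drop the flag and retry once through the page cache.
        if err = disableDirectIO(f); err == nil {
            n, err = f.Read(buf)
        }
    }
    fmt.Println(n, err)
}
```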
@@ -1396,16 +1322,16 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off

var file *os.File
// O_DIRECT only supported if offset is zero
if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite && s.readODirectSupported {
if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {
file, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
} else {
// Open the file for reading.
file, err = os.OpenFile(filePath, readMode, 0666)
file, err = OpenFile(filePath, readMode, 0666)
}
if err != nil {
switch {
case osIsNotExist(err):
_, err = os.Lstat(volumeDir)
_, err = Lstat(volumeDir)
if err != nil && osIsNotExist(err) {
return nil, errVolumeNotFound
}
@@ -1438,8 +1364,7 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
return nil, errIsNotRegular
}

atomic.AddInt32(&s.activeIOCount, 1)
if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite && s.readODirectSupported {
if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {
or := &odirectReader{file, nil, nil, true, false, s, nil}
if length <= smallFileThreshold {
or = &odirectReader{file, nil, nil, true, true, s, nil}
@@ -1448,9 +1373,6 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
io.Reader
io.Closer
}{Reader: io.LimitReader(or, length), Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return or.Close()
})}
return r, nil
@@ -1460,9 +1382,6 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
io.Reader
io.Closer
}{Reader: io.LimitReader(file, length), Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return file.Close()
})}

@@ -1502,87 +1421,55 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
return errInvalidArgument
}

atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

// Stat a volume entry.
_, err = os.Lstat(volumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
} else if isSysErrIO(err) {
return errFaultyDisk
}
return err
}

filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
return err
}

// Create top level directories if they don't exist.
// with mode 0777 mkdir honors system umask.
if err = mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
switch {
case osIsPermission(err):
return errFileAccessDenied
case osIsExist(err):
return errFileAccessDenied
case isSysErrIO(err):
return errFaultyDisk
case isSysErrInvalidArg(err):
return errUnsupportedDisk
case isSysErrNoSpace(err):
return errDiskFull
parentFilePath := pathutil.Dir(filePath)
defer func() {
if err != nil {
if volume == minioMetaTmpBucket {
removeAll(parentFilePath)
}
}
return err
}
}()

w, err := disk.OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
if err != nil {
switch {
case osIsPermission(err):
return errFileAccessDenied
case osIsExist(err):
return errFileAccessDenied
case isSysErrIO(err):
return errFaultyDisk
case isSysErrInvalidArg(err):
return errUnsupportedDisk
case isSysErrNoSpace(err):
return errDiskFull
default:
if fileSize >= 0 && fileSize <= smallFileThreshold {
// For streams smaller than 128KiB we simply write them as O_DSYNC (fdatasync)
// and not O_DIRECT to avoid the complexities of aligned I/O.
w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
if err != nil {
return err
}
}
defer w.Close()

var e error
if fileSize > 0 {
// Allocate needed disk space to append data
e = Fallocate(int(w.Fd()), 0, fileSize)
}

// Ignore errors when Fallocate is not supported in the current system
if e != nil && !isSysErrNoSys(e) && !isSysErrOpNotSupported(e) {
switch {
case isSysErrNoSpace(e):
err = errDiskFull
case isSysErrIO(e):
err = errFaultyDisk
default:
// For errors: EBADF, EINTR, EINVAL, ENODEV, EPERM, ESPIPE and ETXTBSY
// Appending failed anyway, return an unexpected error
err = errUnexpected
written, err := io.Copy(w, r)
if err != nil {
return osErrToFileErr(err)
}
return err

if written > fileSize {
return errMoreData
}

return nil
}

// Create top level directories if they don't exist.
// with mode 0777 mkdir honors system umask.
if err = mkdirAll(parentFilePath, 0777); err != nil {
return osErrToFileErr(err)
}

w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
if err != nil {
return osErrToFileErr(err)
}

defer func() {
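The rewritten `CreateFile` above splits writes by size: anything at or below `smallFileThreshold` skips O_DIRECT entirely and is written through O_DSYNC, trading a little throughput for durable, unaligned writes with no buffer bookkeeping. A hedged sketch of that small-object path (the threshold constant and file name are assumptions):

```go
package main

import (
    "fmt"
    "io"
    "os"
    "strings"
    "syscall"
)

const smallFileThreshold = 128 << 10 // 128 KiB, mirroring the comment above

func writeSmall(path string, r io.Reader, size int64) error {
    if size < 0 || size > smallFileThreshold {
        return fmt.Errorf("not a small file: %d bytes", size)
    }
    // O_DSYNC: each write reaches stable storage before returning,
    // roughly an implicit fdatasync per write on Linux.
    w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|syscall.O_DSYNC, 0o666)
    if err != nil {
        return err
    }
    defer w.Close()

    written, err := io.Copy(w, r)
    if err != nil {
        return err
    }
    if written != size {
        return fmt.Errorf("short or long write: got %d, want %d", written, size)
    }
    return nil
}

func main() {
    payload := "hello, dsync"
    if err := writeSmall("small.obj", strings.NewReader(payload), int64(len(payload))); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("wrote small.obj durably")
    os.Remove("small.obj")
}
```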
@@ -1590,23 +1477,17 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
w.Close()
}()

var bufp *[]byte
if fileSize <= smallFileThreshold {
bufp = s.poolSmall.Get().(*[]byte)
defer s.poolSmall.Put(bufp)
} else {
bufp = s.poolLarge.Get().(*[]byte)
defer s.poolLarge.Put(bufp)
}
bufp := s.poolLarge.Get().(*[]byte)
defer s.poolLarge.Put(bufp)

written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)
if err != nil {
return err
}

if written < fileSize {
if written < fileSize && fileSize >= 0 {
return errLessData
} else if written > fileSize {
} else if written > fileSize && fileSize >= 0 {
return errMoreData
}

@@ -1614,12 +1495,17 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
}

func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

w, err := s.openFile(volume, path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
return err
}

w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
if err != nil {
return err
}
@@ -1640,28 +1526,28 @@ func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b
// AppendFile - append a byte array at path, if file doesn't exist at
// path this call explicitly creates it.
func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

// Stat a volume entry.
if _, err = os.Lstat(volumeDir); err != nil {
if _, err = Lstat(volumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
}
return err
}

filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
return err
}

var w *os.File
// Create file if not found. Not doing O_DIRECT here to avoid the code that does buffer aligned writes.
// AppendFile() is only used by healing code to heal objects written in old format.
w, err = s.openFile(volume, path, os.O_CREATE|os.O_APPEND|os.O_WRONLY)
w, err = s.openFile(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY)
if err != nil {
return err
}
@@ -1681,18 +1567,13 @@ func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string,

// CheckParts check if path has necessary parts available.
func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

// Stat a volume entry.
if _, err = os.Lstat(volumeDir); err != nil {
if _, err = Lstat(volumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
}
@@ -1708,7 +1589,7 @@ func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string,
if err = checkPathLength(filePath); err != nil {
return err
}
st, err := os.Lstat(filePath)
st, err := Lstat(filePath)
if err != nil {
return osErrToFileErr(err)
}
@@ -1732,15 +1613,13 @@ func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string,
// - "a/b/"
// - "a/"
func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) error {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}
s.RLock()
formatLegacy := s.formatLegacy
s.RUnlock()

var checkFile func(p string) error
checkFile = func(p string) error {
@@ -1752,10 +1631,10 @@ func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) e
if err := checkPathLength(filePath); err != nil {
return err
}

st, _ := os.Lstat(filePath)
st, _ := Lstat(filePath)
if st == nil {
if !s.formatLegacy {

if !formatLegacy {
return errPathNotFound
}

@@ -1764,7 +1643,7 @@ func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) e
return err
}

st, _ = os.Lstat(filePathOld)
st, _ = Lstat(filePathOld)
if st == nil {
return errPathNotFound
}
@@ -1803,10 +1682,9 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) erro

var err error
if recursive {
tmpuuid := mustGetUUID()
err = renameAll(deletePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
err = renameAll(deletePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID()))
} else {
err = os.Remove(deletePath)
err = Remove(deletePath)
}
if err != nil {
switch {
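`deleteFile` above renames a directory into a per-disk trash bucket instead of walking and unlinking it, which keeps recursive deletes effectively O(1) on the request path; reclaiming the space can happen asynchronously. A sketch of that rename-to-trash idea (the trash location and unique-name helper are illustrative stand-ins for `minioMetaTmpDeletedBucket` and `mustGetUUID`):

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"
    "time"
)

// trashDelete moves deletePath under a hidden trash directory on the same
// disk; a rename within one filesystem is a metadata-only operation.
func trashDelete(diskPath, deletePath string) error {
    trash := filepath.Join(diskPath, ".trash")
    if err := os.MkdirAll(trash, 0o777); err != nil {
        return err
    }
    dst := filepath.Join(trash, fmt.Sprintf("%d", time.Now().UnixNano()))
    return os.Rename(deletePath, dst) // instant, even for huge trees
}

func main() {
    _ = os.MkdirAll("disk/bucket/obj", 0o777)
    if err := trashDelete("disk", "disk/bucket/obj"); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("moved to trash; sweep disk/.trash in the background")
    os.RemoveAll("disk")
}
```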
@@ -1842,18 +1720,13 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) erro

// DeleteFile - delete a file at path.
func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

// Stat a volume entry.
_, err = os.Lstat(volumeDir)
_, err = Lstat(volumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
@@ -1878,11 +1751,6 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu

// RenameData - rename source path to destination path atomically, metadata and data directory.
func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

srcVolumeDir, err := s.getVolDir(srcVolume)
if err != nil {
return err
@@ -1894,7 +1762,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
}

// Stat a volume entry.
_, err = os.Lstat(srcVolumeDir)
_, err = Lstat(srcVolumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
@@ -1904,7 +1772,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
return err
}

if _, err = os.Lstat(dstVolumeDir); err != nil {
if _, err = Lstat(dstVolumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
} else if isSysErrIO(err) {
@@ -2017,10 +1885,13 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
legacyPreserved = true
}
} else {
s.RLock()
formatLegacy := s.formatLegacy
s.RUnlock()
// It is possible that some drives may not have `xl.meta` file
// in such scenarios verify if at least `part.1` files exist
// to verify for legacy version.
if s.formatLegacy {
if formatLegacy {
// We only need this code if we are moving
// from `xl.json` to `xl.meta`, we can avoid
// one extra readdir operation here for all
@@ -2067,7 +1938,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
continue
}

if err = os.Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
if err = Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
return osErrToFileErr(err)
}
}
@@ -2119,26 +1990,13 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
}

// Remove parent dir of the source file if empty
if parentDir := pathutil.Dir(srcFilePath); isDirEmpty(parentDir) {
s.deleteFile(srcVolumeDir, parentDir, false)
}

if srcDataPath != "" {
if parentDir := pathutil.Dir(srcDataPath); isDirEmpty(parentDir) {
s.deleteFile(srcVolumeDir, parentDir, false)
}
}

parentDir := pathutil.Dir(srcFilePath)
s.deleteFile(srcVolumeDir, parentDir, false)
return nil
}

// RenameFile - rename source path to destination path atomically.
func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

srcVolumeDir, err := s.getVolDir(srcVolume)
if err != nil {
return err
@@ -2148,7 +2006,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum
return err
}
// Stat a volume entry.
_, err = os.Lstat(srcVolumeDir)
_, err = Lstat(srcVolumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
@@ -2157,7 +2015,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum
}
return err
}
_, err = os.Lstat(dstVolumeDir)
_, err = Lstat(dstVolumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
@@ -2185,19 +2043,19 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum
// If source is a directory, we expect the destination to be non-existent but we
// still need to allow overwriting an empty directory since it represents
// an object empty directory.
_, err = os.Lstat(dstFilePath)
dirInfo, err := Lstat(dstFilePath)
if isSysErrIO(err) {
return errFaultyDisk
}
if err == nil && !isDirEmpty(dstFilePath) {
return errFileAccessDenied
}
if err != nil && !osIsNotExist(err) {
return err
}
// Empty destination remove it before rename.
if isDirEmpty(dstFilePath) {
if err = os.Remove(dstFilePath); err != nil {
if err != nil {
if !osIsNotExist(err) {
return err
}
} else {
if !dirInfo.IsDir() {
return errFileAccessDenied
}
if err = Remove(dstFilePath); err != nil {
if isSysErrNotEmpty(err) {
return errFileAccessDenied
}
@@ -2211,16 +2069,15 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum
}

// Remove parent dir of the source file if empty
if parentDir := pathutil.Dir(srcFilePath); isDirEmpty(parentDir) {
s.deleteFile(srcVolumeDir, parentDir, false)
}
parentDir := pathutil.Dir(srcFilePath)
s.deleteFile(srcVolumeDir, parentDir, false)

return nil
}

func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error {
// Open the file for reading.
file, err := os.Open(partPath)
file, err := Open(partPath)
if err != nil {
return osErrToFileErr(err)
}
@@ -2287,18 +2144,13 @@ func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlg
}

func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()

volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
}

// Stat a volume entry.
_, err = os.Lstat(volumeDir)
_, err = Lstat(volumeDir)
if err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
@@ -132,7 +132,9 @@ func newXLStorageTestSetup() (*xlStorageDiskIDCheck, string, error) {
if err != nil {
return nil, "", err
}
return &xlStorageDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil
disk := newXLStorageDiskIDCheck(storage)
disk.diskID = "da017d62-70e3-45f1-8a1a-587707e69ad1"
return disk, diskPath, nil
}

// createPermDeniedFile - creates temporary directory and file with path '/mybucket/myobject'
@@ -1728,7 +1730,7 @@ func TestXLStorageVerifyFile(t *testing.T) {
// 4) Streaming bitrot check on corrupted file

// create xlStorage test setup
xlStorage, path, err := newXLStorageTestSetup()
storage, path, err := newXLStorageTestSetup()
if err != nil {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
@@ -1736,7 +1738,7 @@ func TestXLStorageVerifyFile(t *testing.T) {

volName := "testvol"
fileName := "testfile"
if err := xlStorage.MakeVol(context.Background(), volName); err != nil {
if err := storage.MakeVol(context.Background(), volName); err != nil {
t.Fatal(err)
}

@@ -1750,29 +1752,29 @@ func TestXLStorageVerifyFile(t *testing.T) {
h := algo.New()
h.Write(data)
hashBytes := h.Sum(nil)
if err := xlStorage.WriteAll(context.Background(), volName, fileName, data); err != nil {
if err := storage.WriteAll(context.Background(), volName, fileName, data); err != nil {
t.Fatal(err)
}
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil {
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil {
t.Fatal(err)
}

// 2) Whole-file bitrot check on corrupted file
if err := xlStorage.AppendFile(context.Background(), volName, fileName, []byte("a")); err != nil {
if err := storage.AppendFile(context.Background(), volName, fileName, []byte("a")); err != nil {
t.Fatal(err)
}

// Check if VerifyFile reports the incorrect file length (the correct length is `size+1`)
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil {
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil {
t.Fatal("expected to fail bitrot check")
}

// Check if bitrot fails
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil {
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil {
t.Fatal("expected to fail bitrot check")
}

if err := xlStorage.Delete(context.Background(), volName, fileName, false); err != nil {
if err := storage.Delete(context.Background(), volName, fileName, false); err != nil {
t.Fatal(err)
}

@@ -1780,7 +1782,7 @@ func TestXLStorageVerifyFile(t *testing.T) {
algo = HighwayHash256S
shardSize := int64(1024 * 1024)
shard := make([]byte, shardSize)
w := newStreamingBitrotWriter(xlStorage, volName, fileName, size, algo, shardSize)
w := newStreamingBitrotWriter(storage, volName, fileName, size, algo, shardSize, false)
reader := bytes.NewReader(data)
for {
// Using io.Copy instead of this loop will not work for us as io.Copy
@@ -1796,13 +1798,13 @@ func TestXLStorageVerifyFile(t *testing.T) {
}
t.Fatal(err)
}
w.Close()
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil {
w.(io.Closer).Close()
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil {
t.Fatal(err)
}

// 4) Streaming bitrot check on corrupted file
filePath := pathJoin(xlStorage.String(), volName, fileName)
filePath := pathJoin(storage.String(), volName, fileName)
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0644)
if err != nil {
t.Fatal(err)
@@ -1812,10 +1814,10 @@ func TestXLStorageVerifyFile(t *testing.T) {
t.Fatal(err)
}
f.Close()
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil {
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil {
t.Fatal("expected to fail bitrot check")
}
if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil {
if err := storage.storage.(*xlStorage).bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil {
t.Fatal("expected to fail bitrot check")
}
}
@@ -5,53 +5,51 @@ Each metric has a label for the server that generated the metric.

These metrics can be from any MinIO server once per collection.

| Name | Description |
|:---|:---|
| `minio_bucket_objects_size_distribution` | Distribution of object sizes in the bucket, includes label for the bucket name. |
| `minio_bucket_replication_failed_bytes` | Total number of bytes failed at least once to replicate. |
| `minio_bucket_replication_pending_bytes` | Total bytes pending to replicate. |
| `minio_bucket_replication_received_bytes` | Total number of bytes replicated to this bucket from another source bucket. |
| `minio_bucket_replication_sent_bytes` | Total number of bytes replicated to the target bucket. |
| `minio_bucket_usage_object_total` | Total number of objects |
| `minio_bucket_usage_total_bytes` | Total bucket size in bytes |
| `minio_cluster_capacity_raw_free_bytes` | Total free capacity online in the cluster. |
| `minio_cluster_capacity_raw_total_bytes` | Total capacity online in the cluster. |
| `minio_cluster_capacity_usable_free_bytes` | Total free usable capacity online in the cluster. |
| `minio_cluster_capacity_usable_total_bytes` | Total usable capacity online in the cluster. |
| `minio_cluster_disk_total` | Total disks. |
| `minio_cluster_disk_offline_total` | Total disks offline. |
| `minio_cluster_disk_online_total` | Total disks online. |
| `minio_cluster_nodes_offline_total` | Total number of MinIO nodes offline. |
| `minio_cluster_nodes_online_total` | Total number of MinIO nodes online. |
| `minio_heal_objects_error_total` | Objects for which healing failed in current self healing run |
| `minio_heal_objects_heal_total` | Objects healed in current self healing run |
| `minio_heal_objects_total` | Objects scanned in current self healing run |
| `minio_heal_time_last_activity_nano_seconds` | Time elapsed (in nano seconds) since last self healing activity. This is set to -1 until initial self heal activity |
| `minio_inter_node_traffic_received_bytes` | Total number of bytes received from other peer nodes. |
| `minio_inter_node_traffic_sent_bytes` | Total number of bytes sent to the other peer nodes. |
| `minio_node_disk_free_bytes` | Total storage available on a disk. |
| `minio_node_disk_total_bytes` | Total storage on a disk. |
| `minio_node_disk_used_bytes` | Total storage used on a disk. |
| `minio_node_file_descriptor_limit_total` | Limit on total number of open file descriptors for the MinIO Server process. |
| `minio_node_file_descriptor_open_total` | Total number of open file descriptors by the MinIO Server process. |
| `minio_node_io_rchar_bytes` | Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar |
| `minio_node_io_read_bytes` | Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes |
| `minio_node_io_wchar_bytes` | Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar |
| `minio_node_io_write_bytes` | Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes |
| `minio_node_process_starttime_seconds` | Start time for MinIO process per node in seconds. |
| `minio_node_syscall_read_total` | Total read SysCalls to the kernel. /proc/[pid]/io syscr |
| `minio_node_syscall_write_total` | Total write SysCalls to the kernel. /proc/[pid]/io syscw |
| `minio_s3_requests_error_total` | Total number of S3 requests with errors |
| `minio_s3_requests_inflight_total` | Total number of S3 requests currently in flight. |
| `minio_s3_requests_total` | Total number of S3 requests |
| `minio_s3_time_ttbf_seconds_distribution` | Distribution of the time to first byte across API calls. |
| `minio_s3_traffic_received_bytes` | Total number of s3 bytes received. |
| `minio_s3_traffic_sent_bytes` | Total number of s3 bytes sent |
| `minio_cache_hits_total` | Total number of disk cache hits |
| `minio_cache_missed_total` | Total number of disk cache misses |
| `minio_cache_sent_bytes` | Total number of bytes served from cache |
| `minio_cache_total_bytes` | Total size of cache disk in bytes |
| `minio_cache_usage_info` | Total percentage cache usage, value of 1 indicates high and 0 low, label level is set as well |
| `minio_cache_used_bytes` | Current cache usage in bytes |
| `minio_software_commit_info` | Git commit hash for the MinIO release. |
| `minio_software_version_info` | MinIO Release tag for the server |

| Name | Description |
|:---|:---|
| `minio_bucket_objects_size_distribution` | Distribution of object sizes in the bucket, includes label for the bucket name. |
| `minio_bucket_replication_failed_bytes` | Total number of bytes failed at least once to replicate. |
| `minio_bucket_replication_pending_bytes` | Total bytes pending to replicate. |
| `minio_bucket_replication_received_bytes` | Total number of bytes replicated to this bucket from another source bucket. |
| `minio_bucket_replication_sent_bytes` | Total number of bytes replicated to the target bucket. |
| `minio_bucket_usage_object_total` | Total number of objects |
| `minio_bucket_usage_total_bytes` | Total bucket size in bytes |
| `minio_cache_hits_total` | Total number of disk cache hits |
| `minio_cache_missed_total` | Total number of disk cache misses |
| `minio_cache_sent_bytes` | Total number of bytes served from cache |
| `minio_cache_total_bytes` | Total size of cache disk in bytes |
| `minio_cache_usage_info` | Total percentage cache usage, value of 1 indicates high and 0 low, label level is set as well |
| `minio_cache_used_bytes` | Current cache usage in bytes |
| `minio_cluster_capacity_raw_free_bytes` | Total free capacity online in the cluster. |
| `minio_cluster_capacity_raw_total_bytes` | Total capacity online in the cluster. |
| `minio_cluster_capacity_usable_free_bytes` | Total free usable capacity online in the cluster. |
| `minio_cluster_capacity_usable_total_bytes` | Total usable capacity online in the cluster. |
| `minio_cluster_nodes_offline_total` | Total number of MinIO nodes offline. |
| `minio_cluster_nodes_online_total` | Total number of MinIO nodes online. |
| `minio_heal_objects_error_total` | Objects for which healing failed in current self healing run |
| `minio_heal_objects_heal_total` | Objects healed in current self healing run |
| `minio_heal_objects_total` | Objects scanned in current self healing run |
| `minio_heal_time_last_activity_nano_seconds` | Time elapsed (in nano seconds) since last self healing activity. This is set to -1 until initial self heal activity |
| `minio_inter_node_traffic_received_bytes` | Total number of bytes received from other peer nodes. |
| `minio_inter_node_traffic_sent_bytes` | Total number of bytes sent to the other peer nodes. |
| `minio_node_disk_free_bytes` | Total storage available on a disk. |
| `minio_node_disk_total_bytes` | Total storage on a disk. |
| `minio_node_disk_used_bytes` | Total storage used on a disk. |
| `minio_node_file_descriptor_limit_total` | Limit on total number of open file descriptors for the MinIO Server process. |
| `minio_node_file_descriptor_open_total` | Total number of open file descriptors by the MinIO Server process. |
| `minio_node_io_rchar_bytes` | Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar |
| `minio_node_io_read_bytes` | Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes |
| `minio_node_io_wchar_bytes` | Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar |
| `minio_node_io_write_bytes` | Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes |
| `minio_node_process_starttime_seconds` | Start time for MinIO process per node, time in seconds since Unix epoch. |
| `minio_node_process_uptime_seconds` | Uptime for MinIO process per node in seconds. |
| `minio_node_syscall_read_total` | Total read SysCalls to the kernel. /proc/[pid]/io syscr |
| `minio_node_syscall_write_total` | Total write SysCalls to the kernel. /proc/[pid]/io syscw |
| `minio_s3_requests_error_total` | Total number of S3 requests with errors |
| `minio_s3_requests_inflight_total` | Total number of S3 requests currently in flight |
| `minio_s3_requests_total` | Total number of S3 requests |
| `minio_s3_time_ttbf_seconds_distribution` | Distribution of the time to first byte across API calls. |
| `minio_s3_traffic_received_bytes` | Total number of s3 bytes received. |
| `minio_s3_traffic_sent_bytes` | Total number of s3 bytes sent |
| `minio_software_commit_info` | Git commit hash for the MinIO release. |
| `minio_software_version_info` | MinIO Release tag for the server |
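A quick way to eyeball these series is to scrape the cluster metrics endpoint and filter the Prometheus exposition text. The sketch below assumes the v2 endpoint path `/minio/v2/metrics/cluster` and a deployment that allows unauthenticated scrapes; installations that require auth need a bearer token on the request:

```go
package main

import (
    "bufio"
    "fmt"
    "net/http"
    "strings"
)

func main() {
    resp, err := http.Get("http://localhost:9000/minio/v2/metrics/cluster")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()

    // Print only the bucket usage series from the exposition text.
    sc := bufio.NewScanner(resp.Body)
    for sc.Scan() {
        if line := sc.Text(); strings.HasPrefix(line, "minio_bucket_usage") {
            fmt.Println(line)
        }
    }
}
```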
@@ -5,7 +5,7 @@ version: '3.7'
# it through port 9000.
services:
minio1:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
volumes:
- data1-1:/data1
- data1-2:/data2
@@ -22,7 +22,7 @@ services:
retries: 3

minio2:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
volumes:
- data2-1:/data1
- data2-2:/data2
@@ -39,7 +39,7 @@ services:
retries: 3

minio3:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
volumes:
- data3-1:/data1
- data3-2:/data2
@@ -56,7 +56,7 @@ services:
retries: 3

minio4:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
volumes:
- data4-1:/data1
- data4-2:/data2

@@ -2,7 +2,7 @@ version: '3.7'

services:
minio1:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio1
volumes:
- minio1-data:/export
@@ -29,7 +29,7 @@ services:
retries: 3

minio2:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio2
volumes:
- minio2-data:/export
@@ -56,7 +56,7 @@ services:
retries: 3

minio3:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio3
volumes:
- minio3-data:/export
@@ -83,7 +83,7 @@ services:
retries: 3

minio4:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio4
volumes:
- minio4-data:/export

@@ -2,7 +2,7 @@ version: '3.7'

services:
minio1:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio1
volumes:
- minio1-data:/export
@@ -33,7 +33,7 @@ services:
retries: 3

minio2:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio2
volumes:
- minio2-data:/export
@@ -64,7 +64,7 @@ services:
retries: 3

minio3:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio3
volumes:
- minio3-data:/export
@@ -95,7 +95,7 @@ services:
retries: 3

minio4:
image: minio/minio:RELEASE.2021-03-12T00-00-47Z
image: minio/minio:RELEASE.2021-04-06T23-11-00Z
hostname: minio4
volumes:
- minio4-data:/export
5
go.mod
@@ -9,6 +9,7 @@ require (
github.com/Azure/azure-storage-blob-go v0.10.0
github.com/Azure/go-autorest/autorest/adal v0.9.1 // indirect
github.com/Shopify/sarama v1.27.2
github.com/VividCortex/ewma v1.1.1
github.com/alecthomas/participle v0.2.1
github.com/bcicen/jstream v1.0.1
github.com/beevik/ntp v0.3.0
@@ -44,7 +45,7 @@ require (
github.com/mattn/go-isatty v0.0.12
github.com/miekg/dns v1.1.35
github.com/minio/cli v1.22.0
github.com/minio/highwayhash v1.0.1
github.com/minio/highwayhash v1.0.2
github.com/minio/md5-simd v1.1.1 // indirect
github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78
github.com/minio/selfupdate v0.3.1
@@ -73,7 +74,7 @@ require (
github.com/shirou/gopsutil/v3 v3.21.1
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/streadway/amqp v1.0.0
github.com/tidwall/gjson v1.6.7
github.com/tidwall/gjson v1.6.8
github.com/tidwall/sjson v1.0.4
github.com/tinylib/msgp v1.1.3
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 // indirect
11
go.sum
@@ -41,6 +41,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/participle v0.2.1 h1:4AVLj1viSGa4LG5HDXKXrm5xRx19SB/rS/skPQB1Grw=
@@ -331,7 +333,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -391,8 +392,8 @@ github.com/miekg/dns v1.1.35 h1:oTfOaDH+mZkdcgdIjH6yBajRGtIwcwcaR+rt23ZSrJs=
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/minio/cli v1.22.0 h1:VTQm7lmXm3quxO917X3p+el1l0Ca5X3S4PM2ruUYO68=
github.com/minio/cli v1.22.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/md5-simd v1.1.1 h1:9ojcLbuZ4gXbB2sX53MKn8JUZ0sB/2wfwsEcRw+I08U=
github.com/minio/md5-simd v1.1.1/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
@@ -583,8 +584,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tidwall/gjson v1.6.7 h1:Mb1M9HZCRWEcXQ8ieJo7auYyyiSux6w9XN3AdTpxJrE=
github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
github.com/tidwall/gjson v1.6.8 h1:CTmXMClGYPAmln7652e69B7OLXfTi5ABcPPwjIWUv7w=
github.com/tidwall/gjson v1.6.8/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU=

73
main_test.go
Normal file
@@ -0,0 +1,73 @@
// +build testrunmain

/*
* MinIO Cloud Storage, (C) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package main

import (
"log"
"os"
"os/signal"
"strings"
"syscall"
"testing"

minio "github.com/minio/minio/cmd"
_ "github.com/minio/minio/cmd/gateway"
)

// TestRunMain takes arguments from the APP_ARGS env variable and calls minio.Main(args)
// 1. Build and run the test executable:
// $ go test -tags testrunmain -covermode count -coverpkg="./..." -c -tags testrunmain
// $ APP_ARGS="server /tmp/test" ./minio.test -test.run "^TestRunMain$" -test.coverprofile coverage.cov
//
// As an alternative you can also run the system under test just by calling "go test":
// $ APP_ARGS="server /tmp/test" go test -cover -tags testrunmain -covermode count -coverpkg="./..." -coverprofile=coverage.cov
//
// 2. Run system tests (when using Git Bash, prefix this line with MSYS_NO_PATHCONV=1).
// Note that the SERVER_ENDPOINT must be reachable from inside the docker container (so don't use localhost!):
// $ docker run -e MINT_MODE=full -e SERVER_ENDPOINT=192.168.47.11:9000 -e ACCESS_KEY=minioadmin -e SECRET_KEY=minioadmin -v /tmp/mint/log:/mint/log minio/mint
//
// 3. Stop the system under test by sending SIGTERM:
// $ ctrl+c
//
// 4. Optionally transform the coverage file to HTML:
// $ go tool cover -html=./coverage.cov -o coverage.html
//
// 5. Optionally transform the coverage file to .csv:
// $ cat coverage.cov | sed -E 's/mode: .*/source;from;to;stmnts;count/g' | sed -E 's/:| |,/;/g' > coverage.csv
func TestRunMain(t *testing.T) {
cancelChan := make(chan os.Signal, 1)
// Catch SIGTERM or SIGINT. The test must end gracefully to complete the test coverage.
signal.Notify(cancelChan, syscall.SIGTERM, syscall.SIGINT)
go func() {
// Start the minio server with params from the env variable APP_ARGS.
args := os.Getenv("APP_ARGS")
if args == "" {
log.Printf("No environment variable APP_ARGS found. Starting minio without parameters ...")
} else {
log.Printf("Starting \"minio %v\" ...", args)
}
minio.Main(strings.Split("minio.test "+args, " "))
}()
sig := <-cancelChan
log.Printf("Caught SIGTERM %v", sig)
log.Print("You might want to transform the coverage.cov file to .html by calling:")
log.Print("$ go tool cover -html=./coverage.cov -o coverage.html")
// Shut down other goroutines gracefully.
// Close other resources.
}

@@ -2,6 +2,7 @@ git
python3-pip
nodejs
openjdk-8-jdk
openjdk-8-jdk-headless
dirmngr
apt-transport-https
dotnet-sdk-2.1

@@ -123,8 +123,12 @@ func (m *Monitor) getReport(selectBucket SelectionFunction) *bandwidth.Report {
if !selectBucket(bucket) {
continue
}
bucketThrottle, ok := m.bucketThrottle[bucket]
if !ok {
continue
}
report.BucketStats[bucket] = bandwidth.Details{
LimitInBytesPerSecond: m.bucketThrottle[bucket].clusterBandwidth,
LimitInBytesPerSecond: bucketThrottle.clusterBandwidth,
CurrentBandwidthInBytesPerSecond: bucketMeasurement.getExpMovingAvgBytesPerSecond(),
}
}

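The hunk above swaps a bare map index for a comma-ok lookup: a bucket with no configured throttle is now skipped instead of dereferencing the map's nil zero value, and the map is consulted only once. A minimal self-contained sketch of the pattern (the types and names here are hypothetical, not MinIO's):

package main

import "fmt"

type throttle struct{ clusterBandwidth int64 }

// limitFor looks up the throttle with the comma-ok form, so a missing
// bucket yields (0, false) instead of a nil-pointer dereference.
func limitFor(throttles map[string]*throttle, bucket string) (int64, bool) {
	t, ok := throttles[bucket]
	if !ok {
		return 0, false
	}
	return t.clusterBandwidth, true
}

func main() {
	m := map[string]*throttle{"photos": {clusterBandwidth: 1 << 20}}
	if limit, ok := limitFor(m, "photos"); ok {
		fmt.Println(limit) // 1048576
	}
	_, ok := limitFor(m, "videos")
	fmt.Println(ok) // false - skipped, no panic
}
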
@@ -25,62 +25,61 @@ import (

// MonitoredReader monitors the bandwidth
type MonitoredReader struct {
bucket string // Token to track bucket
opts *MonitorReaderOptions
bucketMeasurement *bucketMeasurement // bucket measurement object
object string // Token to track object
reader io.ReadCloser // Reader to wrap
reader io.Reader // Reader to wrap
lastStop time.Time // Last timestamp for a measurement
headerSize int // Size of the header not captured by reader
throttle *throttle // throttle the rate at which replication occurs
monitor *Monitor // Monitor reference
closed bool // Reader is closed
lastErr error // last error reported; if this is non-nil all reads will fail.
}

// NewMonitoredReader returns a io.ReadCloser that reports bandwidth details.
// The supplied reader will be closed.
func NewMonitoredReader(ctx context.Context, monitor *Monitor, bucket string, object string, reader io.ReadCloser, headerSize int, bandwidthBytesPerSecond int64, clusterBandwidth int64) *MonitoredReader {
// MonitorReaderOptions provides configurable options for the monitor reader implementation.
type MonitorReaderOptions struct {
Bucket string
Object string
HeaderSize int
BandwidthBytesPerSec int64
ClusterBandwidth int64
}

// NewMonitoredReader returns a io.Reader that reports bandwidth details.
func NewMonitoredReader(ctx context.Context, monitor *Monitor, reader io.Reader, opts *MonitorReaderOptions) *MonitoredReader {
timeNow := time.Now()
b := monitor.track(bucket, object, timeNow)
b := monitor.track(opts.Bucket, opts.Object, timeNow)
return &MonitoredReader{
bucket: bucket,
object: object,
opts: opts,
bucketMeasurement: b,
reader: reader,
lastStop: timeNow,
headerSize: headerSize,
throttle: monitor.throttleBandwidth(ctx, bucket, bandwidthBytesPerSecond, clusterBandwidth),
throttle: monitor.throttleBandwidth(ctx, opts.Bucket, opts.BandwidthBytesPerSec, opts.ClusterBandwidth),
monitor: monitor,
}
}

// Read wraps the read reader
func (m *MonitoredReader) Read(p []byte) (n int, err error) {
if m.closed {
err = io.ErrClosedPipe
if m.lastErr != nil {
err = m.lastErr
return
}

p = p[:m.throttle.GetLimitForBytes(int64(len(p)))]

n, err = m.reader.Read(p)
stop := time.Now()
update := uint64(n + m.headerSize)
update := uint64(n + m.opts.HeaderSize)

m.bucketMeasurement.incrementBytes(update)
m.lastStop = stop
unused := len(p) - (n + m.headerSize)
m.headerSize = 0 // Set to 0 post first read
unused := len(p) - (n + m.opts.HeaderSize)
m.opts.HeaderSize = 0 // Set to 0 post first read

if unused > 0 {
m.throttle.ReleaseUnusedBandwidth(int64(unused))
}
if err != nil {
m.lastErr = err
}
return
}

// Close stops tracking the io
func (m *MonitoredReader) Close() error {
if m.closed {
return nil
}
m.closed = true
return m.reader.Close()
}

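The refactor above folds five positional constructor parameters into a MonitorReaderOptions struct, so call sites stay readable as fields are added and zero values can simply be omitted. A self-contained sketch of the options-struct pattern under hypothetical names (not the MinIO implementation itself):

package main

import (
	"fmt"
	"io"
	"strings"
)

// Options bundles what would otherwise be a long positional
// parameter list; new fields can be added without touching callers.
type Options struct {
	Bucket, Object string
	HeaderSize     int
}

type monitoredReader struct {
	opts   *Options
	reader io.Reader
}

func newMonitoredReader(r io.Reader, opts *Options) *monitoredReader {
	return &monitoredReader{opts: opts, reader: r}
}

func main() {
	mr := newMonitoredReader(strings.NewReader("payload"), &Options{
		Bucket: "srcbucket",
		Object: "object.bin",
	})
	data, _ := io.ReadAll(mr.reader)
	fmt.Println(mr.opts.Bucket, len(data))
}
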
@@ -18,6 +18,7 @@ package lifecycle

import (
"encoding/xml"
"fmt"
"io"
"strings"
"time"
@@ -66,6 +67,41 @@ type Lifecycle struct {
Rules []Rule `xml:"Rule"`
}

// UnmarshalXML - decodes XML data.
func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
switch start.Name.Local {
case "LifecycleConfiguration", "BucketLifecycleConfiguration":
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <LifecycleConfiguration>/<BucketLifecycleConfiguration> but have <%s>",
start.Name.Local))
}
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
if err != nil {
if err == io.EOF {
break
}
return err
}

switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "Rule":
var r Rule
if err = d.DecodeElement(&r, &se); err != nil {
return err
}
lc.Rules = append(lc.Rules, r)
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <Rule> but have <%s>", se.Name.Local))
}
}
}
return nil
}

// HasActiveRules - returns whether policy has active rules for.
// Optionally a prefix can be supplied.
// If recursive is specified the function will also return true if any level below the

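The decoder above accepts either <LifecycleConfiguration> or <BucketLifecycleConfiguration> as the root tag by implementing xml.Unmarshaler and inspecting the start element. A self-contained sketch of the same dual-root technique with a hypothetical Config type:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
)

type Config struct {
	Rules []string
}

// UnmarshalXML accepts two spellings of the root element, then decodes
// each <Rule> child; unknown roots are rejected.
func (c *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	switch start.Name.Local {
	case "Config", "LegacyConfig":
	default:
		return fmt.Errorf("unexpected root <%s>", start.Name.Local)
	}
	for {
		t, err := d.Token()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if se, ok := t.(xml.StartElement); ok && se.Name.Local == "Rule" {
			var r string
			if err := d.DecodeElement(&r, &se); err != nil {
				return err
			}
			c.Rules = append(c.Rules, r)
		}
	}
}

func main() {
	var c Config
	err := xml.Unmarshal([]byte(`<LegacyConfig><Rule>expire</Rule></LegacyConfig>`), &c)
	fmt.Println(c.Rules, err) // [expire] <nil>
}
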
@@ -336,6 +336,13 @@ func TestComputeActions(t *testing.T) {
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
expectedAction: DeleteAction,
},
// Should accept BucketLifecycleConfiguration root tag
{
inputConfig: `<BucketLifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></BucketLifecycleConfiguration>`,
objectName: "foodir/fooobject",
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
expectedAction: DeleteAction,
},
}

for _, tc := range testCases {

@@ -489,6 +489,41 @@ type ObjectLegalHold struct {
Status LegalHoldStatus `xml:"Status,omitempty"`
}

// UnmarshalXML - decodes XML data.
func (l *ObjectLegalHold) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
switch start.Name.Local {
case "LegalHold", "ObjectLockLegalHold":
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <LegalHold>/<ObjectLockLegalHold> but have <%s>",
start.Name.Local))
}
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
if err != nil {
if err == io.EOF {
break
}
return err
}

switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "Status":
var st LegalHoldStatus
if err = d.DecodeElement(&st, &se); err != nil {
return err
}
l.Status = st
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <Status> but have <%s>", se.Name.Local))
}
}
}
return nil
}

// IsEmpty returns true if struct is empty
func (l *ObjectLegalHold) IsEmpty() bool {
return !l.Status.Valid()

@@ -18,6 +18,7 @@ package lock

import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"reflect"
@@ -467,6 +468,23 @@ func TestParseObjectLegalHold(t *testing.T) {
expectedErr: nil,
expectErr: false,
},
{
value: `<?xml version="1.0" encoding="UTF-8"?><ObjectLockLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></ObjectLockLegalHold>`,
expectedErr: nil,
expectErr: false,
},
// invalid Status key
{
value: `<?xml version="1.0" encoding="UTF-8"?><ObjectLockLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><MyStatus>ON</MyStatus></ObjectLockLegalHold>`,
expectedErr: errors.New("expected element type <Status> but have <MyStatus>"),
expectErr: true,
},
// invalid XML attr
{
value: `<?xml version="1.0" encoding="UTF-8"?><UnknownLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></UnknownLegalHold>`,
expectedErr: errors.New("expected element type <LegalHold>/<ObjectLockLegalHold> but have <UnknownLegalHold>"),
expectErr: true,
},
{
value: `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>On</Status></LegalHold>`,
expectedErr: ErrMalformedXML,

@@ -110,10 +110,18 @@ const (

// AWSUsername - user friendly name, in MinIO this value is the same as your user Access Key.
AWSUsername Key = "aws:username"

// S3SignatureVersion - identifies the version of AWS Signature that you want to support for authenticated requests.
S3SignatureVersion = "s3:signatureversion"

// S3AuthType - optionally use this condition key to restrict incoming requests to use a specific authentication method.
S3AuthType = "s3:authType"
)

// AllSupportedKeys - is the list of all supported keys.
var AllSupportedKeys = append([]Key{
S3SignatureVersion,
S3AuthType,
S3XAmzCopySource,
S3XAmzServerSideEncryption,
S3XAmzServerSideEncryptionCustomerAlgorithm,
@@ -144,6 +152,8 @@ var AllSupportedKeys = append([]Key{

// CommonKeys - is the list of all common condition keys.
var CommonKeys = append([]Key{
S3SignatureVersion,
S3AuthType,
S3XAmzContentSha256,
S3LocationConstraint,
AWSReferer,

@@ -27,6 +27,7 @@ const DefaultVersion = "2012-10-17"
// Args - arguments to policy to check whether it is allowed
type Args struct {
AccountName string `json:"account"`
Groups []string `json:"groups"`
Action Action `json:"action"`
BucketName string `json:"bucket"`
ConditionValues map[string][]string `json:"conditions"`

@@ -220,11 +220,17 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc

go func() {
defer cancel()

refreshTimer := time.NewTimer(drwMutexRefreshInterval)
defer refreshTimer.Stop()

for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(drwMutexRefreshInterval).C:
case <-refreshTimer.C:
refreshTimer.Reset(drwMutexRefreshInterval)

refreshed, err := refresh(ctx, dm.clnt, id, source, quorum, dm.Names...)
if err == nil && !refreshed {
if lockLossCallback != nil {

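The dsync change above is the standard Go fix for timers inside a select loop: a time.NewTimer allocated in the case expression leaks one timer per iteration until it fires, whereas a single timer re-armed with Reset after each tick does not. A runnable sketch of the pattern (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func refreshLoop(ctx context.Context, interval time.Duration) {
	t := time.NewTimer(interval) // allocated once, not per iteration
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			t.Reset(interval) // safe to re-arm: the channel was just drained
			fmt.Println("refreshing lock")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	refreshLoop(ctx, 100*time.Millisecond)
}
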
@@ -31,6 +31,7 @@ const DefaultVersion = "2012-10-17"
// Args - arguments to policy to check whether it is allowed
type Args struct {
AccountName string `json:"account"`
Groups []string `json:"groups"`
Action Action `json:"action"`
BucketName string `json:"bucket"`
ConditionValues map[string][]string `json:"conditions"`

@@ -19,8 +19,11 @@
package ioutil

import (
"bytes"
"context"
"io"
"os"
"time"

"github.com/minio/minio/pkg/disk"
)
@@ -63,6 +66,56 @@ func WriteOnClose(w io.Writer) *WriteOnCloser {
return &WriteOnCloser{w, false}
}

type ioret struct {
n int
err error
}

// DeadlineWriter deadline writer with context
type DeadlineWriter struct {
io.WriteCloser
timeout time.Duration
err error
}

// NewDeadlineWriter wraps a writer to make it respect the given deadline
// value per Write(). If there is a blocking write, the returned Writer
// will return whenever the timer hits (the return values are n=0
// and err=context.Canceled.)
func NewDeadlineWriter(w io.WriteCloser, timeout time.Duration) io.WriteCloser {
return &DeadlineWriter{WriteCloser: w, timeout: timeout}
}

func (w *DeadlineWriter) Write(buf []byte) (int, error) {
if w.err != nil {
return 0, w.err
}

c := make(chan ioret, 1)
t := time.NewTimer(w.timeout)
defer t.Stop()

go func() {
n, err := w.WriteCloser.Write(buf)
c <- ioret{n, err}
close(c)
}()

select {
case r := <-c:
w.err = r.err
return r.n, r.err
case <-t.C:
w.err = context.Canceled
return 0, context.Canceled
}
}

// Close closer interface to close the underlying closer
func (w *DeadlineWriter) Close() error {
return w.WriteCloser.Close()
}

// LimitWriter implements io.WriteCloser.
//
// This is implemented such that we want to restrict

@@ -188,33 +241,15 @@ const directioAlignSize = 4096
// the file opened for writes with syscall.O_DIRECT flag.
func CopyAligned(w *os.File, r io.Reader, alignedBuf []byte, totalSize int64) (int64, error) {
// Writes remaining bytes in the buffer.
writeUnaligned := func(w *os.File, buf []byte) (remainingWritten int, err error) {
var n int
remaining := len(buf)
// The following logic writes the remaining data such that it writes whatever best is possible (aligned buffer)
// in O_DIRECT mode and remaining (unaligned buffer) in non-O_DIRECT mode.
remainingAligned := (remaining / directioAlignSize) * directioAlignSize
remainingAlignedBuf := buf[:remainingAligned]
remainingUnalignedBuf := buf[remainingAligned:]
if len(remainingAlignedBuf) > 0 {
n, err = w.Write(remainingAlignedBuf)
if err != nil {
return remainingWritten, err
}
remainingWritten += n
writeUnaligned := func(w *os.File, buf []byte) (remainingWritten int64, err error) {
// Disable O_DIRECT on fd's on unaligned buffer
// perform an amortized Fdatasync(fd) on the fd at
// the end, this is performed by the caller before
// closing 'w'.
if err = disk.DisableDirectIO(w); err != nil {
return remainingWritten, err
}
if len(remainingUnalignedBuf) > 0 {
// Writes on O_DIRECT fds fail if the buffer is not 4K aligned, hence disable O_DIRECT.
if err = disk.DisableDirectIO(w); err != nil {
return remainingWritten, err
}
n, err = w.Write(remainingUnalignedBuf)
if err != nil {
return remainingWritten, err
}
remainingWritten += n
}
return remainingWritten, nil
return io.Copy(w, bytes.NewReader(buf))
}

var written int64
@@ -232,21 +267,23 @@ func CopyAligned(w *os.File, r io.Reader, alignedBuf []byte, totalSize int64) (i
return written, err
}
buf = buf[:nr]
var nw int
var nw int64
if len(buf)%directioAlignSize == 0 {
var n int
// buf is aligned for directio write()
nw, err = w.Write(buf)
n, err = w.Write(buf)
nw = int64(n)
} else {
// buf is not aligned, hence use writeUnaligned()
nw, err = writeUnaligned(w, buf)
}
if nw > 0 {
written += int64(nw)
written += nw
}
if err != nil {
return written, err
}
if nw != len(buf) {
if nw != int64(len(buf)) {
return written, io.ErrShortWrite
}

@@ -18,12 +18,49 @@ package ioutil

import (
"bytes"
"context"
"io"
goioutil "io/ioutil"
"os"
"testing"
"time"
)

type sleepWriter struct {
timeout time.Duration
}

func (w *sleepWriter) Write(p []byte) (n int, err error) {
time.Sleep(w.timeout)
return len(p), nil
}

func (w *sleepWriter) Close() error {
return nil
}

func TestDeadlineWriter(t *testing.T) {
w := NewDeadlineWriter(&sleepWriter{timeout: 500 * time.Millisecond}, 450*time.Millisecond)
_, err := w.Write([]byte("1"))
w.Close()
if err != context.Canceled {
t.Error("DeadlineWriter shouldn't be successful - should return context.Canceled")
}
_, err = w.Write([]byte("1"))
if err != context.Canceled {
t.Error("DeadlineWriter shouldn't be successful - should return context.Canceled")
}
w = NewDeadlineWriter(&sleepWriter{timeout: 100 * time.Millisecond}, 600*time.Millisecond)
n, err := w.Write([]byte("abcd"))
w.Close()
if err != nil {
t.Errorf("DeadlineWriter should succeed but failed with %s", err)
}
if n != 4 {
t.Errorf("DeadlineWriter should succeed but should have only written 4 bytes, but returned %d instead", n)
}
}

func TestCloseOnWriter(t *testing.T) {
writer := WriteOnClose(goioutil.Discard)
if writer.HasWritten() {

@@ -34,6 +34,7 @@ import (
"regexp"
"runtime"
"strings"
"syscall"
"time"

"github.com/minio/minio-go/v7/pkg/credentials"
@@ -340,7 +341,6 @@ var successStatus = []int{
// delayed manner using a standard back off algorithm.
func (adm AdminClient) executeMethod(ctx context.Context, method string, reqData requestData) (res *http.Response, err error) {
var reqRetry = MaxRetry // Indicates how many times we can retry the request

defer func() {
if err != nil {
// close idle connections before returning, upon error.
@@ -365,6 +365,10 @@ func (adm AdminClient) executeMethod(ctx context.Context, method string, reqData
// Initiate the request.
res, err = adm.do(req)
if err != nil {
// Give up right away if it is a connection refused problem
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, err
}
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, err
}

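The retry short-circuit above relies on errors.Is unwrapping the *url.Error/*net.OpError chain down to the syscall errno, so a refused connection is detected without string matching (on Unix; the errno differs on Windows). A self-contained sketch of the same check - the retry loop itself is hypothetical, not the madmin one:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"syscall"
)

func getWithRetry(url string, attempts int) (*http.Response, error) {
	var err error
	for i := 0; i < attempts; i++ {
		var res *http.Response
		res, err = http.Get(url)
		if err == nil {
			return res, nil
		}
		// Nothing is listening on the port; retrying cannot help.
		if errors.Is(err, syscall.ECONNREFUSED) {
			return nil, err
		}
	}
	return nil, err
}

func main() {
	_, err := getWithRetry("http://127.0.0.1:1", 3)
	fmt.Println(err)
}
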
@@ -256,18 +256,6 @@ func (adm *AdminClient) ServerHealthInfo(ctx context.Context, healthDataTypes []
var healthInfoMessage HealthInfo
healthInfoMessage.TimeStamp = time.Now()

if v.Get(string(HealthDataTypeMinioInfo)) == "true" {
info, err := adm.ServerInfo(ctx)
if err != nil {
respChan <- HealthInfo{
Error: err.Error(),
}
return
}
healthInfoMessage.Minio.Info = info
respChan <- healthInfoMessage
}

resp, err := adm.executeMethod(ctx, "GET", requestData{
relPath: adminAPIPrefix + "/healthinfo",
queryValues: v,
@@ -308,10 +296,22 @@ func (adm *AdminClient) ServerHealthInfo(ctx context.Context, healthDataTypes []
}

respChan <- healthInfoMessage

if v.Get(string(HealthDataTypeMinioInfo)) == "true" {
info, err := adm.ServerInfo(ctx)
if err != nil {
respChan <- HealthInfo{
Error: err.Error(),
}
return
}
healthInfoMessage.Minio.Info = info
respChan <- healthInfoMessage
}

close(respChan)
}()
return respChan

}

// GetTotalCapacity gets the total capacity a server holds.

@@ -291,6 +291,14 @@ type ServerProperties struct {
PoolNumber int `json:"poolNumber,omitempty"`
}

// DiskMetrics has information about the XL Storage APIs:
// the number of calls of each API and the moving average of
// the duration of each API.
type DiskMetrics struct {
APILatencies map[string]string `json:"apiLatencies,omitempty"`
APICalls map[string]uint64 `json:"apiCalls,omitempty"`
}

// Disk holds Disk information
type Disk struct {
Endpoint string `json:"endpoint,omitempty"`
@@ -308,6 +316,7 @@ type Disk struct {
ReadLatency float64 `json:"readlatency,omitempty"`
WriteLatency float64 `json:"writelatency,omitempty"`
Utilization float64 `json:"utilization,omitempty"`
Metrics *DiskMetrics `json:"metrics,omitempty"`
HealInfo *HealingDisk `json:"heal_info,omitempty"`

// Indexes, will be -1 until assigned a set.

@@ -378,6 +378,31 @@ func TestJSONQueries(t *testing.T) {
query: `SELECT date_diff(day, '2010-01-01T23:00:00Z', '2010-01-02T23:00:00Z') FROM S3Object LIMIT 1`,
wantResult: `{"_1":1}`,
},
{
name: "cast_from_int_to_float",
query: `SELECT cast(1 as float) FROM S3Object LIMIT 1`,
wantResult: `{"_1":1}`,
},
{
name: "cast_from_float_to_float",
query: `SELECT cast(1.0 as float) FROM S3Object LIMIT 1`,
wantResult: `{"_1":1}`,
},
{
name: "arithmetic_integer_operand",
query: `SELECT 1 / 2 FROM S3Object LIMIT 1`,
wantResult: `{"_1":0}`,
},
{
name: "arithmetic_float_operand",
query: `SELECT 1.0 / 2.0 * .3 FROM S3Object LIMIT 1`,
wantResult: `{"_1":0.15}`,
},
{
name: "arithmetic_integer_float_operand",
query: `SELECT 3.0 / 2, 5 / 2.0 FROM S3Object LIMIT 1`,
wantResult: `{"_1":1.5,"_2":2.5}`,
},
}

defRequest := `<?xml version="1.0" encoding="UTF-8"?>

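The arithmetic test cases added above pin down the operand-typing rule: division of two integers stays integral, while any float operand promotes the result. Go's own arithmetic mirrors the expected values, which makes a quick cross-check easy (illustrative only, not the S3 Select evaluator):

package main

import "fmt"

func main() {
	fmt.Println(1 / 2)          // 0    - both operands integer
	fmt.Println(1.0 / 2.0 * .3) // 0.15 - float operands
	fmt.Println(3.0/2, 5/2.0)   // 1.5 2.5 - a float operand promotes the result
}
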
@@ -714,6 +739,152 @@ func TestCSVQueries2(t *testing.T) {
}
}

func TestCSVQueries3(t *testing.T) {
input := `na.me,qty,CAST
apple,1,true
mango,3,false
`
var testTable = []struct {
name string
query string
requestXML []byte // override request XML
wantResult string
}{
{
name: "Select a column containing dot",
query: `select "na.me" from S3Object s`,
wantResult: `apple
mango`,
},
{
name: "Select column containing dot with table name prefix",
query: `select count(S3Object."na.me") from S3Object`,
wantResult: `2`,
},
{
name: "Select column containing dot with table alias prefix",
query: `select s."na.me" from S3Object as s`,
wantResult: `apple
mango`,
},
{
name: "Select column simplest",
query: `select qty from S3Object`,
wantResult: `1
3`,
},
{
name: "Select column with table name prefix",
query: `select S3Object.qty from S3Object`,
wantResult: `1
3`,
},
{
name: "Select column without table alias",
query: `select qty from S3Object s`,
wantResult: `1
3`,
},
{
name: "Select column with table alias",
query: `select s.qty from S3Object s`,
wantResult: `1
3`,
},
{
name: "Select reserved word column",
query: `select "CAST" from s3object`,
wantResult: `true
false`,
},
{
name: "Select reserved word column with table alias",
query: `select S3Object."CAST" from s3object`,
wantResult: `true
false`,
},
{
name: "Select reserved word column with unused table alias",
query: `select "CAST" from s3object s`,
wantResult: `true
false`,
},
{
name: "Select reserved word column with table alias",
query: `select s."CAST" from s3object s`,
wantResult: `true
false`,
},
{
name: "Select reserved word column with table alias",
query: `select NOT CAST(s."CAST" AS Bool) from s3object s`,
wantResult: `false
true`,
},
}

defRequest := `<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>%s</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
<QuoteCharacter>"</QuoteCharacter>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV/>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`

for _, testCase := range testTable {
t.Run(testCase.name, func(t *testing.T) {
testReq := testCase.requestXML
if len(testReq) == 0 {
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
}
s3Select, err := NewS3Select(bytes.NewReader(testReq))
if err != nil {
t.Fatal(err)
}

if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBufferString(input)), nil
}); err != nil {
t.Fatal(err)
}

w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
gotS := strings.TrimSpace(string(got))
if gotS != testCase.wantResult {
t.Errorf("received response does not match with expected reply.\nQuery: %s\n=====\ngot: %s\n=====\nwant: %s\n=====\n", testCase.query, gotS, testCase.wantResult)
}
})
}
}

func TestCSVInput(t *testing.T) {
var testTable = []struct {
requestXML []byte

@@ -63,7 +63,7 @@ func newAggVal(fn FuncName) *aggVal {
// current row and stores the result.
//
// On success, it returns (nil, nil).
func (e *FuncExpr) evalAggregationNode(r Record) error {
func (e *FuncExpr) evalAggregationNode(r Record, tableAlias string) error {
// It is assumed that this function is called only when
// `e` is an aggregation function.

@@ -77,13 +77,13 @@ func (e *FuncExpr) evalAggregationNode(r Record) error {
return nil
}

val, err = e.Count.ExprArg.evalNode(r)
val, err = e.Count.ExprArg.evalNode(r, tableAlias)
if err != nil {
return err
}
} else {
// Evaluate the (only) argument
val, err = e.SFunc.ArgsList[0].evalNode(r)
val, err = e.SFunc.ArgsList[0].evalNode(r, tableAlias)
if err != nil {
return err
}
@@ -149,13 +149,13 @@ func (e *FuncExpr) evalAggregationNode(r Record) error {
return err
}

func (e *AliasedExpression) aggregateRow(r Record) error {
return e.Expression.aggregateRow(r)
func (e *AliasedExpression) aggregateRow(r Record, tableAlias string) error {
return e.Expression.aggregateRow(r, tableAlias)
}

func (e *Expression) aggregateRow(r Record) error {
func (e *Expression) aggregateRow(r Record, tableAlias string) error {
for _, ex := range e.And {
err := ex.aggregateRow(r)
err := ex.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -163,9 +163,9 @@ func (e *Expression) aggregateRow(r Record) error {
return nil
}

func (e *ListExpr) aggregateRow(r Record) error {
func (e *ListExpr) aggregateRow(r Record, tableAlias string) error {
for _, ex := range e.Elements {
err := ex.aggregateRow(r)
err := ex.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -173,9 +173,9 @@ func (e *ListExpr) aggregateRow(r Record) error {
return nil
}

func (e *AndCondition) aggregateRow(r Record) error {
func (e *AndCondition) aggregateRow(r Record, tableAlias string) error {
for _, ex := range e.Condition {
err := ex.aggregateRow(r)
err := ex.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -183,15 +183,15 @@ func (e *AndCondition) aggregateRow(r Record) error {
return nil
}

func (e *Condition) aggregateRow(r Record) error {
func (e *Condition) aggregateRow(r Record, tableAlias string) error {
if e.Operand != nil {
return e.Operand.aggregateRow(r)
return e.Operand.aggregateRow(r, tableAlias)
}
return e.Not.aggregateRow(r)
return e.Not.aggregateRow(r, tableAlias)
}

func (e *ConditionOperand) aggregateRow(r Record) error {
err := e.Operand.aggregateRow(r)
func (e *ConditionOperand) aggregateRow(r Record, tableAlias string) error {
err := e.Operand.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -202,38 +202,38 @@ func (e *ConditionOperand) aggregateRow(r Record) error {

switch {
case e.ConditionRHS.Compare != nil:
return e.ConditionRHS.Compare.Operand.aggregateRow(r)
return e.ConditionRHS.Compare.Operand.aggregateRow(r, tableAlias)
case e.ConditionRHS.Between != nil:
err = e.ConditionRHS.Between.Start.aggregateRow(r)
err = e.ConditionRHS.Between.Start.aggregateRow(r, tableAlias)
if err != nil {
return err
}
return e.ConditionRHS.Between.End.aggregateRow(r)
return e.ConditionRHS.Between.End.aggregateRow(r, tableAlias)
case e.ConditionRHS.In != nil:
elt := e.ConditionRHS.In.ListExpression
err = elt.aggregateRow(r)
err = elt.aggregateRow(r, tableAlias)
if err != nil {
return err
}
return nil
case e.ConditionRHS.Like != nil:
err = e.ConditionRHS.Like.Pattern.aggregateRow(r)
err = e.ConditionRHS.Like.Pattern.aggregateRow(r, tableAlias)
if err != nil {
return err
}
return e.ConditionRHS.Like.EscapeChar.aggregateRow(r)
return e.ConditionRHS.Like.EscapeChar.aggregateRow(r, tableAlias)
default:
return errInvalidASTNode
}
}

func (e *Operand) aggregateRow(r Record) error {
err := e.Left.aggregateRow(r)
func (e *Operand) aggregateRow(r Record, tableAlias string) error {
err := e.Left.aggregateRow(r, tableAlias)
if err != nil {
return err
}
for _, rt := range e.Right {
err = rt.Right.aggregateRow(r)
err = rt.Right.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -241,13 +241,13 @@ func (e *Operand) aggregateRow(r Record) error {
return nil
}

func (e *MultOp) aggregateRow(r Record) error {
err := e.Left.aggregateRow(r)
func (e *MultOp) aggregateRow(r Record, tableAlias string) error {
err := e.Left.aggregateRow(r, tableAlias)
if err != nil {
return err
}
for _, rt := range e.Right {
err = rt.Right.aggregateRow(r)
err = rt.Right.aggregateRow(r, tableAlias)
if err != nil {
return err
}
@@ -255,29 +255,29 @@ func (e *MultOp) aggregateRow(r Record) error {
return nil
}

func (e *UnaryTerm) aggregateRow(r Record) error {
func (e *UnaryTerm) aggregateRow(r Record, tableAlias string) error {
if e.Negated != nil {
return e.Negated.Term.aggregateRow(r)
return e.Negated.Term.aggregateRow(r, tableAlias)
}
return e.Primary.aggregateRow(r)
return e.Primary.aggregateRow(r, tableAlias)
}

func (e *PrimaryTerm) aggregateRow(r Record) error {
func (e *PrimaryTerm) aggregateRow(r Record, tableAlias string) error {
switch {
case e.ListExpr != nil:
return e.ListExpr.aggregateRow(r)
return e.ListExpr.aggregateRow(r, tableAlias)
case e.SubExpression != nil:
return e.SubExpression.aggregateRow(r)
return e.SubExpression.aggregateRow(r, tableAlias)
case e.FuncCall != nil:
return e.FuncCall.aggregateRow(r)
return e.FuncCall.aggregateRow(r, tableAlias)
}
return nil
}

func (e *FuncExpr) aggregateRow(r Record) error {
func (e *FuncExpr) aggregateRow(r Record, tableAlias string) error {
switch e.getFunctionName() {
case aggFnAvg, aggFnSum, aggFnMax, aggFnMin, aggFnCount:
return e.evalAggregationNode(r)
return e.evalAggregationNode(r, tableAlias)
default:
// TODO: traverse arguments and call aggregateRow on
// them if they could be an ancestor of an

@@ -19,6 +19,7 @@ package sql
import (
"errors"
"fmt"
"strings"
)

// Query analysis - The query is analyzed to determine if it involves
@@ -177,7 +178,7 @@ func (e *PrimaryTerm) analyze(s *Select) (result qProp) {
case e.JPathExpr != nil:
// Check if the path expression is valid
if len(e.JPathExpr.PathExpr) > 0 {
if e.JPathExpr.BaseKey.String() != s.From.As {
if e.JPathExpr.BaseKey.String() != s.From.As && strings.ToLower(e.JPathExpr.BaseKey.String()) != baseTableName {
result = qProp{err: errInvalidKeypath}
return
}

@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"math"
"strings"

"github.com/bcicen/jstream"
"github.com/minio/simdjson-go"
@@ -47,21 +46,21 @@ var (
// of child nodes. The final result row is returned after all rows are
// processed, and the `getAggregate` function is called.

func (e *AliasedExpression) evalNode(r Record) (*Value, error) {
return e.Expression.evalNode(r)
func (e *AliasedExpression) evalNode(r Record, tableAlias string) (*Value, error) {
return e.Expression.evalNode(r, tableAlias)
}

func (e *Expression) evalNode(r Record) (*Value, error) {
func (e *Expression) evalNode(r Record, tableAlias string) (*Value, error) {
if len(e.And) == 1 {
// In this case, result is not required to be boolean
// type.
return e.And[0].evalNode(r)
return e.And[0].evalNode(r, tableAlias)
}

// Compute OR of conditions
result := false
for _, ex := range e.And {
res, err := ex.evalNode(r)
res, err := ex.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -74,16 +73,16 @@ func (e *Expression) evalNode(r Record) (*Value, error) {
return FromBool(result), nil
}

func (e *AndCondition) evalNode(r Record) (*Value, error) {
func (e *AndCondition) evalNode(r Record, tableAlias string) (*Value, error) {
if len(e.Condition) == 1 {
// In this case, result does not have to be boolean
return e.Condition[0].evalNode(r)
return e.Condition[0].evalNode(r, tableAlias)
}

// Compute AND of conditions
result := true
for _, ex := range e.Condition {
res, err := ex.evalNode(r)
res, err := ex.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -96,14 +95,14 @@ func (e *AndCondition) evalNode(r Record) (*Value, error) {
return FromBool(result), nil
}

func (e *Condition) evalNode(r Record) (*Value, error) {
func (e *Condition) evalNode(r Record, tableAlias string) (*Value, error) {
if e.Operand != nil {
// In this case, result does not have to be boolean
return e.Operand.evalNode(r)
return e.Operand.evalNode(r, tableAlias)
}

// Compute NOT of condition
res, err := e.Not.evalNode(r)
res, err := e.Not.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -114,8 +113,8 @@ func (e *Condition) evalNode(r Record) (*Value, error) {
return FromBool(!b), nil
}

func (e *ConditionOperand) evalNode(r Record) (*Value, error) {
opVal, opErr := e.Operand.evalNode(r)
func (e *ConditionOperand) evalNode(r Record, tableAlias string) (*Value, error) {
opVal, opErr := e.Operand.evalNode(r, tableAlias)
if opErr != nil || e.ConditionRHS == nil {
return opVal, opErr
}
@@ -123,7 +122,7 @@ func (e *ConditionOperand) evalNode(r Record) (*Value, error) {
// Need to evaluate the ConditionRHS
switch {
case e.ConditionRHS.Compare != nil:
cmpRight, cmpRErr := e.ConditionRHS.Compare.Operand.evalNode(r)
cmpRight, cmpRErr := e.ConditionRHS.Compare.Operand.evalNode(r, tableAlias)
if cmpRErr != nil {
return nil, cmpRErr
}
@@ -132,26 +131,26 @@ func (e *ConditionOperand) evalNode(r Record) (*Value, error) {
return FromBool(b), err

case e.ConditionRHS.Between != nil:
return e.ConditionRHS.Between.evalBetweenNode(r, opVal)
return e.ConditionRHS.Between.evalBetweenNode(r, opVal, tableAlias)

case e.ConditionRHS.Like != nil:
return e.ConditionRHS.Like.evalLikeNode(r, opVal)
return e.ConditionRHS.Like.evalLikeNode(r, opVal, tableAlias)

case e.ConditionRHS.In != nil:
return e.ConditionRHS.In.evalInNode(r, opVal)
return e.ConditionRHS.In.evalInNode(r, opVal, tableAlias)

default:
return nil, errInvalidASTNode
}
}

func (e *Between) evalBetweenNode(r Record, arg *Value) (*Value, error) {
stVal, stErr := e.Start.evalNode(r)
func (e *Between) evalBetweenNode(r Record, arg *Value, tableAlias string) (*Value, error) {
stVal, stErr := e.Start.evalNode(r, tableAlias)
if stErr != nil {
return nil, stErr
}

endVal, endErr := e.End.evalNode(r)
endVal, endErr := e.End.evalNode(r, tableAlias)
if endErr != nil {
return nil, endErr
}
@@ -174,7 +173,7 @@ func (e *Between) evalBetweenNode(r Record, arg *Value) (*Value, error) {
return FromBool(result), nil
}

func (e *Like) evalLikeNode(r Record, arg *Value) (*Value, error) {
func (e *Like) evalLikeNode(r Record, arg *Value, tableAlias string) (*Value, error) {
inferTypeAsString(arg)

s, ok := arg.ToString()
@@ -183,7 +182,7 @@ func (e *Like) evalLikeNode(r Record, arg *Value) (*Value, error) {
return nil, errLikeInvalidInputs(err)
}

pattern, err1 := e.Pattern.evalNode(r)
pattern, err1 := e.Pattern.evalNode(r, tableAlias)
if err1 != nil {
return nil, err1
}
@@ -199,7 +198,7 @@ func (e *Like) evalLikeNode(r Record, arg *Value) (*Value, error) {

escape := runeZero
if e.EscapeChar != nil {
escapeVal, err2 := e.EscapeChar.evalNode(r)
escapeVal, err2 := e.EscapeChar.evalNode(r, tableAlias)
if err2 != nil {
return nil, err2
}
@@ -230,14 +229,14 @@ func (e *Like) evalLikeNode(r Record, arg *Value) (*Value, error) {
return FromBool(matchResult), nil
}

func (e *ListExpr) evalNode(r Record) (*Value, error) {
func (e *ListExpr) evalNode(r Record, tableAlias string) (*Value, error) {
res := make([]Value, len(e.Elements))
if len(e.Elements) == 1 {
// If length 1, treat as single value.
return e.Elements[0].evalNode(r)
return e.Elements[0].evalNode(r, tableAlias)
}
for i, elt := range e.Elements {
v, err := elt.evalNode(r)
v, err := elt.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -248,7 +247,7 @@ func (e *ListExpr) evalNode(r Record) (*Value, error) {

const floatCmpTolerance = 0.000001

func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) {
func (e *In) evalInNode(r Record, lhs *Value, tableAlias string) (*Value, error) {
// Compare two values in terms of in-ness.
var cmp func(a, b Value) bool
cmp = func(a, b Value) bool {
@@ -283,7 +282,7 @@ func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) {

var rhs Value
if elt := e.ListExpression; elt != nil {
eltVal, err := elt.evalNode(r)
eltVal, err := elt.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -304,8 +303,8 @@ func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) {
return FromBool(cmp(rhs, *lhs)), nil
}

func (e *Operand) evalNode(r Record) (*Value, error) {
lval, lerr := e.Left.evalNode(r)
func (e *Operand) evalNode(r Record, tableAlias string) (*Value, error) {
lval, lerr := e.Left.evalNode(r, tableAlias)
if lerr != nil || len(e.Right) == 0 {
return lval, lerr
}
@@ -315,7 +314,7 @@ func (e *Operand) evalNode(r Record) (*Value, error) {
// symbols.
for _, rightTerm := range e.Right {
op := rightTerm.Op
rval, rerr := rightTerm.Right.evalNode(r)
rval, rerr := rightTerm.Right.evalNode(r, tableAlias)
if rerr != nil {
return nil, rerr
}
@@ -327,8 +326,8 @@ func (e *Operand) evalNode(r Record) (*Value, error) {
return lval, nil
}

func (e *MultOp) evalNode(r Record) (*Value, error) {
lval, lerr := e.Left.evalNode(r)
func (e *MultOp) evalNode(r Record, tableAlias string) (*Value, error) {
lval, lerr := e.Left.evalNode(r, tableAlias)
if lerr != nil || len(e.Right) == 0 {
return lval, lerr
}
@@ -337,7 +336,7 @@ func (e *MultOp) evalNode(r Record) (*Value, error) {
// AST node is for terms separated by *, / or % symbols.
for _, rightTerm := range e.Right {
op := rightTerm.Op
rval, rerr := rightTerm.Right.evalNode(r)
rval, rerr := rightTerm.Right.evalNode(r, tableAlias)
if rerr != nil {
return nil, rerr
}
@@ -350,12 +349,12 @@ func (e *MultOp) evalNode(r Record) (*Value, error) {
return lval, nil
}

func (e *UnaryTerm) evalNode(r Record) (*Value, error) {
func (e *UnaryTerm) evalNode(r Record, tableAlias string) (*Value, error) {
if e.Negated == nil {
return e.Primary.evalNode(r)
return e.Primary.evalNode(r, tableAlias)
}

v, err := e.Negated.Term.evalNode(r)
v, err := e.Negated.Term.evalNode(r, tableAlias)
if err != nil {
return nil, err
}
@@ -368,19 +367,15 @@ func (e *UnaryTerm) evalNode(r Record) (*Value, error) {
return nil, errArithMismatchedTypes
}

func (e *JSONPath) evalNode(r Record) (*Value, error) {
// Strip the table name from the keypath.
keypath := e.String()
if strings.Contains(keypath, ".") {
ps := strings.SplitN(keypath, ".", 2)
if len(ps) == 2 {
keypath = ps[1]
}
func (e *JSONPath) evalNode(r Record, tableAlias string) (*Value, error) {
alias := tableAlias
if tableAlias == "" {
alias = baseTableName
}
pathExpr := e.StripTableAlias(alias)
_, rawVal := r.Raw()
switch rowVal := rawVal.(type) {
case jstream.KVS, simdjson.Object:
pathExpr := e.PathExpr
if len(pathExpr) == 0 {
pathExpr = []*JSONPathElement{{Key: &ObjectKey{ID: e.BaseKey}}}
}
@@ -392,7 +387,10 @@ func (e *JSONPath) evalNode(r Record) (*Value, error) {

return jsonToValue(result)
default:
return r.Get(keypath)
if pathExpr[len(pathExpr)-1].Key == nil {
return nil, errInvalidKeypath
}
return r.Get(pathExpr[len(pathExpr)-1].Key.keyString())
}
}

@@ -447,28 +445,28 @@ func jsonToValue(result interface{}) (*Value, error) {
return nil, fmt.Errorf("Unhandled value type: %T", result)
}

func (e *PrimaryTerm) evalNode(r Record) (res *Value, err error) {
func (e *PrimaryTerm) evalNode(r Record, tableAlias string) (res *Value, err error) {
switch {
case e.Value != nil:
return e.Value.evalNode(r)
case e.JPathExpr != nil:
return e.JPathExpr.evalNode(r)
return e.JPathExpr.evalNode(r, tableAlias)
case e.ListExpr != nil:
return e.ListExpr.evalNode(r)
return e.ListExpr.evalNode(r, tableAlias)
case e.SubExpression != nil:
return e.SubExpression.evalNode(r)
return e.SubExpression.evalNode(r, tableAlias)
case e.FuncCall != nil:
return e.FuncCall.evalNode(r)
return e.FuncCall.evalNode(r, tableAlias)
}
return nil, errInvalidASTNode
}

func (e *FuncExpr) evalNode(r Record) (res *Value, err error) {
func (e *FuncExpr) evalNode(r Record, tableAlias string) (res *Value, err error) {
switch e.getFunctionName() {
case aggFnCount, aggFnAvg, aggFnMax, aggFnMin, aggFnSum:
return e.getAggregate()
default:
return e.evalSQLFnNode(r)
return e.evalSQLFnNode(r, tableAlias)
}
}

@@ -476,8 +474,13 @@ func (e *FuncExpr) evalNode(r Record) (res *Value, err error) {
// aggregation or a row function - it always returns a value.
func (e *LitValue) evalNode(_ Record) (res *Value, err error) {
switch {
case e.Number != nil:
return floatToValue(*e.Number), nil
case e.Int != nil:
if *e.Int < math.MaxInt64 && *e.Int > math.MinInt64 {
return FromInt(int64(*e.Int)), nil
}
return FromFloat(*e.Int), nil
case e.Float != nil:
return FromFloat(*e.Float), nil
case e.String != nil:
return FromString(string(*e.String)), nil
case e.Boolean != nil:

@@ -84,35 +84,35 @@ func (e *FuncExpr) getFunctionName() FuncName {
|
||||
|
||||
// evalSQLFnNode assumes that the FuncExpr is not an aggregation
|
||||
// function.
|
||||
func (e *FuncExpr) evalSQLFnNode(r Record) (res *Value, err error) {
|
||||
func (e *FuncExpr) evalSQLFnNode(r Record, tableAlias string) (res *Value, err error) {
|
||||
// Handle functions that have phrase arguments
|
||||
switch e.getFunctionName() {
|
||||
case sqlFnCast:
|
||||
expr := e.Cast.Expr
|
||||
res, err = expr.castTo(r, strings.ToUpper(e.Cast.CastType))
|
||||
res, err = expr.castTo(r, strings.ToUpper(e.Cast.CastType), tableAlias)
|
||||
return
|
||||
|
||||
case sqlFnSubstring:
|
||||
return handleSQLSubstring(r, e.Substring)
|
||||
return handleSQLSubstring(r, e.Substring, tableAlias)
|
||||
|
||||
case sqlFnExtract:
|
||||
return handleSQLExtract(r, e.Extract)
|
||||
return handleSQLExtract(r, e.Extract, tableAlias)
|
||||
|
||||
case sqlFnTrim:
|
||||
return handleSQLTrim(r, e.Trim)
|
||||
return handleSQLTrim(r, e.Trim, tableAlias)
|
||||
|
||||
case sqlFnDateAdd:
|
||||
return handleDateAdd(r, e.DateAdd)
|
||||
return handleDateAdd(r, e.DateAdd, tableAlias)
|
||||
|
||||
case sqlFnDateDiff:
|
||||
return handleDateDiff(r, e.DateDiff)
|
||||
return handleDateDiff(r, e.DateDiff, tableAlias)
|
||||
|
||||
}
|
||||
|
||||
// For all simple argument functions, we evaluate the arguments here
|
||||
argVals := make([]*Value, len(e.SFunc.ArgsList))
|
||||
for i, arg := range e.SFunc.ArgsList {
|
||||
argVals[i], err = arg.evalNode(r)
|
||||
argVals[i], err = arg.evalNode(r, tableAlias)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -219,8 +219,8 @@ func upperCase(v *Value) (*Value, error) {
 	return FromString(strings.ToUpper(s)), nil
 }

-func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) {
-	q, err := d.Quantity.evalNode(r)
+func handleDateAdd(r Record, d *DateAddFunc, tableAlias string) (*Value, error) {
+	q, err := d.Quantity.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -230,7 +230,7 @@ func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) {
 		return nil, fmt.Errorf("QUANTITY must be a numeric argument to %s()", sqlFnDateAdd)
 	}

-	ts, err := d.Timestamp.evalNode(r)
+	ts, err := d.Timestamp.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -245,8 +245,8 @@ func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) {
 	return dateAdd(strings.ToUpper(d.DatePart), qty, t)
 }

-func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) {
-	tval1, err := d.Timestamp1.evalNode(r)
+func handleDateDiff(r Record, d *DateDiffFunc, tableAlias string) (*Value, error) {
+	tval1, err := d.Timestamp1.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -258,7 +258,7 @@ func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) {
 		return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff)
 	}

-	tval2, err := d.Timestamp2.evalNode(r)
+	tval2, err := d.Timestamp2.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
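handleDateAdd now evaluates QUANTITY and TIMESTAMP with the alias in scope before delegating to dateAdd. For intuition, month arithmetic is the subtle part of DATE_ADD-style functions; a standard-library sketch (not the evaluator's code) shows how Go itself normalizes overflowing dates:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2021, time.January, 31, 0, 0, 0, 0, time.UTC)
	// DATE_ADD('month', 1, ts)-style arithmetic via the standard library:
	// Go normalizes the nonexistent Feb 31 forward to Mar 3.
	fmt.Println(t.AddDate(0, 1, 0)) // 2021-03-03 00:00:00 +0000 UTC
}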
@@ -277,12 +277,12 @@ func handleUTCNow() (*Value, error) {
 	return FromTimestamp(time.Now().UTC()), nil
 }

-func handleSQLSubstring(r Record, e *SubstringFunc) (val *Value, err error) {
+func handleSQLSubstring(r Record, e *SubstringFunc, tableAlias string) (val *Value, err error) {
 	// Both forms `SUBSTRING('abc' FROM 2 FOR 1)` and
 	// SUBSTRING('abc', 2, 1) are supported.

 	// Evaluate the string argument
-	v1, err := e.Expr.evalNode(r)
+	v1, err := e.Expr.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -301,7 +301,7 @@ func handleSQLSubstring(r Record, e *SubstringFunc) (val *Value, err error) {
 	}

 	// Evaluate the FROM argument
-	v2, err := arg2.evalNode(r)
+	v2, err := arg2.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -315,7 +315,7 @@ func handleSQLSubstring(r Record, e *SubstringFunc) (val *Value, err error) {
 	length := -1
 	// Evaluate the optional FOR argument
 	if arg3 != nil {
-		v3, err := arg3.evalNode(r)
+		v3, err := arg3.evalNode(r, tableAlias)
 		if err != nil {
 			return nil, err
 		}
@@ -336,11 +336,11 @@ func handleSQLSubstring(r Record, e *SubstringFunc) (val *Value, err error) {
 	return FromString(res), err
 }
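As the comment in the hunk above notes, both SUBSTRING('abc' FROM 2 FOR 1) and SUBSTRING('abc', 2, 1) are accepted; SQL indexing is 1-based with an optional length. A self-contained sketch of those semantics for the ASCII case (illustrative, not the package's implementation):

package main

import "fmt"

// sqlSubstring applies SUBSTRING(s FROM start FOR length) semantics:
// 1-based start, with length < 0 meaning "to the end of the string".
func sqlSubstring(s string, start, length int) string {
	if start < 1 {
		start = 1
	}
	i := start - 1
	if i >= len(s) {
		return ""
	}
	end := len(s)
	if length >= 0 && i+length < end {
		end = i + length
	}
	return s[i:end]
}

func main() {
	fmt.Println(sqlSubstring("abc", 2, 1))  // b
	fmt.Println(sqlSubstring("abc", 2, -1)) // bc
}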
-func handleSQLTrim(r Record, e *TrimFunc) (res *Value, err error) {
+func handleSQLTrim(r Record, e *TrimFunc, tableAlias string) (res *Value, err error) {
 	chars := ""
 	ok := false
 	if e.TrimChars != nil {
-		charsV, cerr := e.TrimChars.evalNode(r)
+		charsV, cerr := e.TrimChars.evalNode(r, tableAlias)
 		if cerr != nil {
 			return nil, cerr
 		}
@@ -351,7 +351,7 @@ func handleSQLTrim(r Record, e *TrimFunc) (res *Value, err error) {
 		}
 	}

-	fromV, ferr := e.TrimFrom.evalNode(r)
+	fromV, ferr := e.TrimFrom.evalNode(r, tableAlias)
 	if ferr != nil {
 		return nil, ferr
 	}
@@ -368,8 +368,8 @@ func handleSQLTrim(r Record, e *TrimFunc) (res *Value, err error) {
 	return FromString(result), nil
 }

-func handleSQLExtract(r Record, e *ExtractFunc) (res *Value, err error) {
-	timeVal, verr := e.From.evalNode(r)
+func handleSQLExtract(r Record, e *ExtractFunc, tableAlias string) (res *Value, err error) {
+	timeVal, verr := e.From.evalNode(r, tableAlias)
 	if verr != nil {
 		return nil, verr
 	}
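TRIM supports LEADING, TRAILING, and BOTH with an optional character set (the TrimChars expression above); with no character set it strips whitespace. The standard library expresses the same three modes, shown here purely for orientation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	s, cutset := "xxhixx", "x"
	fmt.Println(strings.TrimLeft(s, cutset))  // LEADING:  hixx
	fmt.Println(strings.TrimRight(s, cutset)) // TRAILING: xxhi
	fmt.Println(strings.Trim(s, cutset))      // BOTH:     hi
	fmt.Println(strings.TrimSpace("  hi  "))  // default: whitespace
}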
@@ -406,8 +406,8 @@ const (
 	castTimestamp = "TIMESTAMP"
 )

-func (e *Expression) castTo(r Record, castType string) (res *Value, err error) {
-	v, err := e.evalNode(r)
+func (e *Expression) castTo(r Record, castType string, tableAlias string) (res *Value, err error) {
+	v, err := e.evalNode(r, tableAlias)
 	if err != nil {
 		return nil, err
 	}
@@ -492,7 +492,7 @@ func floatCast(v *Value) (float64, error) {
 	switch x := v.value.(type) {
 	case float64:
 		return x, nil
-	case int:
+	case int64:
 		return float64(x), nil
 	case string:
 		f, err := strconv.ParseFloat(strings.TrimSpace(x), 64)
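The one-character fix in the floatCast hunk (case int to case int64) matters because a Go type switch arm matches the dynamic type exactly: since the Value wrapper stores integers as int64 (FromInt takes an int64), a case int arm would never fire and such values would fall through past the switch. A quick standalone demonstration of that Go rule:

package main

import "fmt"

func describe(v interface{}) string {
	switch v.(type) {
	case int:
		return "int"
	case int64:
		return "int64"
	default:
		return "no match"
	}
}

func main() {
	var stored interface{} = int64(7) // integers held as int64, as in the Value wrapper
	fmt.Println(describe(stored))     // int64 — a `case int` arm would not fire
}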
@@ -106,10 +106,10 @@ type TableExpression struct {

 // JSONPathElement represents a keypath component
 type JSONPathElement struct {
-	Key            *ObjectKey `parser:" @@"`                    // ['name'] and .name forms
-	Index          *int       `parser:"| \"[\" @Number \"]\""`  // [3] form
-	ObjectWildcard bool       `parser:"| @\".*\""`              // .* form
-	ArrayWildcard  bool       `parser:"| @\"[*]\""`             // [*] form
+	Key            *ObjectKey `parser:" @@"`                    // ['name'] and .name forms
+	Index          *int       `parser:"| \"[\" @Int \"]\""`     // [3] form
+	ObjectWildcard bool       `parser:"| @\".*\""`              // .* form
+	ArrayWildcard  bool       `parser:"| @\"[*]\""`             // [*] form
 }

 // JSONPath represents a keypath.
@@ -119,7 +119,9 @@ type JSONPath struct {
 	PathExpr []*JSONPathElement `parser:"(@@)*"`

 	// Cached values:
-	pathString string
+	pathString         string
+	strippedTableAlias string
+	strippedPathExpr   []*JSONPathElement
 }

 // AliasedExpression is an expression that can be optionally named
@@ -333,7 +335,8 @@ type DateDiffFunc struct {

 // LitValue represents a literal value parsed from the sql
 type LitValue struct {
-	Number  *float64       `parser:"( @Number"`
+	Float   *float64       `parser:"( @Float"`
+	Int     *float64       `parser:" | @Int"` // To avoid value out of range, use float64 instead
 	String  *LiteralString `parser:" | @LitString"`
 	Boolean *Boolean       `parser:" | @(\"TRUE\" | \"FALSE\")"`
 	Null    bool           `parser:" | @\"NULL\")"`
@@ -351,7 +354,8 @@ var (
 	`|(?P<Keyword>(?i)\b(?:SELECT|FROM|TOP|DISTINCT|ALL|WHERE|GROUP|BY|HAVING|UNION|MINUS|EXCEPT|INTERSECT|ORDER|LIMIT|OFFSET|TRUE|FALSE|NULL|IS|NOT|ANY|SOME|BETWEEN|AND|OR|LIKE|ESCAPE|AS|IN|BOOL|INT|INTEGER|STRING|FLOAT|DECIMAL|NUMERIC|TIMESTAMP|AVG|COUNT|MAX|MIN|SUM|COALESCE|NULLIF|CAST|DATE_ADD|DATE_DIFF|EXTRACT|TO_STRING|TO_TIMESTAMP|UTCNOW|CHAR_LENGTH|CHARACTER_LENGTH|LOWER|SUBSTRING|TRIM|UPPER|LEADING|TRAILING|BOTH|FOR)\b)` +
 	`|(?P<Ident>[a-zA-Z_][a-zA-Z0-9_]*)` +
 	`|(?P<QuotIdent>"([^"]*("")?)*")` +
-	`|(?P<Number>\d*\.?\d+([eE][-+]?\d+)?)` +
+	`|(?P<Float>\d*\.\d+([eE][-+]?\d+)?)` +
+	`|(?P<Int>\d+)` +
 	`|(?P<LitString>'([^']*('')?)*')` +
 	`|(?P<Operators><>|!=|<=|>=|\.\*|\[\*\]|[-+*/%,.()=<>\[\]])`,
 ))
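The lexer change replaces the single Number token with two: Float (requires a decimal point, optional exponent) and Int (digits only). This is what lets the grammar distinguish LitValue.Int from LitValue.Float above. A standalone check of the two patterns, copied from the diff with anchors added so each must match the whole token:

package main

import (
	"fmt"
	"regexp"
)

var (
	floatTok = regexp.MustCompile(`^\d*\.\d+([eE][-+]?\d+)?$`)
	intTok   = regexp.MustCompile(`^\d+$`)
)

func main() {
	for _, s := range []string{"3", "3.14", "10", ".5"} {
		fmt.Printf("%-5s float=%-5v int=%v\n", s, floatTok.MatchString(s), intTok.MatchString(s))
	}
}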
@@ -46,6 +46,9 @@ type SelectStatement struct {

 	// Count of rows that have been output.
 	outputCount int64
+
+	// Table alias
+	tableAlias string
 }

 // ParseSelectStatement - parses a select query from the given string
@@ -107,6 +110,9 @@ func ParseSelectStatement(s string) (stmt SelectStatement, err error) {
 	if err != nil {
 		err = errQueryAnalysisFailure(err)
 	}
+
+	// Set table alias
+	stmt.tableAlias = selectAST.From.As
 	return
 }
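With the alias captured once at parse time, every later evaluation call can consult stmt.tableAlias instead of re-deriving it per record. A sketch of the entry point; the import path reflects this tree's package layout and may differ in other releases:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/s3select/sql"
)

func main() {
	stmt, err := sql.ParseSelectStatement("SELECT s.name FROM S3Object s")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// The FROM alias "s" is now stored on the statement and threaded
	// through evalNode calls, so s.name and name resolve identically.
	fmt.Println("aggregation query:", stmt.IsAggregated())
}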
@@ -127,10 +133,10 @@ func parseLimit(v *LitValue) (int64, error) {
 	switch {
 	case v == nil:
 		return -1, nil
-	case v.Number == nil:
+	case v.Int == nil:
 		return -1, errBadLimitSpecified
 	default:
-		r := int64(*v.Number)
+		r := int64(*v.Int)
 		if r < 0 {
 			return -1, errBadLimitSpecified
 		}
@@ -226,7 +232,7 @@ func (e *SelectStatement) IsAggregated() bool {
 // records have been processed. Applies only to aggregation queries.
 func (e *SelectStatement) AggregateResult(output Record) error {
 	for i, expr := range e.selectAST.Expression.Expressions {
-		v, err := expr.evalNode(nil)
+		v, err := expr.evalNode(nil, e.tableAlias)
 		if err != nil {
 			return err
 		}
@@ -246,7 +252,7 @@ func (e *SelectStatement) isPassingWhereClause(input Record) (bool, error) {
 	if e.selectAST.Where == nil {
 		return true, nil
 	}
-	value, err := e.selectAST.Where.evalNode(input)
+	value, err := e.selectAST.Where.evalNode(input, e.tableAlias)
 	if err != nil {
 		return false, err
 	}
@@ -272,7 +278,7 @@ func (e *SelectStatement) AggregateRow(input Record) error {
 	}

 	for _, expr := range e.selectAST.Expression.Expressions {
-		err := expr.aggregateRow(input)
+		err := expr.aggregateRow(input, e.tableAlias)
 		if err != nil {
 			return err
 		}
@@ -302,7 +308,7 @@ func (e *SelectStatement) Eval(input, output Record) (Record, error) {
 	}

 	for i, expr := range e.selectAST.Expression.Expressions {
-		v, err := expr.evalNode(input)
+		v, err := expr.evalNode(input, e.tableAlias)
 		if err != nil {
 			return nil, err
 		}
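The four statement.go hunks all follow the same per-record flow: check the WHERE clause, then either aggregate the row or project it. A schematic of that loop with invented closures (the real methods are the ones shown in the hunks above):

package main

import "fmt"

type row map[string]int

// processAll sketches the driver implied above: WHERE filter first,
// then per-row projection. (Illustrative types, not the package's.)
func processAll(rows []row, where func(row) bool, project func(row) int) []int {
	var out []int
	for _, r := range rows {
		if !where(r) {
			continue
		}
		out = append(out, project(r))
	}
	return out
}

func main() {
	rows := []row{{"n": 1}, {"n": 5}, {"n": 3}}
	got := processAll(rows,
		func(r row) bool { return r["n"] >= 3 }, // WHERE n >= 3
		func(r row) int { return r["n"] * 10 })  // SELECT n * 10
	fmt.Println(got) // [50 30]
}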
@@ -36,6 +36,27 @@ func (e *JSONPath) String() string {
 	return e.pathString
 }

+// StripTableAlias removes a table alias from the path. The result is also
+// cached for repeated lookups during SQL query evaluation.
+func (e *JSONPath) StripTableAlias(tableAlias string) []*JSONPathElement {
+	if e.strippedTableAlias == tableAlias {
+		return e.strippedPathExpr
+	}
+
+	hasTableAlias := e.BaseKey.String() == tableAlias || strings.ToLower(e.BaseKey.String()) == baseTableName
+	var pathExpr []*JSONPathElement
+	if hasTableAlias {
+		pathExpr = e.PathExpr
+	} else {
+		pathExpr = make([]*JSONPathElement, len(e.PathExpr)+1)
+		pathExpr[0] = &JSONPathElement{Key: &ObjectKey{ID: e.BaseKey}}
+		copy(pathExpr[1:], e.PathExpr)
+	}
+	e.strippedTableAlias = tableAlias
+	e.strippedPathExpr = pathExpr
+	return e.strippedPathExpr
+}
+
 func (e *JSONPathElement) String() string {
 	switch {
 	case e.Key != nil:
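StripTableAlias normalizes a path so downstream lookups never see the alias: if the base key is the alias (or the implicit table name — baseTableName is presumably the lowercase "s3object"), it is dropped; otherwise the base key is kept as the first real path component, and either way the result is memoized. Restated on plain string components for illustration:

package main

import (
	"fmt"
	"strings"
)

// stripTableAlias restates the logic above on strings (the real
// function works on parsed JSONPathElement slices; "s3object" stands
// in for the package's baseTableName constant).
func stripTableAlias(base string, rest []string, alias string) []string {
	if base == alias || strings.ToLower(base) == "s3object" {
		return rest
	}
	// No alias on the path: the base key is itself the first component.
	return append([]string{base}, rest...)
}

func main() {
	// SELECT s.title FROM S3Object s => base "s" is the alias, dropped.
	fmt.Println(stripTableAlias("s", []string{"title"}, "s")) // [title]
	// SELECT title FROM S3Object s   => "title" kept as a component.
	fmt.Println(stripTableAlias("title", nil, "s")) // [title]
}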
@@ -306,15 +306,6 @@ func (v Value) CSVString() string {
 	}
 }

-// floatToValue converts a float into int representation if needed.
-func floatToValue(f float64) *Value {
-	intPart, fracPart := math.Modf(f)
-	if fracPart == 0 && intPart < math.MaxInt64 && intPart > math.MinInt64 {
-		return FromInt(int64(intPart))
-	}
-	return FromFloat(f)
-}
-
 // negate negates a numeric value
 func (v *Value) negate() {
 	switch x := v.value.(type) {
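floatToValue can be deleted because the lexer now tells Int and Float apart at tokenization time, so the integral-float heuristic is no longer needed; the safe-range check it performed survives in LitValue.evalNode (see the first hunk of this diff). For reference, the test the removed helper relied on:

package main

import (
	"fmt"
	"math"
)

func main() {
	// math.Modf splits a float into integer and fractional parts; a zero
	// fractional part was the removed helper's "this is really an int" test.
	ip, fp := math.Modf(3.0)
	fmt.Println(ip, fp) // 3 0
	ip, fp = math.Modf(3.5)
	fmt.Println(ip, fp) // 3 0.5
}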