Compare commits

..

16 Commits

Author SHA1 Message Date
Andreas Auernhammer
404d2ebe3f set SSE headers in put-part response (#12008)
This commit fixes a bug in the put-part
implementation. The SSE headers should be
set as specified by AWS; see:
https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html

Now the MinIO server sets SSE-C headers,
like `x-amz-server-side-encryption-customer-algorithm`.

Fixes #11991
2021-04-07 14:50:28 -07:00
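
For reference, a minimal sketch of mirroring SSE-C headers on a response, per the AWS UploadPart behavior linked above. The helper name is hypothetical, not MinIO's actual code:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// setSSECResponseHeaders mirrors the SSE-C request headers back onto the
// response, as AWS S3 does for UploadPart. Hypothetical helper.
func setSSECResponseHeaders(w http.ResponseWriter, r *http.Request) {
	for _, h := range []string{
		"x-amz-server-side-encryption-customer-algorithm",
		"x-amz-server-side-encryption-customer-key-MD5",
	} {
		if v := r.Header.Get(h); v != "" {
			w.Header().Set(h, v)
		}
	}
}

func main() {
	r := httptest.NewRequest(http.MethodPut, "/bucket/object?partNumber=1&uploadId=abc", nil)
	r.Header.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
	w := httptest.NewRecorder()
	setSSECResponseHeaders(w, r)
	fmt.Println(w.Header().Get("x-amz-server-side-encryption-customer-algorithm")) // AES256
}
```
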
Minio Trusted
46964eb764 Update yaml files to latest version RELEASE.2021-04-06T23-11-00Z 2021-04-06 23:35:33 +00:00
Poorna Krishnamoorthy
bfab990c33 Improve error message from SetRemoteTargetHandler (#11909) 2021-04-06 12:42:30 -07:00
Harshavardhana
94018588fe unmarshal both LegalHold and ObjectLockLegalHold XML types (#11921)
Because of silly AWS S3 behavior we need to handle both types.

fixes #11920
2021-04-06 12:41:56 -07:00
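
A small sketch of accepting either root element with `encoding/xml`, assuming S3-style `<LegalHold>`/`<ObjectLockLegalHold>` payloads; the types are illustrative, not MinIO's internal ones:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// LegalHold accepts both root element spellings that S3 clients send.
// The untagged XMLName field captures whatever root element was used.
type LegalHold struct {
	XMLName xml.Name
	Status  string `xml:"Status"`
}

func parseLegalHold(data []byte) (*LegalHold, error) {
	var lh LegalHold
	if err := xml.Unmarshal(data, &lh); err != nil {
		return nil, err
	}
	switch lh.XMLName.Local {
	case "LegalHold", "ObjectLockLegalHold":
		return &lh, nil
	default:
		return nil, fmt.Errorf("unexpected root element %q", lh.XMLName.Local)
	}
}

func main() {
	for _, doc := range []string{
		`<LegalHold><Status>ON</Status></LegalHold>`,
		`<ObjectLockLegalHold><Status>OFF</Status></ObjectLockLegalHold>`,
	} {
		lh, err := parseLegalHold([]byte(doc))
		fmt.Println(lh, err)
	}
}
```
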
Anis Elleuch
8b76ba8d5d crawling: Apply lifecycle then decide healing action (#11563)
It is inefficient to decide to heal an object before checking its
lifecycle for expiration or transition. This commit just reverses
the order of actions: evaluate the lifecycle first and heal only if
asked and the lifecycle evaluation resulted in a NoneAction.
2021-04-06 12:41:51 -07:00
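
A sketch of the reordered decision, with stub types standing in for the crawler's lifecycle and heal hooks (names are illustrative, not MinIO's):

```go
package main

type lifecycleAction int

const (
	NoneAction lifecycleAction = iota
	DeleteAction
	TransitionAction
)

func evalLifecycle(object string) lifecycleAction { return NoneAction } // stub

func healObject(object string) {} // stub

func crawlObject(object string, healRequested bool) {
	// Expiring or transitioning the object makes healing it pointless,
	// so decide the lifecycle action before spending effort on healing.
	if act := evalLifecycle(object); act != NoneAction {
		// apply the lifecycle action (delete/transition) here
		return
	}
	if healRequested {
		healObject(object)
	}
}

func main() { crawlObject("bucket/object", true) }
```
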
Harshavardhana
7eb7f65e48 add policy conditions support for signatureVersion and authType (#11947)
https://docs.aws.amazon.com/AmazonS3/latest/API/bucket-policy-s3-sigv4-conditions.html

fixes #11944
2021-04-06 12:41:31 -07:00
Harshavardhana
c608c0688a fix: properly close leaking bandwidth monitor channel (#11967)
This PR fixes

- close the leaking bandwidth report channel
- remove the closer requirement for the bandwidth monitor;
  instead, if Read() fails, remember the error and return
  it for all subsequent reads.
- use locking for usage-cache.bin updates; with inline
  data we cannot afford concurrent writes to
  usage-cache.bin corrupting xl.meta
2021-04-06 12:40:42 -07:00
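
A sketch of the "remember the Read() error" pattern described above; the reader type is illustrative, not MinIO's actual implementation:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// stickyErrReader remembers the first Read error and returns it for all
// subsequent reads, removing the need for a separate closer.
type stickyErrReader struct {
	r   io.Reader
	err error
}

func (s *stickyErrReader) Read(p []byte) (int, error) {
	if s.err != nil {
		return 0, s.err
	}
	n, err := s.r.Read(p)
	if err != nil && err != io.EOF {
		s.err = err // remember the failure for every later Read
	}
	return n, err
}

type failReader struct{}

func (failReader) Read([]byte) (int, error) { return 0, errors.New("network read error") }

func main() {
	r := &stickyErrReader{r: failReader{}}
	_, err1 := r.Read(make([]byte, 8))
	_, err2 := r.Read(make([]byte, 8)) // the remembered error again
	fmt.Println(err1, err2)
}
```
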
Aditya Manthramurthy
41a9d1d778 Fix S3Select SQL column reference handling (#11957)
This change fixes handling of these types of queries:

- Double quoted column names with special characters:
    SELECT "column.name" FROM s3object
- Double quoted column names with reserved keywords:
    SELECT "CAST" FROM s3object
- Table name as prefix for column names:
    SELECT S3Object."CAST" FROM s3object
2021-04-06 12:40:28 -07:00
Klaus Post
e21e80841e Fix data race when connecting disks (#11983)
Multiple disks from the same set would be writing concurrently.

```
WARNING: DATA RACE
Write at 0x00c002100ce0 by goroutine 166:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks.func1()
      d:/minio/minio/cmd/erasure-sets.go:254 +0x82f

Previous write at 0x00c002100ce0 by goroutine 129:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks.func1()
      d:/minio/minio/cmd/erasure-sets.go:254 +0x82f

Goroutine 166 (running) created at:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks()
      d:/minio/minio/cmd/erasure-sets.go:210 +0x324
  github.com/minio/minio/cmd.(*erasureSets).monitorAndConnectEndpoints()
      d:/minio/minio/cmd/erasure-sets.go:288 +0x244

Goroutine 129 (finished) created at:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks()
      d:/minio/minio/cmd/erasure-sets.go:210 +0x324
  github.com/minio/minio/cmd.(*erasureSets).monitorAndConnectEndpoints()
      d:/minio/minio/cmd/erasure-sets.go:288 +0x244
```
2021-04-06 12:39:59 -07:00
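
A sketch of the usual fix for this kind of race: goroutines reconnecting disks of the same erasure set serialize writes to the shared disk slots behind a mutex. Names are illustrative, not MinIO's actual types:

```go
package main

import "sync"

type erasureSetLike struct {
	mu    sync.Mutex
	disks []string // stand-in for the per-set disk slots
}

func (s *erasureSetLike) setDisk(idx int, disk string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.disks[idx] = disk
}

func main() {
	s := &erasureSetLike{disks: make([]string, 4)}
	var wg sync.WaitGroup
	for i := 0; i < len(s.disks); i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			s.setDisk(i, "reconnected-disk") // concurrent but now race-free
		}(i)
	}
	wg.Wait()
}
```
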
Klaus Post
98c792bbeb Fix disk info race (#11984)
Protect updated members in xlStorage.

```
WARNING: DATA RACE
Write at 0x00c004b4ee78 by goroutine 1491:
  github.com/minio/minio/cmd.(*xlStorage).GetDiskID()
      d:/minio/minio/cmd/xl-storage.go:590 +0x1078
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).checkDiskStale()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:195 +0x84
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).StatVol()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:284 +0x16a
  github.com/minio/minio/cmd.erasureObjects.getBucketInfo.func1()
      d:/minio/minio/cmd/erasure-bucket.go:100 +0x1a5
  github.com/minio/minio/pkg/sync/errgroup.(*Group).Go.func1()
      d:/minio/minio/pkg/sync/errgroup/errgroup.go:122 +0xd7

Previous read at 0x00c004b4ee78 by goroutine 1087:
  github.com/minio/minio/cmd.(*xlStorage).CheckFile.func1()
      d:/minio/minio/cmd/xl-storage.go:1699 +0x384
  github.com/minio/minio/cmd.(*xlStorage).CheckFile()
      d:/minio/minio/cmd/xl-storage.go:1726 +0x13c
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).CheckFile()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:446 +0x23b
  github.com/minio/minio/cmd.erasureObjects.parentDirIsObject.func1()
      d:/minio/minio/cmd/erasure-common.go:173 +0x194
  github.com/minio/minio/pkg/sync/errgroup.(*Group).Go.func1()
      d:/minio/minio/pkg/sync/errgroup/errgroup.go:122 +0xd7
```
2021-04-06 12:39:57 -07:00
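
A similar sketch for this race, using an RWMutex so that updates like the one in GetDiskID take the write lock while readers such as CheckFile proceed concurrently under the read lock. Names are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

type xlStorageLike struct {
	mu     sync.RWMutex
	diskID string
}

func (s *xlStorageLike) setDiskID(id string) {
	s.mu.Lock()
	s.diskID = id
	s.mu.Unlock()
}

func (s *xlStorageLike) getDiskID() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.diskID
}

func main() {
	s := &xlStorageLike{}
	s.setDiskID("disk-uuid")
	fmt.Println(s.getDiskID())
}
```
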
Klaus Post
f687ba53bc Fix Access Key requests (#11979)
Fix accessing claims when the auth error is unchecked.

Only replaced where the error was unchecked and the change is clearly without side effects.

Fixes #11959
2021-04-06 11:03:55 -07:00
Harshavardhana
e3da59c923 fix possible crash in bucket bandwidth monitor (#11986) 2021-04-06 11:03:41 -07:00
Harshavardhana
781b9b051c fix: service accounts policy enforcement regression (#11910)
Service accounts were no longer inheriting parent policies
due to refactoring of PolicyDBGet() in the latest release;
fix this behavior properly.
2021-04-06 08:58:05 -07:00
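
A toy sketch of the inheritance rule being restored, assuming a hypothetical in-memory IAM store (policyDB and parentOf are stand-ins, not MinIO's types):

```go
package main

import "fmt"

// policyDB maps an account to its attached policy names; parentOf maps
// a service account to its parent user. Both are hypothetical.
var (
	policyDB = map[string][]string{"alice": {"readwrite"}}
	parentOf = map[string]string{"svc-1": "alice"}
)

// policiesFor returns an account's own policies or, for a service
// account without any, falls back to the parent user's policies.
func policiesFor(account string) []string {
	if p := policyDB[account]; len(p) > 0 {
		return p
	}
	if parent, ok := parentOf[account]; ok {
		return policyDB[parent] // inherit from the parent user
	}
	return nil
}

func main() {
	fmt.Println(policiesFor("svc-1")) // [readwrite]
}
```
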
Harshavardhana
438becfde8 fix: delete/delete marker replication versions consistent (#11932)
Replication didn't work as expected when deletion of
delete markers was requested in the DeleteMultipleObjects
API; this was due to incorrect lookup elements being
used to look up delete markers.
2021-04-06 08:57:36 -07:00
Harshavardhana
16ef338649 fix: notify parent user in notification events (#11934)
fixes #11885
2021-04-06 08:55:37 -07:00
Harshavardhana
3242847ec0 avoid network read errors crashing CreateFile call (#11939)
Thanks to @dvaldivia for reproducing this
2021-04-06 08:55:30 -07:00
190 changed files with 2621 additions and 9662 deletions

View File

@@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
go-version: [1.16.x]
os: [ubuntu-latest, windows-latest]
os: [ubuntu-latest, windows-latest, macos-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
@@ -21,6 +21,14 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Build on ${{ matrix.os }}
if: matrix.os == 'macos-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
make
make test-race
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:

View File

@@ -1 +0,0 @@
CVE-2020-26160

View File

@@ -50,7 +50,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
}
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@@ -90,8 +90,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
}
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -119,7 +118,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
bucket := vars["bucket"]
update := r.URL.Query().Get("update") == "true"
if !globalIsErasure {
@@ -212,7 +211,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
bucket := vars["bucket"]
arnType := vars["type"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
@@ -251,7 +250,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
bucket := vars["bucket"]
arn := vars["arn"]
if !globalIsErasure {

View File

@@ -23,6 +23,7 @@ import (
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
@@ -362,7 +363,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
accessKey := vars["accessKey"]
accessKey := path.Clean(vars["accessKey"])
// Get current object layer instance.
objectAPI := newObjectLayerFn()
@@ -470,12 +471,18 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -489,55 +496,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
// Disallow creating service accounts by root user.
if createReq.TargetUser == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
var (
targetUser string
targetGroups []string
)
targetUser = createReq.TargetUser
// Need permission if we are creating a service acccount
// for a user <> to the request sender
if targetUser != "" && targetUser != cred.AccessKey {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if globalLDAPConfig.Enabled && targetUser != "" {
// If LDAP enabled, service accounts need
// to be created only for LDAP users.
var err error
_, targetGroups, err = globalLDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
} else {
if targetUser == "" {
targetUser = cred.AccessKey
}
if cred.ParentUser != "" {
targetUser = cred.ParentUser
}
targetGroups = cred.Groups
}
opts := newServiceAccountOpts{sessionPolicy: createReq.Policy, accessKey: createReq.AccessKey, secretKey: createReq.SecretKey}
newCred, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -573,191 +537,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
writeSuccessResponseJSON(w, encryptedData)
}
// UpdateServiceAccount - POST /minio/admin/v3/update-service-account
func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Disallow editing service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.UpdateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var updateReq madmin.UpdateServiceAccountReq
if err = json.Unmarshal(reqBytes, &updateReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
opts := updateServiceAccountOpts{sessionPolicy: updateReq.NewPolicy, secretKey: updateReq.NewSecretKey, status: updateReq.NewStatus}
err = globalIAMSys.UpdateServiceAccount(ctx, accessKey, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user the service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
writeSuccessNoContent(w)
}
// InfoServiceAccount - GET /minio/admin/v3/info-service-account
func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
svcAccount, policy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
var svcAccountPolicy iampolicy.Policy
impliedPolicy := policy == nil
// If policy is empty, check for policy of the parent user
if !impliedPolicy {
svcAccountPolicy = svcAccountPolicy.Merge(*policy)
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
svcAccountPolicy = svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
}
policyJSON, err := json.Marshal(svcAccountPolicy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var infoResp = madmin.InfoServiceAccountResp{
ParentUser: svcAccount.ParentUser,
AccountStatus: svcAccount.Status,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
}
data, err := json.Marshal(infoResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
@@ -771,7 +550,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
@@ -783,42 +562,19 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
return
}
var targetAccount string
user := r.URL.Query().Get("user")
if user != "" {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
targetAccount = user
} else {
targetAccount = cred.AccessKey
if cred.ParentUser != "" {
targetAccount = cred.ParentUser
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, targetAccount)
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var serviceAccountsNames []string
for _, svc := range serviceAccounts {
serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccountsNames,
Accounts: serviceAccounts,
}
data, err := json.Marshal(listResp)
@@ -849,7 +605,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
@@ -867,32 +623,23 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
})
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if !adminPrivilege {
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != svcAccount.ParentUser {
// The service account belongs to another user but return not
// found error to mitigate brute force attacks. or the
// serviceAccount doesn't exist.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminServiceAccountNotFound), r.URL)
return
}
if parentUser != user || user == "" {
// The service account belongs to another user but return not
// found error to mitigate brute force attacks. or the
// serviceAccount doesn't exist.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
@@ -1028,7 +775,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,

View File

@@ -46,7 +46,6 @@ import (
"github.com/minio/minio/pkg/dsync"
"github.com/minio/minio/pkg/handlers"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/kms"
"github.com/minio/minio/pkg/madmin"
xnet "github.com/minio/minio/pkg/net"
trace "github.com/minio/minio/pkg/trace"
@@ -65,8 +64,8 @@ const (
mgmtForceStop = "forceStop"
)
func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, releaseInfo string, mode string) (us madmin.ServerUpdateStatus, err error) {
if err = doUpdate(u, lrTime, sha256Sum, releaseInfo, mode); err != nil {
func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
if err = doUpdate(u, lrTime, sha256Sum, mode); err != nil {
return us, err
}
@@ -116,13 +115,14 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
return
}
sha256Sum, lrTime, releaseInfo, err := parseReleaseData(content)
sha256Sum, lrTime, err := parseReleaseData(content)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
u.Path = path.Dir(u.Path) + SlashSeparator + "minio.RELEASE." + lrTime.Format(minioReleaseTagTimeLayout)
crTime, err := GetCurrentReleaseTime()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -146,7 +146,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
return
}
for _, nerr := range globalNotificationSys.ServerUpdate(ctx, u, sha256Sum, lrTime, releaseInfo) {
for _, nerr := range globalNotificationSys.ServerUpdate(ctx, u, sha256Sum, lrTime) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
@@ -156,7 +156,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
}
}
updateStatus, err := updateServer(u, sha256Sum, lrTime, releaseInfo, mode)
updateStatus, err := updateServer(u, sha256Sum, lrTime, mode)
if err != nil {
err = fmt.Errorf("Server update failed, please do not restart the servers yet: failed with %w", err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -260,15 +260,26 @@ type ServerHTTPAPIStats struct {
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
TotalS3Canceled ServerHTTPAPIStats `json:"totalS3Canceled"`
TotalS3RejectedAuth uint64 `json:"totalS3RejectedAuth"`
TotalS3RejectedTime uint64 `json:"totalS3RejectedTime"`
TotalS3RejectedHeader uint64 `json:"totalS3RejectedHeader"`
TotalS3RejectedInvalid uint64 `json:"totalS3RejectedInvalid"`
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
TotalS3Canceled ServerHTTPAPIStats `json:"totalS3Canceled"`
}
// ServerInfoData holds storage, connections and other
// information of a given server.
type ServerInfoData struct {
ConnStats ServerConnStats `json:"network"`
HTTPStats ServerHTTPStats `json:"http"`
Properties ServerProperties `json:"server"`
}
// ServerInfo holds server information result of one node
type ServerInfo struct {
Error string `json:"error"`
Addr string `json:"addr"`
Data *ServerInfoData `json:"data"`
}
// StorageInfoHandler - GET /minio/admin/v3/storageinfo
@@ -507,7 +518,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
vars := mux.Vars(r)
profiles := strings.Split(vars["profilerType"], ",")
thisAddr, err := xnet.ParseHost(globalLocalNodeName)
thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1021,75 +1032,26 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
// false if certain conditions are not met.
// - input entry is not of the type *trace.Info*
// - errOnly entries are to be traced, not status code 2xx, 3xx.
// - trace.Info type is asked by opts
func mustTrace(entry interface{}, opts madmin.ServiceTraceOpts) (shouldTrace bool) {
// - all entries to be traced, if not trace only S3 API requests.
func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
trcInfo, ok := entry.(trace.Info)
if !ok {
return false
}
// Override shouldTrace decision with errOnly filtering
defer func() {
if shouldTrace && opts.OnlyErrors {
shouldTrace = trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
// Handle browser requests separately filter them and return.
if HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+"/upload") {
if errOnly {
return trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
}
}()
if opts.Threshold > 0 {
var latency time.Duration
switch trcInfo.TraceType {
case trace.OS:
latency = trcInfo.OSStats.Duration
case trace.Storage:
latency = trcInfo.StorageStats.Duration
case trace.HTTP:
latency = trcInfo.CallStats.Latency
}
if latency < opts.Threshold {
return false
}
}
if opts.Internal && trcInfo.TraceType == trace.HTTP && HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator) {
return true
}
if opts.S3 && trcInfo.TraceType == trace.HTTP && !HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator) {
return true
trace := trcAll || !HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
if errOnly {
return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
}
if opts.Storage && trcInfo.TraceType == trace.Storage {
return true
}
return opts.OS && trcInfo.TraceType == trace.OS
}
func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
q := r.URL.Query()
opts.OnlyErrors = q.Get("err") == "true"
opts.S3 = q.Get("s3") == "true"
opts.Internal = q.Get("internal") == "true"
opts.Storage = q.Get("storage") == "true"
opts.OS = q.Get("os") == "true"
// Support deprecated 'all' query
if q.Get("all") == "true" {
opts.S3 = true
opts.Internal = true
opts.Storage = true
opts.OS = true
}
if t := q.Get("threshold"); t != "" {
d, err := time.ParseDuration(t)
if err != nil {
return opts, err
}
opts.Threshold = d
}
return
return trace
}
// TraceHandler - POST /minio/admin/v3/trace
@@ -1098,6 +1060,9 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HTTPTrace")
trcAll := r.URL.Query().Get("all") == "true"
trcErr := r.URL.Query().Get("err") == "true"
// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.TraceAdminAction, "")
if adminAPIErr != ErrNone {
@@ -1105,12 +1070,6 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
return
}
traceOpts, err := extractTraceOptions(r)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
setEventStreamHeaders(w)
// Trace Publisher and peer-trace-client uses nonblocking send and hence does not wait for slow receivers.
@@ -1119,15 +1078,15 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
peers, _ := newPeerRestClients(globalEndpoints)
globalTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
return mustTrace(entry, traceOpts)
globalHTTPTrace.Subscribe(traceCh, ctx.Done(), func(entry interface{}) bool {
return mustTrace(entry, trcAll, trcErr)
})
for _, peer := range peers {
if peer == nil {
continue
}
peer.Trace(traceCh, ctx.Done(), traceOpts)
peer.Trace(traceCh, ctx.Done(), trcAll, trcErr)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -1261,23 +1220,18 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
stat, err := GlobalKMS.Stat()
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
keyID := r.URL.Query().Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKeyID()
}
var response = madmin.KMSKeyStatus{
KeyID: keyID,
}
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
kmsContext := crypto.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(keyID, kmsContext)
key, sealedKey, err := GlobalKMS.GenerateKey(keyID, kmsContext)
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -1290,7 +1244,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Plaintext, kmsContext)
decryptedKey, err := GlobalKMS.UnsealKey(keyID, sealedKey, kmsContext)
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@@ -1303,7 +1257,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}
// 3. Compare generated key with decrypted key
if subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1 {
if subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1 {
response.DecryptionErr = "The generated and the decrypted data key do not match"
resp, err := json.Marshal(response)
if err != nil {
@@ -1748,39 +1702,36 @@ func fetchKMSStatus() madmin.KMS {
kmsStat.Status = "disabled"
return kmsStat
}
stat, err := GlobalKMS.Stat()
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
keyID := GlobalKMS.DefaultKeyID()
kmsInfo := GlobalKMS.Info()
if len(kmsInfo.Endpoints) == 0 {
kmsStat.Status = "KMS configured using master key"
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
kmsStat.Status = string(madmin.ItemOffline)
} else {
if err := checkConnection(stat.Endpoints[0], 15*time.Second); err != nil {
kmsStat.Status = string(madmin.ItemOffline)
kmsStat.Status = string(madmin.ItemOnline)
kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, sealedKey, err := GlobalKMS.GenerateKey(keyID, kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Status = string(madmin.ItemOnline)
kmsStat.Encrypt = "success"
}
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey("", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.UnsealKey(keyID, sealedKey, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
}
return kmsStat
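
The KMS status code above follows a generate-decrypt-compare pattern. A self-contained sketch, with a toy XOR "KMS" standing in for GlobalKMS (not MinIO's actual API):

```go
package main

import (
	"crypto/rand"
	"crypto/subtle"
	"fmt"
)

// kms is a trivial stand-in: it "seals" a data key by XOR with a master byte.
type kms struct{ master byte }

func (k kms) GenerateKey() (plaintext, ciphertext []byte, err error) {
	plaintext = make([]byte, 32)
	if _, err = rand.Read(plaintext); err != nil {
		return nil, nil, err
	}
	ciphertext = make([]byte, 32)
	for i, b := range plaintext {
		ciphertext[i] = b ^ k.master // toy "sealing"
	}
	return plaintext, ciphertext, nil
}

func (k kms) DecryptKey(ciphertext []byte) []byte {
	plaintext := make([]byte, len(ciphertext))
	for i, b := range ciphertext {
		plaintext[i] = b ^ k.master
	}
	return plaintext
}

func main() {
	k := kms{master: 0x5a}
	// 1. Generate a new data key.
	plain, sealed, err := k.GenerateKey()
	if err != nil {
		panic(err)
	}
	// 2. Verify that we can indeed decrypt the (encrypted) key.
	decrypted := k.DecryptKey(sealed)
	// 3. Compare generated and decrypted keys in constant time.
	if subtle.ConstantTimeCompare(plain, decrypted) != 1 {
		fmt.Println("decrypted key does not match generated key")
		return
	}
	fmt.Println("success")
}
```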

View File

@@ -121,8 +121,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update-service-account").HandlerFunc(httpTraceHdrs(adminAPI.UpdateServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-service-account").HandlerFunc(httpTraceHdrs(adminAPI.InfoServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")

View File

@@ -19,7 +19,6 @@ package cmd
import (
"context"
"net/http"
"runtime"
"time"
"github.com/minio/minio/cmd/logger"
@@ -32,7 +31,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
var localEndpoints Endpoints
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
addr = GetLocalPeer(endpointServerPools)
}
network := make(map[string]string)
for _, ep := range endpointServerPools {
@@ -68,7 +67,6 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
CommitID: CommitID,
Network: network,
}
runtime.ReadMemStats(&props.MemStats)
objLayer := newObjectLayerFn()
if objLayer != nil && !globalIsGateway {

View File

@@ -66,8 +66,6 @@ type APIErrorResponse struct {
// APIErrorCode type of error status.
type APIErrorCode int
//go:generate stringer -type=APIErrorCode -trimprefix=Err $GOFILE
// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
const (
ErrNone APIErrorCode = iota
@@ -82,7 +80,6 @@ const (
ErrInvalidBucketName
ErrInvalidDigest
ErrInvalidRange
ErrInvalidRangePartNumber
ErrInvalidCopyPartRange
ErrInvalidCopyPartRangeSource
ErrInvalidMaxKeys
@@ -367,7 +364,7 @@ const (
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
ErrAdminServiceAccountNotFound
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
@@ -381,13 +378,6 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalServerRegion != "" {
switch errCode {
case ErrAuthorizationHeaderMalformed:
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
return apiErr
}
}
return apiErr
}
@@ -513,11 +503,6 @@ var errorCodes = errorCodeMap{
Description: "The requested range is not satisfiable",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidRangePartNumber: {
Code: "InvalidRequest",
Description: "Cannot specify both Range header and partNumber query parameter",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedXML: {
Code: "MalformedXML",
Description: "The XML you provided was not well-formed or did not validate against our published schema.",
@@ -1754,7 +1739,7 @@ var errorCodes = errorCodeMap{
Description: "The account key is not eligible for this operation",
HTTPStatusCode: http.StatusForbidden,
},
ErrAdminServiceAccountNotFound: {
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
Description: "The specified service account is not found",
HTTPStatusCode: http.StatusNotFound,
@@ -1790,8 +1775,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminInvalidArgument
case errNoSuchUser:
apiErr = ErrAdminNoSuchUser
case errNoSuchServiceAccount:
apiErr = ErrAdminServiceAccountNotFound
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
@@ -1921,6 +1904,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrSlowDown
case InsufficientReadQuorum:
apiErr = ErrSlowDown
case UnsupportedDelimiter:
apiErr = ErrNotImplemented
case InvalidMarkerPrefixCombination:
apiErr = ErrNotImplemented
case InvalidUploadIDKeyCombination:
@@ -2011,8 +1996,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKeyTooLongError
case dns.ErrInvalidBucketName:
apiErr = ErrInvalidBucketName
case dns.ErrBucketConflict:
apiErr = ErrBucketAlreadyExists
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
@@ -2055,22 +2038,6 @@ func toAPIError(ctx context.Context, err error) APIError {
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}
if apiErr.Code == "NotImplemented" {
switch e := err.(type) {
case NotImplemented:
desc := e.Error()
if desc == "" {
desc = apiErr.Description
}
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
HTTPStatusCode: apiErr.HTTPStatusCode,
}
return apiErr
}
}
if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
@@ -2139,13 +2106,6 @@ func toAPIError(ctx context.Context, err error) APIError {
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
if globalIsGateway && strings.Contains(e.Message, "KMS is not configured") {
apiErr = APIError{
Code: "NotImplemented",
Description: e.Message,
HTTPStatusCode: http.StatusNotImplemented,
}
}
case *googleapi.Error:
apiErr = APIError{
Code: "XGCSInternalError",

View File

@@ -43,6 +43,7 @@ var toAPIErrorTests = []struct {
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
{err: UnsupportedDelimiter{}, errCode: ErrNotImplemented},
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},

View File

@@ -158,16 +158,16 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
return err
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
}
// For providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
}
// Set content length.
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
if rs != nil {

View File

@@ -78,103 +78,6 @@ func getHost(r *http.Request) string {
return r.Host
}
func notImplementedHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
}
type rejectedAPI struct {
api string
methods []string
queries []string
path string
}
var rejectedAPIs = []rejectedAPI{
{
api: "inventory",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"inventory", ""},
},
{
api: "cors",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"cors", ""},
},
{
api: "metrics",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"metrics", ""},
},
{
api: "website",
methods: []string{http.MethodPut},
queries: []string{"website", ""},
},
{
api: "logging",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"logging", ""},
},
{
api: "accelerate",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"accelerate", ""},
},
{
api: "requestPayment",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"requestPayment", ""},
},
{
api: "torrent",
methods: []string{http.MethodPut, http.MethodDelete, http.MethodGet},
queries: []string{"torrent", ""},
path: "/{object:.+}",
},
{
api: "acl",
methods: []string{http.MethodDelete},
queries: []string{"acl", ""},
path: "/{object:.+}",
},
{
api: "acl",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodHead},
queries: []string{"acl", ""},
},
{
api: "publicAccessBlock",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"publicAccessBlock", ""},
},
{
api: "ownershipControls",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"ownershipControls", ""},
},
{
api: "intelligent-tiering",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"intelligent-tiering", ""},
},
{
api: "analytics",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"analytics", ""},
},
}
func rejectUnsupportedAPIs(router *mux.Router) {
for _, r := range rejectedAPIs {
t := router.Methods(r.methods...).
HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
Queries(r.queries...)
if r.path != "" {
t.Path(r.path)
}
}
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
// Initialize API.
@@ -213,224 +116,217 @@ func registerAPIRouter(router *mux.Router) {
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
for _, router := range routers {
rejectUnsupportedAPIs(router)
for _, bucket := range routers {
// Object operations
// HeadObject
router.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject with auto-extract support for zip
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzSnowballExtract, "true").HandlerFunc(
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectExtractHandler))))
// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
// PostRestoreObject
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
/// Bucket operations
// GetBucketLocation
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// GetBucketObjectLockConfig
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketReplicationConfig
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
// GetBucketVersioning
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenNotification
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketTaggingHandler
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// ListMultipartUploads
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// GetBucketPolicyStatus
router.Methods(http.MethodGet).HandlerFunc(
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
// PutBucketLifecycle
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketReplicationConfig
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
// GetObjectRetention
// PutBucketEncryption
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
router.Methods(http.MethodPut).HandlerFunc(
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
// HeadBucket
router.Methods(http.MethodHead).HandlerFunc(
bucket.Methods(http.MethodHead).HandlerFunc(
collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
router.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
router.Methods(http.MethodPost).HandlerFunc(
bucket.Methods(http.MethodPost).HandlerFunc(
collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketReplication
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
// DeleteBucketLifecycle
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
router.Methods(http.MethodDelete).HandlerFunc(
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
// MinIO extension API for replication.
//
// GetBucketReplicationMetrics
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(httpTraceAll(api.GetBucketReplicationMetricsHandler)))).Queries("replication-metrics", "")
// S3 ListObjectsV1 (Legacy)
router.Methods(http.MethodGet).HandlerFunc(
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
}
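The registrations above multiplex many S3 bucket sub-resources onto a single path by matching on HTTP method plus a query key via gorilla/mux's `.Queries()` matcher, which fires whenever the key is present. A minimal sketch of the same pattern, with hypothetical handlers in place of MinIO's:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	bucket := r.PathPrefix("/{bucket}").Subrouter()

	// Matches GET /{bucket}?lifecycle — S3 sends sub-resources as a bare
	// query key, and Queries("lifecycle", "") matches whenever that key
	// is present, regardless of its value.
	bucket.Methods(http.MethodGet).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "lifecycle config for %s\n", mux.Vars(r)["bucket"])
	}).Queries("lifecycle", "")

	// Fallback: a plain GET /{bucket} with no recognized sub-resource.
	bucket.Methods(http.MethodGet).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "list objects in %s\n", mux.Vars(r)["bucket"])
	})

	http.ListenAndServe(":8080", r)
}
```

Route registration order matters with this style: more specific query-matched routes must be registered before the bare fallback, which is why the catch-all handlers appear last above.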
/// Root operation

File diff suppressed because one or more lines are too long

View File

@@ -28,7 +28,6 @@ import (
"net/http"
"strconv"
"strings"
"sync/atomic"
"time"
xhttp "github.com/minio/minio/cmd/http"
@@ -194,21 +193,24 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
// Fetch claims from the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
claims := xjwt.NewMapClaims()
return claims.Map(), nil
}
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based on an admin credential so that the token cannot be decoded
// on the client side and is treated as an opaque value.
claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
if err != nil {
stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based on an admin credential so that the token cannot be decoded
// on the client side and is treated as an opaque value.
return []byte(globalActiveCred.SecretKey), nil
}
if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
return nil, errAuthentication
}
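The rewritten `getClaimsFromToken` hands the signing key to the JWT parser through a callback instead of extracting claims up front. The same shape with the public `github.com/golang-jwt/jwt/v4` package (a sketch only; MinIO uses its internal `xjwt` fork, and the secret here is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func claimsFromToken(token, adminSecret string) (jwt.MapClaims, error) {
	claims := jwt.MapClaims{}
	// The key callback returns the admin secret, so tokens signed with
	// temporary credentials can neither be verified nor re-minted client side.
	_, err := jwt.ParseWithClaims(token, claims, func(t *jwt.Token) (interface{}, error) {
		return []byte(adminSecret), nil
	})
	if err != nil {
		return nil, fmt.Errorf("authentication failed: %w", err)
	}
	return claims, nil
}

func main() {
	// Mint a token with the same secret to demonstrate a successful parse.
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"})
	s, _ := tok.SignedString([]byte("admin-secret"))
	claims, err := claimsFromToken(s, "admin-secret")
	fmt.Println(claims, err)
}
```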
@@ -503,7 +505,6 @@ func setAuthHandler(h http.Handler) http.Handler {
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsAuth, 1)
})
}
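The moved line above is the general "count rejections inside the auth middleware" pattern. A self-contained sketch with illustrative names (the real check is AWS signature validation, not a header-presence test):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

var rejectedRequestsAuth uint64

func setAuthHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("Authorization") == "" { // stand-in for signature checks
			// Count the rejection before writing the error response.
			atomic.AddUint64(&rejectedRequestsAuth, 1)
			http.Error(w, "signature version not supported", http.StatusBadRequest)
			return
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	h := setAuthHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	req := httptest.NewRequest(http.MethodGet, "/bucket", nil) // unsigned request
	h.ServeHTTP(httptest.NewRecorder(), req)
	fmt.Println(atomic.LoadUint64(&rejectedRequestsAuth)) // 1
}
```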

View File

@@ -66,7 +66,7 @@ func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
// Bucket notification and HTTP trace are not costly; it is okay to ignore them
// while counting the number of concurrent connections.
maxIOFn := func() int {
return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalHTTPTrace.NumSubscribers())
}
tmpMaxWait := maxWait

View File

@@ -25,7 +25,6 @@ import (
"io"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
)
type errHashMismatch struct {
@@ -38,11 +37,10 @@ func (err *errHashMismatch) Error() string {
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser
closeWithErr func(err error) error
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
iow io.WriteCloser
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
@@ -68,24 +66,16 @@ func (b *streamingBitrotWriter) Close() error {
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk.
// Hence an immediate Read() on the file can return incorrect data.
if b.canClose != nil {
<-b.canClose
}
<-b.canClose
return err
}
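The `canClose` channel referenced in the comment above exists because `pipe.Close()` may return before the reader goroutine has persisted the data. A minimal reproduction of that synchronization, not the actual `streamingBitrotWriter`:

```go
package main

import (
	"fmt"
	"io"
	"time"
)

// pipeWriter wraps an io.Pipe writer and refuses to return from Close
// until the reader goroutine has drained and persisted all data.
type pipeWriter struct {
	iow      io.WriteCloser
	canClose chan struct{}
}

func (w *pipeWriter) Write(p []byte) (int, error) { return w.iow.Write(p) }

func (w *pipeWriter) Close() error {
	err := w.iow.Close()
	// Without this wait, Close could return while the reader is still
	// writing to disk, and an immediate re-read would see stale data.
	<-w.canClose
	return err
}

func newPipeWriter() *pipeWriter {
	r, w := io.Pipe()
	pw := &pipeWriter{iow: w, canClose: make(chan struct{})}
	go func() {
		defer close(pw.canClose)
		n, _ := io.Copy(io.Discard, r) // stand-in for writing to disk
		time.Sleep(10 * time.Millisecond)
		fmt.Println("persisted", n, "bytes")
	}()
	return pw
}

func main() {
	pw := newPipeWriter()
	pw.Write([]byte("hello"))
	pw.Close() // blocks until "persisted 5 bytes" has been printed
}
```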
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil}
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{iow: w, closeWithErr: w.CloseWithError, h: h, shardSize: shardSize, canClose: make(chan struct{})}
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
if length != -1 {
@@ -133,7 +123,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
if len(b.data) == 0 && b.tillOffset != streamOffset {
if len(b.data) == 0 {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
@@ -171,13 +161,15 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk: disk,
data: data,
volume: volume,
filePath: filePath,
tillOffset: ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
h: h,
shardSize: shardSize,
hashBytes: make([]byte, h.Size()),
disk,
data,
nil,
volume,
filePath,
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
0,
h,
shardSize,
make([]byte, h.Size()),
}
}

View File

@@ -17,11 +17,8 @@
package cmd
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
@@ -146,93 +143,3 @@ func bitrotShardFileSize(size int64, shardSize int64, algo BitrotAlgorithm) int6
}
return ceilFrac(size, shardSize)*int64(algo.New().Size()) + size
}
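`bitrotShardFileSize` stores one checksum per shard ahead of the data: `ceil(size/shardSize)` hashes of `algo.New().Size()` bytes each, plus the payload itself. For a 10 MiB part with 1 MiB shards and a 32-byte checksum that is 10×32 + 10485760 = 10486080 bytes. A standalone check (`ceilFrac` re-implemented here for illustration, assuming positive inputs):

```go
package main

import "fmt"

// ceilFrac returns ceil(numerator/denominator) for positive inputs,
// mirroring the helper used by bitrotShardFileSize.
func ceilFrac(numerator, denominator int64) int64 {
	return (numerator + denominator - 1) / denominator
}

func bitrotShardFileSize(size, shardSize, hashSize int64) int64 {
	return ceilFrac(size, shardSize)*hashSize + size
}

func main() {
	const MiB = 1 << 20
	// 10 shards of 1 MiB each, with a 32-byte checksum per shard.
	fmt.Println(bitrotShardFileSize(10*MiB, 1*MiB, 32)) // 10486080
}
```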
// bitrotVerify a single stream of data.
func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, want []byte, shardSize int64) error {
if algo != HighwayHash256S {
h := algo.New()
if n, err := io.Copy(h, r); err != nil || n != wantSize {
// Premature failure in reading the object, file is corrupt.
return errFileCorrupt
}
if !bytes.Equal(h.Sum(nil), want) {
return errFileCorrupt
}
return nil
}
h := algo.New()
hashBuf := make([]byte, h.Size())
buf := make([]byte, shardSize)
left := wantSize
// Calculate the size of the bitrot file and compare
// it with the actual file size.
if left != bitrotShardFileSize(partSize, shardSize, algo) {
return errFileCorrupt
}
for left > 0 {
// Read expected hash...
h.Reset()
n, err := io.ReadFull(r, hashBuf)
if err != nil {
// Reads failed for an object of the right size; the file is corrupt.
return err
}
// Subtract hash length.
left -= int64(n)
if left < shardSize {
shardSize = left
}
read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), buf)
if err != nil {
// Reads failed for an object of the right size, at different offsets.
return err
}
left -= read
if !bytes.Equal(h.Sum(nil), hashBuf) {
return errFileCorrupt
}
}
return nil
}
// bitrotSelfTest performs a self-test to ensure that bitrot
// algorithms compute correct checksums. If any algorithm
// produces an incorrect checksum it fails with a hard error.
//
// bitrotSelfTest tries to catch any issue in the bitrot implementation
// early instead of silently corrupting data.
func bitrotSelfTest() {
var checksums = map[BitrotAlgorithm]string{
SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
HighwayHash256S: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
}
for algorithm := range bitrotAlgorithms {
if !algorithm.Available() {
continue
}
checksum, err := hex.DecodeString(checksums[algorithm])
if err != nil {
logger.Fatal(errSelfTestFailure, fmt.Sprintf("bitrot: failed to decode %v checksum %s for selftest: %v", algorithm, checksums[algorithm], err))
}
var (
hash = algorithm.New()
msg = make([]byte, 0, hash.Size()*hash.BlockSize())
sum = make([]byte, 0, hash.Size())
)
for i := 0; i < hash.Size()*hash.BlockSize(); i += hash.Size() {
hash.Write(msg)
sum = hash.Sum(sum[:0])
msg = append(msg, sum...)
hash.Reset()
}
if !bytes.Equal(sum, checksum) {
logger.Fatal(errSelfTestFailure, fmt.Sprintf("bitrot: %v selftest checksum mismatch: got %x - want %x", algorithm, sum, checksum))
}
}
}

View File

@@ -20,7 +20,6 @@ import (
"bytes"
"crypto/subtle"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
@@ -760,9 +759,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
// Make sure to add Location information here only for bucket
if cp := pathClean(r.URL.Path); cp != "" {
w.Header().Set(xhttp.Location, cp) // Clean any trailing slashes.
}
w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.
writeSuccessResponseHeadersOnly(w)
@@ -1606,59 +1603,3 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationMetrics")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketStats := globalNotificationSys.GetClusterBucketStats(r.Context(), bucket)
bucketReplStats := BucketReplicationStats{}
// sum up metrics from each node in the cluster
for _, bucketStat := range bucketStats {
bucketReplStats.FailedCount += bucketStat.ReplicationStats.FailedCount
bucketReplStats.FailedSize += bucketStat.ReplicationStats.FailedSize
bucketReplStats.PendingCount += bucketStat.ReplicationStats.PendingCount
bucketReplStats.PendingSize += bucketStat.ReplicationStats.PendingSize
bucketReplStats.ReplicaSize += bucketStat.ReplicationStats.ReplicaSize
bucketReplStats.ReplicatedSize += bucketStat.ReplicationStats.ReplicatedSize
}
// add initial usage captured at the time the cluster came up
usageStat := globalReplicationStats.GetInitialUsage(bucket)
bucketReplStats.FailedCount += usageStat.FailedCount
bucketReplStats.FailedSize += usageStat.FailedSize
bucketReplStats.PendingCount += usageStat.PendingCount
bucketReplStats.PendingSize += usageStat.PendingSize
bucketReplStats.ReplicaSize += usageStat.ReplicaSize
bucketReplStats.ReplicatedSize += usageStat.ReplicatedSize
if err := json.NewEncoder(w).Encode(&bucketReplStats); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
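The handler streams a JSON-encoded `BucketReplicationStats` in response to `GET /{bucket}?replication-metrics`. A client-side sketch that decodes that shape (field tags mirror the `BucketReplicationStats` struct later in this compare; a real request must also carry an AWS signature, omitted here):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Mirrors the JSON shape written by GetBucketReplicationMetricsHandler.
type BucketReplicationStats struct {
	PendingSize    uint64 `json:"pendingReplicationSize"`
	ReplicatedSize uint64 `json:"completedReplicationSize"`
	ReplicaSize    uint64 `json:"replicaSize"`
	FailedSize     uint64 `json:"failedReplicationSize"`
	PendingCount   uint64 `json:"pendingReplicationCount"`
	FailedCount    uint64 `json:"failedReplicationCount"`
}

func main() {
	// In practice the body comes from a signed
	// GET /{bucket}?replication-metrics request.
	body := `{"pendingReplicationSize":1024,"failedReplicationCount":2}`
	var st BucketReplicationStats
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&st); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", st)
}
```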

View File

@@ -24,7 +24,6 @@ import (
"net/http"
"runtime"
"strings"
"sync"
"time"
miniogo "github.com/minio/minio-go/v7"
@@ -72,16 +71,11 @@ type expiryTask struct {
}
type expiryState struct {
once sync.Once
expiryCh chan expiryTask
}
func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
select {
case <-GlobalContext.Done():
es.once.Do(func() {
close(es.expiryCh)
})
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
default:
}
@@ -92,9 +86,14 @@ var (
)
func newExpiryState() *expiryState {
return &expiryState{
es := &expiryState{
expiryCh: make(chan expiryTask, 10000),
}
go func() {
<-GlobalContext.Done()
close(es.expiryCh)
}()
return es
}
func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
@@ -107,17 +106,12 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
}
type transitionState struct {
once sync.Once
// add future metrics here
transitionCh chan ObjectInfo
}
func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
select {
case <-GlobalContext.Done():
t.once.Do(func() {
close(t.transitionCh)
})
case t.transitionCh <- oi:
default:
}
@@ -129,13 +123,19 @@ var (
)
func newTransitionState() *transitionState {
// fix the minimum number of concurrent transitions to 1 on single-CPU setups
if globalTransitionConcurrent == 0 {
globalTransitionConcurrent = 1
}
return &transitionState{
ts := &transitionState{
transitionCh: make(chan ObjectInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(ts.transitionCh)
}()
return ts
}
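Both `expiryState` and `transitionState` now close their channel from a single goroutine watching `GlobalContext`, instead of guarding a close on the send path with `sync.Once`. The pattern in isolation (a sketch; as in the source, queueing stays best-effort, and a narrow shutdown race between send and close remains, tolerable because the context is canceled only at process exit):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type task struct{ name string }

type state struct{ ch chan task }

func newState(ctx context.Context) *state {
	s := &state{ch: make(chan task, 16)}
	// Exactly one goroutine owns the close, so no sync.Once is needed
	// and senders never race to close the channel themselves.
	go func() {
		<-ctx.Done()
		close(s.ch)
	}()
	return s
}

// queue is best-effort: it drops the task when the buffer is full and
// stops queueing once the context is canceled.
func (s *state) queue(ctx context.Context, t task) {
	select {
	case <-ctx.Done():
	case s.ch <- t:
	default:
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newState(ctx)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for t := range s.ch { // terminates when the channel is closed
			fmt.Println("processing", t.name)
		}
	}()
	s.queue(ctx, task{name: "expire-v1"})
	time.Sleep(10 * time.Millisecond) // let the worker drain
	cancel()
	<-done
	fmt.Println("worker exited")
}
```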
// addWorker creates a new worker to process tasks
@@ -530,7 +530,7 @@ type SelectParameters struct {
// IsEmpty returns true if no select parameters are set
func (sp *SelectParameters) IsEmpty() bool {
return sp == nil
return sp == nil || sp.S3Select == s3select.S3Select{}
}
var (
@@ -623,9 +623,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
}
meta[v.Name] = v.Value
}
if tags := rreq.OutputLocation.S3.Tagging.String(); tags != "" {
meta[xhttp.AmzObjectTagging] = tags
}
meta[xhttp.AmzObjectTagging] = rreq.OutputLocation.S3.Tagging.String()
if rreq.OutputLocation.S3.Encryption.EncryptionType != "" {
meta[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
}
@@ -638,9 +636,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
for k, v := range objInfo.UserDefined {
meta[k] = v
}
if len(objInfo.UserTags) != 0 {
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
}
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
return ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),

View File

@@ -169,10 +169,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
}
meta.ReplicationConfigXML = configData
case bucketTargetsFile:
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{
bucket: meta.Name,
bucketTargetsFile: bucketTargetsFile,
})
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{bucket: meta.Name, bucketTargetsFile: bucketTargetsFile})
if err != nil {
return fmt.Errorf("Error encrypting bucket target metadata %w", err)
}

View File

@@ -38,8 +38,6 @@ import (
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/fips"
"github.com/minio/minio/pkg/kms"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/sio"
)
@@ -392,7 +390,7 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
return nil
}
encBytes, metaBytes, err := encryptBucketMetadata(b.Name, b.BucketTargetsConfigJSON, kms.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
encBytes, metaBytes, err := encryptBucketMetadata(b.Name, b.BucketTargetsConfigJSON, crypto.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
if err != nil {
return err
}
@@ -403,23 +401,26 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
}
// encrypt bucket metadata if kms is configured.
func encryptBucketMetadata(bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
func encryptBucketMetadata(bucket string, input []byte, kmsContext crypto.Context) (output, metabytes []byte, err error) {
var sealedKey crypto.SealedKey
if GlobalKMS == nil {
output = input
return
}
var (
key [32]byte
encKey []byte
)
metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey("", kmsContext)
key, encKey, err = GlobalKMS.GenerateKey(GlobalKMS.DefaultKeyID(), kmsContext)
if err != nil {
return
}
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, GlobalKMS.DefaultKeyID(), encKey, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
if err != nil {
return output, metabytes, err
}
@@ -431,15 +432,16 @@ func encryptBucketMetadata(bucket string, input []byte, kmsContext kms.Context)
}
// decrypt bucket metadata if kms is configured.
func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, kmsContext kms.Context) ([]byte, error) {
func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, kmsContext crypto.Context) ([]byte, error) {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(meta)
if err != nil {
return nil, err
}
extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, kmsContext)
if err != nil {
return nil, err
}
@@ -447,8 +449,8 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, ""); err != nil {
return nil, err
}
outbuf := bytes.NewBuffer(nil)
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
return outbuf.Bytes(), err
}
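`encryptBucketMetadata` is envelope encryption: the KMS returns a data key, `crypto.GenerateKey` derives a random object key sealed under it, and `sio` writes the DARE-encrypted stream. A reduced roundtrip using only `github.com/minio/sio`, with a random 32-byte key standing in for the sealed object key (the KMS and key-sealing steps are omitted):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"github.com/minio/sio"
)

func main() {
	// Stand-in for the object key that crypto.GenerateKey derives and
	// seals with the KMS data key in the real code.
	var key [32]byte
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		panic(err)
	}

	plaintext := []byte(`{"targets":[]}`) // e.g. bucket targets JSON

	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{
		Key:        key[:],
		MinVersion: sio.Version20,
	}); err != nil {
		panic(err)
	}

	var opened bytes.Buffer
	if _, err := sio.Decrypt(&opened, &sealed, sio.Config{
		Key:        key[:],
		MinVersion: sio.Version20,
	}); err != nil {
		panic(err)
	}
	fmt.Println(opened.String()) // {"targets":[]}
}
```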

View File

@@ -88,7 +88,7 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
return err
}
dui := v.(madmin.DataUsageInfo)
dui := v.(DataUsageInfo)
bui, ok := dui.BucketsUsage[bucket]
if !ok {
@@ -115,7 +115,7 @@ func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
// enforceFIFOQuotaBucket deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui madmin.BucketUsageInfo) {
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
// Check if the current bucket has quota restrictions, if not skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {

View File

@@ -1,192 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"sync"
"sync/atomic"
"github.com/minio/minio/pkg/bucket/replication"
)
func (b *BucketReplicationStats) hasReplicationUsage() bool {
return b.PendingSize > 0 ||
b.FailedSize > 0 ||
b.ReplicatedSize > 0 ||
b.ReplicaSize > 0 ||
b.PendingCount > 0 ||
b.FailedCount > 0
}
// ReplicationStats holds the global in-memory replication stats
type ReplicationStats struct {
sync.RWMutex
Cache map[string]*BucketReplicationStats
UsageCache map[string]*BucketReplicationStats // initial usage
}
// Delete deletes in-memory replication statistics for a bucket.
func (r *ReplicationStats) Delete(bucket string) {
if r == nil {
return
}
r.Lock()
defer r.Unlock()
delete(r.Cache, bucket)
delete(r.UsageCache, bucket)
}
// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, n int64, status, prevStatus replication.StatusType, opType replication.Type) {
if r == nil {
return
}
r.RLock()
b, ok := r.Cache[bucket]
if !ok {
b = &BucketReplicationStats{}
}
r.RUnlock()
switch status {
case replication.Pending:
if opType == replication.ObjectReplicationType {
atomic.AddUint64(&b.PendingSize, uint64(n))
}
atomic.AddUint64(&b.PendingCount, 1)
case replication.Completed:
switch prevStatus { // adjust counters based on previous state
case replication.Pending:
atomic.AddUint64(&b.PendingCount, ^uint64(0))
case replication.Failed:
atomic.AddUint64(&b.FailedCount, ^uint64(0))
}
if opType == replication.ObjectReplicationType {
atomic.AddUint64(&b.ReplicatedSize, uint64(n))
switch prevStatus {
case replication.Pending:
atomic.AddUint64(&b.PendingSize, ^uint64(n-1))
case replication.Failed:
atomic.AddUint64(&b.FailedSize, ^uint64(n-1))
}
}
case replication.Failed:
// count failures only once - not on every retry
switch prevStatus { // adjust counters based on previous state
case replication.Pending:
atomic.AddUint64(&b.PendingCount, ^uint64(0))
}
if opType == replication.ObjectReplicationType {
if prevStatus == replication.Pending {
atomic.AddUint64(&b.FailedSize, uint64(n))
atomic.AddUint64(&b.FailedCount, 1)
atomic.AddUint64(&b.PendingSize, ^uint64(n-1))
}
}
case replication.Replica:
if opType == replication.ObjectReplicationType {
atomic.AddUint64(&b.ReplicaSize, uint64(n))
}
}
r.Lock()
r.Cache[bucket] = b
r.Unlock()
}
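`Update` above subtracts from unsigned counters with `atomic.AddUint64` and a bitwise complement: adding `^uint64(0)` decrements by 1 and adding `^uint64(n-1)` decrements by `n`, since `^x` is the two's-complement encoding of `-x-1`. A quick check:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var pending uint64 = 100

	// Subtract 1: ^uint64(0) is the two's-complement encoding of -1.
	atomic.AddUint64(&pending, ^uint64(0))
	fmt.Println(pending) // 99

	// Subtract n (here n=42): ^(n-1) encodes -n.
	n := uint64(42)
	atomic.AddUint64(&pending, ^(n - 1))
	fmt.Println(pending) // 57
}
```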
// GetInitialUsage gets replication metrics available at the time of cluster initialization
func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats {
if r == nil {
return BucketReplicationStats{}
}
r.RLock()
defer r.RUnlock()
st, ok := r.UsageCache[bucket]
if !ok {
return BucketReplicationStats{}
}
return BucketReplicationStats{
PendingSize: atomic.LoadUint64(&st.PendingSize),
FailedSize: atomic.LoadUint64(&st.FailedSize),
ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
ReplicaSize: atomic.LoadUint64(&st.ReplicaSize),
PendingCount: atomic.LoadUint64(&st.PendingCount),
FailedCount: atomic.LoadUint64(&st.FailedCount),
}
}
// Get replication metrics for a bucket accumulated on this node since it came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
if r == nil {
return BucketReplicationStats{}
}
r.RLock()
defer r.RUnlock()
st, ok := r.Cache[bucket]
if !ok {
return BucketReplicationStats{}
}
return BucketReplicationStats{
PendingSize: atomic.LoadUint64(&st.PendingSize),
FailedSize: atomic.LoadUint64(&st.FailedSize),
ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
ReplicaSize: atomic.LoadUint64(&st.ReplicaSize),
PendingCount: atomic.LoadUint64(&st.PendingCount),
FailedCount: atomic.LoadUint64(&st.FailedCount),
}
}
// NewReplicationStats initializes in-memory replication statistics
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
st := &ReplicationStats{
Cache: make(map[string]*BucketReplicationStats),
UsageCache: make(map[string]*BucketReplicationStats),
}
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
return st
}
// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return st
}
for bucket, usage := range dataUsageInfo.BucketsUsage {
b := &BucketReplicationStats{
PendingSize: usage.ReplicationPendingSize,
FailedSize: usage.ReplicationFailedSize,
ReplicatedSize: usage.ReplicatedSize,
ReplicaSize: usage.ReplicaSize,
PendingCount: usage.ReplicationPendingCount,
FailedCount: usage.ReplicationFailedCount,
}
if b.hasReplicationUsage() {
st.UsageCache[bucket] = b
}
}
return st
}

View File

@@ -291,14 +291,6 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
versionPurgeStatus = Complete
}
}
prevStatus := dobj.DeleteMarkerReplicationStatus
currStatus := replicationStatus
if dobj.VersionID != "" {
prevStatus = string(dobj.VersionPurgeStatus)
currStatus = string(versionPurgeStatus)
}
// to decrement pending count later.
globalReplicationStats.Update(dobj.Bucket, 0, replication.StatusType(currStatus), replication.StatusType(prevStatus), replication.DeleteReplicationType)
var eventName = event.ObjectReplicationComplete
if replicationStatus == string(replication.Failed) || versionPurgeStatus == Failed {
@@ -571,8 +563,12 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationActio
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
objInfo := ri.ObjectInfo
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
z, ok := objectAPI.(*erasureServerPools)
if !ok {
return
}
bucket := objInfo.Bucket
object := objInfo.Name
@@ -598,7 +594,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
})
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, writeLock, ObjectOptions{
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
@@ -608,10 +604,10 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
Object: objInfo,
Host: "Internal: [Replication]",
})
logger.LogIf(ctx, fmt.Errorf("Unable to update replicate for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
logger.LogIf(ctx, err)
return
}
defer gr.Close() // hold write lock for entire transaction
defer gr.Close() // hold read lock for entire transaction
objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
@@ -648,7 +644,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
rtype = getReplicationAction(objInfo, oi)
if rtype == replicateNone {
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed
// PutObject might have completed.
return
}
}
@@ -660,8 +656,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
srcOpts := miniogo.CopySrcOptions{
Bucket: dest.Bucket,
Object: object,
VersionID: objInfo.VersionID,
}
VersionID: objInfo.VersionID}
dstOpts := miniogo.PutObjectOptions{
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
@@ -723,7 +718,6 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
}
}
prevReplStatus := objInfo.ReplicationStatus
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
@@ -737,46 +731,23 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
eventName = event.ObjectReplicationFailed
}
z, ok := objectAPI.(*erasureServerPools)
if !ok {
return
}
// Leave metadata in `PENDING` state if inline replication fails, to save IOPS
if ri.OpType == replication.HealReplicationType || replicationStatus == replication.Completed {
// This lower level implementation is necessary to avoid write locks from CopyObject.
poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
if err != nil {
// This lower level implementation is necessary to avoid write locks from CopyObject.
poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
fi := FileInfo{}
fi.VersionID = objInfo.VersionID
fi.Metadata = make(map[string]string, len(objInfo.UserDefined))
for k, v := range objInfo.UserDefined {
fi.Metadata[k] = v
}
if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, fi); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
}
opType := replication.MetadataReplicationType
if rtype == replicateAll {
opType = replication.ObjectReplicationType
}
globalReplicationStats.Update(bucket, size, replicationStatus, prevReplStatus, opType)
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
}
// re-queue failures once more - keep a retry count to avoid flooding the queue if
// the target site is down. Leave it to the scanner to catch up instead.
if replicationStatus == replication.Failed && ri.RetryCount < 1 {
ri.OpType = replication.HealReplicationType
ri.RetryCount++
globalReplicationPool.queueReplicaTask(ctx, ri)
}
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
}
// filterReplicationStatusMetadata filters replication status metadata for COPY
@@ -809,62 +780,38 @@ type DeletedObjectVersionInfo struct {
}
var (
globalReplicationPool *ReplicationPool
globalReplicationStats *ReplicationStats
globalReplicationPool *ReplicationPool
)
// ReplicationPool describes replication pool
type ReplicationPool struct {
once sync.Once
mu sync.Mutex
size int
replicaCh chan ReplicateObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
mrfReplicaCh chan ReplicateObjectInfo
mrfReplicaDeleteCh chan DeletedObjectVersionInfo
killCh chan struct{}
wg sync.WaitGroup
ctx context.Context
objLayer ObjectLayer
mu sync.Mutex
size int
replicaCh chan ObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
killCh chan struct{}
wg sync.WaitGroup
ctx context.Context
objLayer ObjectLayer
}
// NewReplicationPool creates a pool of replication workers of specified size
func NewReplicationPool(ctx context.Context, o ObjectLayer, sz int) *ReplicationPool {
pool := &ReplicationPool{
replicaCh: make(chan ReplicateObjectInfo, 1000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 1000),
mrfReplicaCh: make(chan ReplicateObjectInfo, 100000),
mrfReplicaDeleteCh: make(chan DeletedObjectVersionInfo, 100000),
ctx: ctx,
objLayer: o,
replicaCh: make(chan ObjectInfo, 10000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
ctx: ctx,
objLayer: o,
}
go func() {
<-ctx.Done()
close(pool.replicaCh)
close(pool.replicaDeleteCh)
}()
pool.Resize(sz)
// add a long-running worker for handling most recent failures/pending replications
go pool.AddMRFWorker()
return pool
}
// AddMRFWorker adds a pending/failed replication worker to handle requests that could not be queued
// to the other workers
func (p *ReplicationPool) AddMRFWorker() {
for {
select {
case <-p.ctx.Done():
return
case oi, ok := <-p.mrfReplicaCh:
if !ok {
return
}
replicateObject(p.ctx, oi, p.objLayer)
case doi, ok := <-p.mrfReplicaDeleteCh:
if !ok {
return
}
replicateDelete(p.ctx, doi, p.objLayer)
}
}
}
// AddWorker adds a replication worker to the pool
func (p *ReplicationPool) AddWorker() {
defer p.wg.Done()
@@ -905,43 +852,28 @@ func (p *ReplicationPool) Resize(n int) {
}
}
func (p *ReplicationPool) queueReplicaTask(ctx context.Context, ri ReplicateObjectInfo) {
func (p *ReplicationPool) queueReplicaTask(oi ObjectInfo) {
if p == nil {
return
}
select {
case <-ctx.Done():
p.once.Do(func() {
close(p.replicaCh)
close(p.mrfReplicaCh)
})
case p.replicaCh <- ri:
case p.mrfReplicaCh <- ri:
// queue all overflows into the mrfReplicaCh to handle incoming pending/failed operations
case p.replicaCh <- oi:
default:
}
}
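`queueReplicaTask` sends to the primary worker channel, spills into the much larger MRF channel drained by `AddMRFWorker`, and drops the task only when both are full. One nuance: a single `select` picks uniformly among ready cases, so the source does not strictly prefer the primary channel; the sketch below enforces strict priority with two selects to make the intended tiering explicit:

```go
package main

import "fmt"

type pool struct {
	primary  chan int // sized for the worker count
	overflow chan int // large MRF buffer drained by a dedicated worker
}

// enqueue tries the workers first, spills to the overflow queue, and
// drops the task as a last resort (the scanner retries it later).
func (p *pool) enqueue(task int) string {
	select {
	case p.primary <- task:
		return "primary"
	default:
	}
	select {
	case p.overflow <- task:
		return "overflow"
	default:
		return "dropped"
	}
}

func main() {
	p := &pool{primary: make(chan int, 1), overflow: make(chan int, 2)}
	for i := 0; i < 5; i++ {
		fmt.Println(i, p.enqueue(i))
	}
	// Output: 0 primary, 1 overflow, 2 overflow, 3 dropped, 4 dropped
}
```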
func (p *ReplicationPool) queueReplicaDeleteTask(ctx context.Context, doi DeletedObjectVersionInfo) {
func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
if p == nil {
return
}
select {
case <-ctx.Done():
p.once.Do(func() {
close(p.replicaDeleteCh)
close(p.mrfReplicaDeleteCh)
})
case p.replicaDeleteCh <- doi:
case p.mrfReplicaDeleteCh <- doi:
// queue all overflows into the mrfReplicaDeleteCh to handle incoming pending/failed operations
default:
}
}
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationWorkers())
globalReplicationStats = NewReplicationStats(ctx, objectAPI)
}
// get Reader from replication target if active-active replication is in place and
@@ -1077,18 +1009,18 @@ func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, op
return oi, proxy, err
}
func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool, opType replication.Type) {
func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool) {
if sync {
replicateObject(ctx, ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType}, o)
replicateObject(ctx, objInfo, o)
} else {
globalReplicationPool.queueReplicaTask(GlobalContext, ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType})
}
if sz, err := objInfo.GetActualSize(); err == nil {
globalReplicationStats.Update(objInfo.Bucket, sz, objInfo.ReplicationStatus, replication.StatusType(""), opType)
globalReplicationPool.queueReplicaTask(objInfo)
}
}
func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo, o ObjectLayer, sync bool) {
globalReplicationPool.queueReplicaDeleteTask(GlobalContext, dv)
globalReplicationStats.Update(dv.Bucket, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
if sync {
replicateDelete(ctx, dv, o)
} else {
globalReplicationPool.queueReplicaDeleteTask(dv)
}
}

View File

@@ -1,41 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
//go:generate msgp -file $GOFILE
// BucketStats bucket statistics
type BucketStats struct {
ReplicationStats BucketReplicationStats
}
// BucketReplicationStats represents inline replication statistics
// such as pending, failed and completed bytes in total for a bucket
type BucketReplicationStats struct {
// Pending size in bytes
PendingSize uint64 `json:"pendingReplicationSize"`
// Completed size in bytes
ReplicatedSize uint64 `json:"completedReplicationSize"`
// Total Replica size in bytes
ReplicaSize uint64 `json:"replicaSize"`
// Failed size in bytes
FailedSize uint64 `json:"failedReplicationSize"`
// Total number of pending operations including metadata updates
PendingCount uint64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates
FailedCount uint64 `json:"failedReplicationCount"`
}

View File

@@ -1,342 +0,0 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "PendingSize":
z.PendingSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
case "ReplicatedSize":
z.ReplicatedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "ReplicaSize":
z.ReplicaSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "FailedSize":
z.FailedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "PendingCount":
z.PendingCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
case "FailedCount":
z.FailedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "PendingSize"
err = en.Append(0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.PendingSize)
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
// write "ReplicatedSize"
err = en.Append(0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicatedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
// write "ReplicaSize"
err = en.Append(0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicaSize)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
// write "FailedSize"
err = en.Append(0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.FailedSize)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
// write "PendingCount"
err = en.Append(0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.PendingCount)
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
// write "FailedCount"
err = en.Append(0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.FailedCount)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketReplicationStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "PendingSize"
o = append(o, 0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.PendingSize)
// string "ReplicatedSize"
o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicatedSize)
// string "ReplicaSize"
o = append(o, 0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicaSize)
// string "FailedSize"
o = append(o, 0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.FailedSize)
// string "PendingCount"
o = append(o, 0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.PendingCount)
// string "FailedCount"
o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.FailedCount)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "PendingSize":
z.PendingSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "PendingSize")
return
}
case "ReplicatedSize":
z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "ReplicaSize":
z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "FailedSize":
z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "PendingCount":
z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "PendingCount")
return
}
case "FailedCount":
z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketReplicationStats) Msgsize() (s int) {
s = 1 + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 12 + msgp.Uint64Size + 11 + msgp.Uint64Size + 13 + msgp.Uint64Size + 12 + msgp.Uint64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *BucketStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ReplicationStats":
err = z.ReplicationStats.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ReplicationStats"
err = en.Append(0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = z.ReplicationStats.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ReplicationStats"
o = append(o, 0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
o, err = z.ReplicationStats.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ReplicationStats":
bts, err = z.ReplicationStats.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationStats")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketStats) Msgsize() (s int) {
s = 1 + 17 + z.ReplicationStats.Msgsize()
return
}

View File

@@ -1,236 +0,0 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketReplicationStats(t *testing.T) {
v := BucketReplicationStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketReplicationStats(b *testing.B) {
v := BucketReplicationStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketReplicationStats(b *testing.B) {
v := BucketReplicationStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketReplicationStats(b *testing.B) {
v := BucketReplicationStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketReplicationStats(t *testing.T) {
v := BucketReplicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketReplicationStats Msgsize() is inaccurate")
}
vn := BucketReplicationStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketReplicationStats(b *testing.B) {
v := BucketReplicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketReplicationStats(b *testing.B) {
v := BucketReplicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBucketStats(t *testing.T) {
v := BucketStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketStats(b *testing.B) {
v := BucketStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketStats(b *testing.B) {
v := BucketStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketStats(b *testing.B) {
v := BucketStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketStats(t *testing.T) {
v := BucketStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketStats Msgsize() is inaccurate")
}
vn := BucketStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketStats(b *testing.B) {
v := BucketStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketStats(b *testing.B) {
v := BucketStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View File

@@ -31,7 +31,6 @@ import (
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/madmin"
)
@@ -105,7 +104,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
}
if tgt.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
return NotImplemented{}
}
if !globalBucketVersioningSys.Enabled(bucket) {
return BucketReplicationSourceNotVersioned{Bucket: bucket}
@@ -117,9 +116,6 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
}
if tgt.ReplicationSync && tgt.BandwidthLimit > 0 {
return NotImplemented{Message: "Synchronous replication does not support bandwidth limits"}
}
}
if tgt.Type == madmin.ILMService {
if globalBucketVersioningSys.Enabled(bucket) {
@@ -183,7 +179,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
}
if arn.Type == madmin.ReplicationService {
if !globalIsErasure {
return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
return NotImplemented{}
}
// reject removal of remote target if replication configuration is present
rcfg, err := getReplicationConfig(ctx, bucket)
@@ -332,7 +328,6 @@ func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objA
for _, bucket := range buckets {
cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket.Name)
if err != nil {
logger.LogIf(ctx, err)
continue
}
if cfg == nil || cfg.Empty() {
@@ -344,7 +339,6 @@ func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objA
for _, tgt := range cfg.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil {
logger.LogIf(ctx, err)
continue
}
sys.arnRemotesMap[tgt.Arn] = tgtClient
@@ -363,7 +357,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")
getRemoteTargetInstanceTransportOnce.Do(func() {
getRemoteTargetInstanceTransport = NewRemoteTargetHTTPTransport()
getRemoteTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute)
})
api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
Creds: creds,
@@ -438,10 +432,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
return nil, err
}
if crypto.S3.IsEncrypted(meta) {
if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{
bucket: bucket,
bucketTargetsFile: bucketTargetsFile,
}); err != nil {
if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{bucket: bucket, bucketTargetsFile: bucketTargetsFile}); err != nil {
return nil, err
}
}

View File

@@ -59,15 +59,7 @@ func init() {
config.Logger.Info = logger.Info
config.Logger.LogIf = logger.LogIf
if IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsKubernetesReplicaSet() || IsPCFTile() {
// 30 seconds matches the orchestrator DNS TTLs; use
// a 5-second timeout for lookups from DNS servers.
globalDNSCache = xhttp.NewDNSCache(30*time.Second, 5*time.Second, logger.LogOnceIf)
} else {
// On bare metal, DNS entries do not change often, so it is
// safe to assume a higher TTL of up to 10 minutes.
globalDNSCache = xhttp.NewDNSCache(10*time.Minute, 5*time.Second, logger.LogOnceIf)
}
globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
initGlobalContext()
@@ -436,13 +428,3 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
secureConn = true
return x509Certs, manager, secureConn, nil
}
// contextCanceled returns whether a context is canceled.
func contextCanceled(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}

View File

@@ -452,7 +452,7 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
// if we validated all setDriveCounts and it was successful
// proceed to store the correct storage class globally.
if i == len(setDriveCounts)-1 {
globalStorageClass.Update(sc)
globalStorageClass = sc
}
}
}
@@ -624,8 +624,6 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
globalHealConfig = healCfg
globalHealConfigMu.Unlock()
// update dynamic scanner values.
scannerCycle.Update(scannerCfg.Cycle)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
// Update all dynamic config values in memory.

View File

@@ -29,15 +29,14 @@ import (
// API sub-system constants
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"
apiListQuorum = "list_quorum"
apiExtendListCacheLife = "extend_list_cache_life"
apiReplicationWorkers = "replication_workers"
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"
apiListQuorum = "list_quorum"
apiExtendListCacheLife = "extend_list_cache_life"
apiReplicationWorkers = "replication_workers"
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE"
@@ -88,7 +87,7 @@ var (
},
config.KV{
Key: apiReplicationWorkers,
Value: "500",
Value: "100",
},
}
)

View File

@@ -23,21 +23,23 @@ const (
// Top level common ENVs
const (
EnvAccessKey = "MINIO_ACCESS_KEY"
EnvSecretKey = "MINIO_SECRET_KEY"
EnvRootUser = "MINIO_ROOT_USER"
EnvRootPassword = "MINIO_ROOT_PASSWORD"
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
EnvBrowser = "MINIO_BROWSER"
EnvDomain = "MINIO_DOMAIN"
EnvRegionName = "MINIO_REGION_NAME"
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvArgs = "MINIO_ARGS"
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
EnvAccessKey = "MINIO_ACCESS_KEY"
EnvSecretKey = "MINIO_SECRET_KEY"
EnvRootUser = "MINIO_ROOT_USER"
EnvRootPassword = "MINIO_ROOT_PASSWORD"
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
EnvBrowser = "MINIO_BROWSER"
EnvDomain = "MINIO_DOMAIN"
EnvRegionName = "MINIO_REGION_NAME"
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvArgs = "MINIO_ARGS"
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
EnvLogPosixTimes = "MINIO_LOG_POSIX_TIMES"
EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"
EnvUpdate = "MINIO_UPDATE"

View File

@@ -1,138 +0,0 @@
// MinIO Cloud Storage, (C) 2021 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/minio/minio/pkg/kms"
"github.com/secure-io/sio-go"
"github.com/secure-io/sio-go/sioutil"
)
// Encrypt encrypts the plaintext with a key managed by KMS.
// The context is bound to the returned ciphertext.
//
// The same context must be provided when decrypting the
// ciphertext.
func Encrypt(KMS kms.KMS, plaintext io.Reader, context kms.Context) (io.Reader, error) {
var algorithm = sio.AES_256_GCM
if !sioutil.NativeAES() {
algorithm = sio.ChaCha20Poly1305
}
key, err := KMS.GenerateKey("", context)
if err != nil {
return nil, err
}
stream, err := algorithm.Stream(key.Plaintext)
if err != nil {
return nil, err
}
nonce := make([]byte, stream.NonceSize())
if _, err := rand.Read(nonce); err != nil {
return nil, err
}
const (
MaxMetadataSize = 1 << 20 // max. size of the metadata
Version = 1
)
var (
header [5]byte
buffer bytes.Buffer
)
metadata, err := json.Marshal(encryptedObject{
KeyID: key.KeyID,
KMSKey: key.Ciphertext,
Algorithm: algorithm,
Nonce: nonce,
})
if err != nil {
return nil, err
}
if len(metadata) > MaxMetadataSize {
return nil, errors.New("config: encryption metadata is too large")
}
header[0] = Version
binary.LittleEndian.PutUint32(header[1:], uint32(len(metadata)))
buffer.Write(header[:])
buffer.Write(metadata)
return io.MultiReader(
&buffer,
stream.EncryptReader(plaintext, nonce, nil),
), nil
}
// Decrypt decrypts the ciphertext using a key managed by the KMS.
// The same context that has been used during encryption must be
// provided.
func Decrypt(KMS kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, error) {
const (
MaxMetadataSize = 1 << 20 // max. size of the metadata
Version = 1
)
var header [5]byte
if _, err := io.ReadFull(ciphertext, header[:]); err != nil {
return nil, err
}
if header[0] != Version {
return nil, fmt.Errorf("config: unknown ciphertext version %d", header[0])
}
size := binary.LittleEndian.Uint32(header[1:])
if size > MaxMetadataSize {
return nil, errors.New("config: encryption metadata is too large")
}
var (
metadataBuffer = make([]byte, size)
metadata encryptedObject
)
if _, err := io.ReadFull(ciphertext, metadataBuffer); err != nil {
return nil, err
}
if err := json.Unmarshal(metadataBuffer, &metadata); err != nil {
return nil, err
}
key, err := KMS.DecryptKey(metadata.KeyID, metadata.KMSKey, context)
if err != nil {
return nil, err
}
stream, err := metadata.Algorithm.Stream(key)
if err != nil {
return nil, err
}
if stream.NonceSize() != len(metadata.Nonce) {
return nil, sio.NotAuthentic
}
return stream.DecryptReader(ciphertext, metadata.Nonce, nil), nil
}
type encryptedObject struct {
KeyID string `json:"keyid"`
KMSKey []byte `json:"kmskey"`
Algorithm sio.Algorithm `json:"algorithm"`
Nonce []byte `json:"nonce"`
}
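Taken together, Encrypt and Decrypt define a small framed format: one version byte, a 4-byte little-endian metadata length, the JSON metadata (key ID, sealed data key, algorithm, nonce), then the encrypted stream. A minimal round-trip sketch, assuming a kms.KMS instance is available and that bytes and io/ioutil are imported:

```
// Sketch: encrypt a config blob and decrypt it with the same context.
func roundTripConfig(KMS kms.KMS, data []byte) ([]byte, error) {
	context := kms.Context{"config": "example"} // illustrative context
	ciphertext, err := Encrypt(KMS, bytes.NewReader(data), context)
	if err != nil {
		return nil, err
	}
	sealed, err := ioutil.ReadAll(ciphertext) // header || metadata || encrypted stream
	if err != nil {
		return nil, err
	}
	// Decrypt requires the exact context that was bound during Encrypt.
	plaintext, err := Decrypt(KMS, bytes.NewReader(sealed), context)
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(plaintext)
}
```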

View File

@@ -1,116 +0,0 @@
// MinIO Cloud Storage, (C) 2021 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/hex"
"io"
"io/ioutil"
"testing"
"github.com/minio/minio/pkg/kms"
)
var encryptDecryptTests = []struct {
Data []byte
Context kms.Context
}{
{
Data: nil,
Context: nil,
},
{
Data: []byte{1},
Context: nil,
},
{
Data: []byte{1},
Context: kms.Context{"key": "value"},
},
{
Data: make([]byte, 1<<20),
Context: kms.Context{"key": "value", "a": "b"},
},
}
func TestEncryptDecrypt(t *testing.T) {
key, err := hex.DecodeString("ddedadb867afa3f73bd33c25499a723ed7f9f51172ee7b1b679e08dc795debcc")
if err != nil {
t.Fatalf("Failed to decode master key: %v", err)
}
KMS, err := kms.New("my-key", key)
if err != nil {
t.Fatalf("Failed to create KMS: %v", err)
}
for i, test := range encryptDecryptTests {
ciphertext, err := Encrypt(KMS, bytes.NewReader(test.Data), test.Context)
if err != nil {
t.Fatalf("Test %d: failed to encrypt stream: %v", i, err)
}
data, err := ioutil.ReadAll(ciphertext)
if err != nil {
t.Fatalf("Test %d: failed to encrypt stream: %v", i, err)
}
plaintext, err := Decrypt(KMS, bytes.NewReader(data), test.Context)
if err != nil {
t.Fatalf("Test %d: failed to decrypt stream: %v", i, err)
}
data, err = ioutil.ReadAll(plaintext)
if err != nil {
t.Fatalf("Test %d: failed to decrypt stream: %v", i, err)
}
if !bytes.Equal(data, test.Data) {
t.Fatalf("Test %d: decrypted data does not match original data", i)
}
}
}
func BenchmarkEncrypt(b *testing.B) {
key, err := hex.DecodeString("ddedadb867afa3f73bd33c25499a723ed7f9f51172ee7b1b679e08dc795debcc")
if err != nil {
b.Fatalf("Failed to decode master key: %v", err)
}
KMS, err := kms.New("my-key", key)
if err != nil {
b.Fatalf("Failed to create KMS: %v", err)
}
benchmarkEncrypt := func(size int, b *testing.B) {
var (
data = make([]byte, size)
plaintext = bytes.NewReader(data)
context = kms.Context{"key": "value"}
)
b.SetBytes(int64(size))
for i := 0; i < b.N; i++ {
ciphertext, err := Encrypt(KMS, plaintext, context)
if err != nil {
b.Fatal(err)
}
if _, err = io.Copy(ioutil.Discard, ciphertext); err != nil {
b.Fatal(err)
}
plaintext.Reset(data)
}
}
b.Run("1KB", func(b *testing.B) { benchmarkEncrypt(1*1024, b) })
b.Run("512KB", func(b *testing.B) { benchmarkEncrypt(512*1024, b) })
b.Run("1MB", func(b *testing.B) { benchmarkEncrypt(1024*1024, b) })
b.Run("10MB", func(b *testing.B) { benchmarkEncrypt(10*1024*1024, b) })
}

View File

@@ -100,10 +100,6 @@ func (c *OperatorDNS) Put(bucket string) error {
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
errorString := errorStringBuilder.String()
switch resp.StatusCode {
case http.StatusConflict:
return ErrBucketConflict(Error{bucket, errors.New(errorString)})
}
return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
}
return nil

View File

@@ -26,20 +26,12 @@ type Error struct {
type ErrInvalidBucketName Error
func (e ErrInvalidBucketName) Error() string {
return e.Bucket + " invalid bucket name error: " + e.Err.Error()
return "invalid bucket name error: " + e.Err.Error()
}
func (e Error) Error() string {
return "dns related error: " + e.Err.Error()
}
// ErrBucketConflict for buckets that already exist
type ErrBucketConflict Error
func (e ErrBucketConflict) Error() string {
return e.Bucket + " bucket conflict error: " + e.Err.Error()
}
// Store is a DNS record store
type Store interface {
Put(bucket string) error

View File

@@ -260,67 +260,6 @@ func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error)
return searchResult.Entries[0].DN, nil
}
func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) ([]string, error) {
// User groups lookup.
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
nil,
nil,
)
var newGroups []string
newGroups, err := getGroups(conn, searchRequest)
if err != nil {
errRet := fmt.Errorf("Error finding groups of %s: %v", bindDN, err)
return nil, errRet
}
groups = append(groups, newGroups...)
}
}
return groups, nil
}
// LookupUserDN searches for the full DN and groups of a given username
func (l *Config) LookupUserDN(username string) (string, []string, error) {
if !l.isUsingLookupBind {
return "", nil, errors.New("current lookup mode does not support searching for User DN")
}
conn, err := l.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.lookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err := l.lookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
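For reference, the %s and %d placeholders in the group search filter expand to the escaped username and bind DN respectively; an illustrative expansion (the filter and DN values are hypothetical):

```
// Hypothetical config values showing the filter substitution above.
groupSearchFilter := "(&(objectClass=groupOfNames)(member=%d))"
username := "jsmith"
bindDN := "uid=jsmith,ou=people,dc=example,dc=org"

filter := strings.Replace(groupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
// filter == "(&(objectClass=groupOfNames)(member=uid=jsmith,ou=people,dc=example,dc=org))"
```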
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {
@@ -371,9 +310,28 @@ func (l *Config) Bind(username, password string) (string, []string, error) {
}
// User groups lookup.
groups, err := l.searchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
nil,
nil,
)
var newGroups []string
newGroups, err = getGroups(conn, searchRequest)
if err != nil {
errRet := fmt.Errorf("Error finding groups of %s: %v", bindDN, err)
return "", nil, errRet
}
groups = append(groups, newGroups...)
}
}
return bindDN, groups, nil

View File

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2020-2021 MinIO, Inc.
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,10 +28,8 @@ import (
const (
Delay = "delay"
MaxWait = "max_wait"
Cycle = "cycle"
EnvDelay = "MINIO_SCANNER_DELAY"
EnvCycle = "MINIO_SCANNER_CYCLE"
EnvDelayLegacy = "MINIO_CRAWLER_DELAY"
EnvMaxWait = "MINIO_SCANNER_MAX_WAIT"
EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"
@@ -43,8 +41,6 @@ type Config struct {
Delay float64 `json:"delay"`
// MaxWait is maximum wait time between operations
MaxWait time.Duration
// Cycle is the time.Duration between scanner cycles
Cycle time.Duration
}
var (
@@ -58,10 +54,6 @@ var (
Key: MaxWait,
Value: "15s",
},
config.KV{
Key: Cycle,
Value: "1m",
},
}
// Help provides help for config values
@@ -78,12 +70,6 @@ var (
Optional: true,
Type: "duration",
},
config.HelpKV{
Key: Cycle,
Description: `time duration between scanner cycles, defaults to '1m'`,
Optional: true,
Type: "duration",
},
}
)
@@ -108,10 +94,5 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err != nil {
return cfg, err
}
cfg.Cycle, err = time.ParseDuration(env.Get(EnvCycle, kvs.Get(Cycle)))
if err != nil {
return cfg, err
}
return cfg, nil
}

View File

@@ -22,7 +22,6 @@ import (
"fmt"
"strconv"
"strings"
"sync"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
@@ -91,9 +90,6 @@ type StorageClass struct {
Parity int
}
// ConfigLock is a global lock for storage-class config
var ConfigLock = sync.RWMutex{}
// Config storage class configuration
type Config struct {
Standard StorageClass `json:"standard"`
@@ -236,8 +232,6 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
// is returned, the caller is expected to choose the right parity
// at that point.
func (sCfg Config) GetParityForSC(sc string) (parity int) {
ConfigLock.RLock()
defer ConfigLock.RUnlock()
switch strings.TrimSpace(sc) {
case RRS:
// set the rrs parity if available
@@ -250,19 +244,8 @@ func (sCfg Config) GetParityForSC(sc string) (parity int) {
}
}
// Update updates the storage-class config with new values
func (sCfg *Config) Update(newCfg Config) {
ConfigLock.Lock()
defer ConfigLock.Unlock()
sCfg.RRS = newCfg.RRS
sCfg.DMA = newCfg.DMA
sCfg.Standard = newCfg.Standard
}
// GetDMA - returns DMA configuration.
func (sCfg Config) GetDMA() string {
ConfigLock.RLock()
defer ConfigLock.RUnlock()
return sCfg.DMA
}

View File

@@ -40,6 +40,17 @@ type HTTPConsoleLoggerSys struct {
logBuf *ring.Ring
}
func mustGetNodeName(endpointServerPools EndpointServerPools) (nodeName string) {
host, err := xnet.ParseHost(GetLocalPeer(endpointServerPools))
if err != nil {
logger.FatalIf(err, "Unable to start console logging subsystem")
}
if globalIsDistErasure {
nodeName = host.Name
}
return nodeName
}
// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
@@ -52,18 +63,8 @@ func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
}
// SetNodeName - sets the node name if any after distributed setup has initialized
func (sys *HTTPConsoleLoggerSys) SetNodeName(nodeName string) {
if !globalIsDistErasure {
sys.nodeName = ""
return
}
host, err := xnet.ParseHost(globalLocalNodeName)
if err != nil {
logger.FatalIf(err, "Unable to start console logging subsystem")
}
sys.nodeName = host.Name
func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointServerPools EndpointServerPools) {
sys.nodeName = mustGetNodeName(endpointServerPools)
}
// HasLogListeners returns true if console log listeners are registered

View File

@@ -32,7 +32,6 @@ import (
jsoniter "github.com/json-iterator/go"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/kms"
xnet "github.com/minio/minio/pkg/net"
)
@@ -156,12 +155,22 @@ func NewKes(cfg KesConfig) (KMS, error) {
}, nil
}
func (kes *kesService) Stat() (kms.Status, error) {
return kms.Status{
Name: "KES",
Endpoints: kes.endpoints,
DefaultKey: kes.defaultKeyID,
}, nil
// DefaultKeyID returns the default key ID that should be
// used for SSE-S3 or SSE-KMS when the S3 client does not
// provide an explicit key ID.
func (kes *kesService) DefaultKeyID() string {
return kes.defaultKeyID
}
// Info returns some information about the KES
// configuration - like the endpoint or authentication
// method.
func (kes *kesService) Info() KMSInfo {
return KMSInfo{
Endpoints: kes.endpoints,
Name: kes.DefaultKeyID(),
AuthType: "TLS",
}
}
// CreateKey tries to create a new master key with the given keyID.
@@ -171,24 +180,22 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (kes *kesService) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
if keyID == "" {
keyID = kes.defaultKeyID
}
func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
context, err := ctx.MarshalText()
if err != nil {
return kms.DEK{}, err
return key, nil, err
}
plaintext, ciphertext, err := kes.client.GenerateDataKey(keyID, context)
var plainKey []byte
plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
if err != nil {
return kms.DEK{}, err
return key, nil, err
}
return kms.DEK{
KeyID: keyID,
Plaintext: plaintext,
Ciphertext: ciphertext,
}, nil
if len(plainKey) != len(key) {
return key, nil, Errorf("crypto: received invalid plaintext key size from KMS")
}
copy(key[:], plainKey)
return key, sealedKey, nil
}
// UnsealKey returns the decrypted sealedKey as plaintext key.
@@ -198,12 +205,22 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
//
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (kes *kesService) DecryptKey(keyID string, ciphertext []byte, ctx Context) ([]byte, error) {
func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
context, err := ctx.MarshalText()
if err != nil {
return nil, err
return key, err
}
return kes.client.DecryptDataKey(keyID, ciphertext, context)
var plainKey []byte
plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)
if err != nil {
return key, err
}
if len(plainKey) != len(key) {
return key, Errorf("crypto: received invalid plaintext key size from KMS")
}
copy(key[:], plainKey)
return key, nil
}
// kesClient implements the bare minimum functionality needed for

View File

@@ -26,7 +26,6 @@ import (
"path"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/fips"
"github.com/minio/sio"
)
@@ -37,7 +36,7 @@ type ObjectKey [32]byte
// GenerateKey generates a unique ObjectKey from a 256 bit external key
// and a source of randomness. If random is nil the default PRNG of the
// system (crypto/rand) is used.
func GenerateKey(extKey []byte, random io.Reader) (key ObjectKey) {
func GenerateKey(extKey [32]byte, random io.Reader) (key ObjectKey) {
if random == nil {
random = rand.Reader
}
@@ -46,7 +45,7 @@ func GenerateKey(extKey []byte, random io.Reader) (key ObjectKey) {
logger.CriticalIf(context.Background(), errOutOfEntropy)
}
sha := sha256.New()
sha.Write(extKey)
sha.Write(extKey[:])
sha.Write(nonce[:])
sha.Sum(key[:0])
return key
@@ -76,7 +75,7 @@ type SealedKey struct {
// Seal encrypts the ObjectKey using the 256 bit external key and IV. The sealed
// key is also cryptographically bound to the object's path (bucket/object) and the
// domain (SSE-C or SSE-S3).
func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object string) SealedKey {
func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) SealedKey {
var (
sealingKey [32]byte
encryptedKey bytes.Buffer
@@ -87,7 +86,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
mac.Sum(sealingKey[:0])
if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:], CipherSuites: fips.CipherSuitesDARE()}); n != 64 || err != nil {
if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
}
sealedKey := SealedKey{
@@ -101,7 +100,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
// Unseal decrypts a sealed key using the 256 bit external key. Since the sealed key
// may be cryptographically bound to the object's path the same bucket/object as during sealing
// must be provided. On success the ObjectKey contains the decrypted sealed key.
func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket, object string) error {
func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucket, object string) error {
var (
unsealConfig sio.Config
)
@@ -114,12 +113,12 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)}
case InsecureSealAlgorithm:
sha := sha256.New()
sha.Write(extKey[:])
sha.Write(sealedKey.IV[:])
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
}
if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil {
@@ -150,7 +149,7 @@ func (key ObjectKey) SealETag(etag []byte) []byte {
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}); err != nil {
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
}
return buffer.Bytes()
@@ -166,5 +165,5 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
}
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()})
return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)})
}
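A round trip through the ETag sealing above, sketched with an illustrative object key (in practice the key comes from GenerateKey):

```
var key ObjectKey // illustrative; normally derived via GenerateKey
sealed := key.SealETag([]byte("d41d8cd98f00b204e9800998ecf8427e"))
etag, err := key.UnsealETag(sealed)
if err != nil {
	// unsealing with a different object key fails authentication
}
_ = etag // equals the original ETag on success
```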

View File

@@ -53,7 +53,7 @@ func TestGenerateKey(t *testing.T) {
i, test := i, test
func() {
defer recoverTest(i, test.ShouldPass, t)
key := GenerateKey(test.ExtKey[:], test.Random)
key := GenerateKey(test.ExtKey, test.Random)
if [32]byte(key) == [32]byte{} {
t.Errorf("Test %d: generated key is zero key", i) // check that we generate random and unique key
}
@@ -125,9 +125,9 @@ var sealUnsealKeyTests = []struct {
func TestSealUnsealKey(t *testing.T) {
for i, test := range sealUnsealKeyTests {
key := GenerateKey(test.SealExtKey[:], rand.Reader)
sealedKey := key.Seal(test.SealExtKey[:], test.SealIV, test.SealDomain, test.SealBucket, test.SealObject)
if err := key.Unseal(test.UnsealExtKey[:], sealedKey, test.UnsealDomain, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
key := GenerateKey(test.SealExtKey, rand.Reader)
sealedKey := key.Seal(test.SealExtKey, test.SealIV, test.SealDomain, test.SealBucket, test.SealObject)
if err := key.Unseal(test.UnsealExtKey, sealedKey, test.UnsealDomain, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
t.Errorf("Test %d should fail but passed successfully", i)
} else if err != nil && test.ShouldPass {
t.Errorf("Test %d should pass put failed: %v", i, err)
@@ -136,10 +136,10 @@ func TestSealUnsealKey(t *testing.T) {
// Test legacy InsecureSealAlgorithm
var extKey, iv [32]byte
key := GenerateKey(extKey[:], rand.Reader)
sealedKey := key.Seal(extKey[:], iv, "SSE-S3", "bucket", "object")
key := GenerateKey(extKey, rand.Reader)
sealedKey := key.Seal(extKey, iv, "SSE-S3", "bucket", "object")
sealedKey.Algorithm = InsecureSealAlgorithm
if err := key.Unseal(extKey[:], sealedKey, "SSE-S3", "bucket", "object"); err == nil {
if err := key.Unseal(extKey, sealedKey, "SSE-S3", "bucket", "object"); err == nil {
t.Errorf("'%s' test succeeded but it should fail because the legacy algorithm was used", sealedKey.Algorithm)
}
}

View File

@@ -22,86 +22,166 @@ import (
"crypto/sha256"
"errors"
"io"
"sort"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/kms"
"github.com/minio/sio"
)
// Context is a list of key-value pairs cryptographically
// associated with a certain object.
type Context = kms.Context
type Context map[string]string
// MarshalText returns a canonical text representation of
// the Context.
// MarshalText sorts the context keys and writes the sorted
// key-value pairs as a canonical JSON object. The sort order
// is based on the un-escaped keys.
func (c Context) MarshalText() ([]byte, error) {
if len(c) == 0 {
return []byte{'{', '}'}, nil
}
// Pre-allocate a buffer - 128 bytes is an arbitrary
// heuristic value that seems like a good starting size.
var b = bytes.NewBuffer(make([]byte, 0, 128))
if len(c) == 1 {
for k, v := range c {
b.WriteString(`{"`)
EscapeStringJSON(b, k)
b.WriteString(`":"`)
EscapeStringJSON(b, v)
b.WriteString(`"}`)
}
return b.Bytes(), nil
}
sortedKeys := make([]string, 0, len(c))
for k := range c {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
b.WriteByte('{')
for i, k := range sortedKeys {
b.WriteByte('"')
EscapeStringJSON(b, k)
b.WriteString(`":"`)
EscapeStringJSON(b, c[k])
b.WriteByte('"')
if i < len(sortedKeys)-1 {
b.WriteByte(',')
}
}
b.WriteByte('}')
return b.Bytes(), nil
}
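A quick illustration of the canonical form produced above - keys sorted, keys and values JSON-escaped:

```
ctx := Context{"bucket": "photos", "a": "b"}
b, _ := ctx.MarshalText()
fmt.Println(string(b)) // {"a":"b","bucket":"photos"}
```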
// KMS represents an active and authenticated connection
// to a Key-Management-Service. It supports data key
// generation and unsealing of KMS-generated data keys.
type KMS = kms.KMS
type KMS interface {
// DefaultKeyID returns the default master key ID. It should be
// used for SSE-S3 and whenever a S3 client requests SSE-KMS but
// does not specify an explicit SSE-KMS key ID.
DefaultKeyID() string
// CreateKey creates a new master key with the given key ID
// at the KMS.
CreateKey(keyID string) error
// GenerateKey generates a new random data key using
// the master key referenced by the keyID. It returns
// the plaintext key and the sealed plaintext key
// on success.
//
// The context is cryptographically bound to the
// generated key. The same context must be provided
// again to unseal the generated key.
GenerateKey(keyID string, context Context) (key [32]byte, sealedKey []byte, err error)
// UnsealKey unseals the sealedKey using the master key
// referenced by the keyID. The provided context must
// match the context used to generate the sealed key.
UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)
// Info returns descriptive information about the KMS,
// like the default key ID and authentication method.
Info() KMSInfo
}
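A typical round trip against this interface, sketched with the master-key implementation below (the zero master key is illustrative only):

```
KMS := NewMasterKey("my-master-key", [32]byte{}) // illustrative key
context := Context{"bucket": "photos"}

key, sealedKey, err := KMS.GenerateKey(KMS.DefaultKeyID(), context)
if err != nil {
	// handle error
}
// Unsealing requires the same key ID and context; anything else
// fails authentication.
unsealed, err := KMS.UnsealKey(KMS.DefaultKeyID(), sealedKey, context)
if err == nil && unsealed == key {
	// plaintext data key recovered
}
```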
type masterKeyKMS struct {
keyID string
masterKey [32]byte
}
// KMSInfo contains some describing information about
// the KMS.
type KMSInfo struct {
Endpoints []string
Name string
AuthType string
}
// NewMasterKey returns a basic KMS implementation from a single 256 bit master key.
//
// The KMS accepts any keyID but binds the keyID and context cryptographically
// to the generated keys.
func NewMasterKey(keyID string, key [32]byte) KMS { return &masterKeyKMS{keyID: keyID, masterKey: key} }
func (m *masterKeyKMS) Stat() (kms.Status, error) {
return kms.Status{
Name: "MasterKey",
DefaultKey: m.keyID,
}, nil
func (kms *masterKeyKMS) DefaultKeyID() string {
return kms.keyID
}
func (m *masterKeyKMS) CreateKey(keyID string) error {
func (kms *masterKeyKMS) CreateKey(keyID string) error {
return errors.New("crypto: creating keys is not supported by a static master key")
}
func (m *masterKeyKMS) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
if keyID == "" {
keyID = m.keyID
}
var key [32]byte
if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
if _, err = io.ReadFull(rand.Reader, key[:]); err != nil {
logger.CriticalIf(context.Background(), errOutOfEntropy)
}
var (
buffer bytes.Buffer
derivedKey = m.deriveKey(keyID, ctx)
derivedKey = kms.deriveKey(keyID, ctx)
)
if n, err := sio.Encrypt(&buffer, bytes.NewReader(key[:]), sio.Config{Key: derivedKey[:]}); err != nil || n != 64 {
logger.CriticalIf(context.Background(), errors.New("KMS: unable to encrypt data key"))
}
return kms.DEK{
KeyID: m.keyID,
Plaintext: key[:],
Ciphertext: buffer.Bytes(),
}, nil
sealedKey = buffer.Bytes()
return key, sealedKey, nil
}
func (m *masterKeyKMS) DecryptKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) {
var derivedKey = m.deriveKey(keyID, ctx)
// KMS is configured directly using a master key
func (kms *masterKeyKMS) Info() (info KMSInfo) {
return KMSInfo{
Endpoints: []string{},
Name: "",
AuthType: "master-key",
}
}
var key [32]byte
func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
var (
derivedKey = kms.deriveKey(keyID, ctx)
)
out, err := sio.DecryptBuffer(key[:0], sealedKey, sio.Config{Key: derivedKey[:]})
if err != nil || len(out) != 32 {
return nil, err // TODO(aead): upgrade sio to use sio.Error
return key, err // TODO(aead): upgrade sio to use sio.Error
}
return key[:], nil
return key, nil
}
func (m *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
if context == nil {
context = Context{}
}
ctxBytes, _ := context.MarshalText()
mac := hmac.New(sha256.New, m.masterKey[:])
mac := hmac.New(sha256.New, kms.masterKey[:])
mac.Write([]byte(keyID))
mac.Write(ctxBytes)
mac.Sum(key[:0])

View File

@@ -43,18 +43,18 @@ func TestMasterKeyKMS(t *testing.T) {
for i, test := range masterKeyKMSTests {
kms := NewMasterKey(test.GenKeyID, [32]byte{})
key, err := kms.GenerateKey(test.GenKeyID, test.GenContext)
key, sealedKey, err := kms.GenerateKey(test.GenKeyID, test.GenContext)
if err != nil {
t.Errorf("Test %d: KMS failed to generate key: %v", i, err)
}
unsealedKey, err := kms.DecryptKey(test.UnsealKeyID, key.Ciphertext, test.UnsealContext)
unsealedKey, err := kms.UnsealKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
if err != nil && !test.ShouldFail {
t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
}
if err == nil && test.ShouldFail {
t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i)
}
if !test.ShouldFail && !bytes.Equal(key.Plaintext, unsealedKey[:]) {
if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
t.Errorf("Test %d: The generated and unsealed key differ", i)
}
}

View File

@@ -51,11 +51,8 @@ func TestParseMasterKey(t *testing.T) {
if !tt.success && err == nil {
t.Error("Unexpected failure")
}
if kms != nil {
stat, _ := kms.Stat()
if err == nil && stat.DefaultKey != tt.expectedKeyID {
t.Errorf("Expected keyID %s, got %s", tt.expectedKeyID, stat.DefaultKey)
}
if err == nil && kms.DefaultKeyID() != tt.expectedKeyID {
t.Errorf("Expected keyID %s, got %s", tt.expectedKeyID, kms.DefaultKeyID())
}
})
}

View File

@@ -99,7 +99,7 @@ func (s3 ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket
if err != nil {
return
}
return unsealObjectKey(clientKey[:], metadata, bucket, object)
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// CreateMetadata encodes the sealed key into the metadata

View File

@@ -96,11 +96,11 @@ func (s3 ssekms) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, ob
if _, ok := ctx[bucket]; !ok {
ctx[bucket] = path.Join(bucket, object)
}
unsealKey, err := kms.DecryptKey(keyID, kmsKey, ctx)
unsealKey, err := kms.UnsealKey(keyID, kmsKey, ctx)
if err != nil {
return key, err
}
err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
return key, err
}

View File

@@ -73,11 +73,11 @@ func (s3 sses3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, obj
if err != nil {
return key, err
}
unsealKey, err := kms.DecryptKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
if err != nil {
return key, err
}
err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
return key, err
}

View File

@@ -22,7 +22,6 @@ import (
"net/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/fips"
"github.com/minio/minio/pkg/ioutil"
"github.com/minio/sio"
)
@@ -77,12 +76,12 @@ func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, b
if err != nil {
return
}
return unsealObjectKey(clientKey[:], metadata, bucket, object)
return unsealObjectKey(clientKey, metadata, bucket, object)
}
// unsealObjectKey decrypts and returns the sealed object key
// from the metadata using the SSE-C client key.
func unsealObjectKey(clientKey []byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
sealedKey, err := SSEC.ParseMetadata(metadata)
if err != nil {
return
@@ -94,7 +93,7 @@ func unsealObjectKey(clientKey []byte, metadata map[string]string, bucket, objec
// EncryptSinglePart encrypts an io.Reader which must be
// the body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:], CipherSuites: fips.CipherSuitesDARE()})
r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:]})
if err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt io.Reader using object key"))
}
@@ -116,7 +115,7 @@ func DecryptSinglePart(w io.Writer, offset, length int64, key ObjectKey) io.Writ
const PayloadSize = 1 << 16 // DARE 2.0
w = ioutil.LimitedWriter(w, offset%PayloadSize, length)
decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:], CipherSuites: fips.CipherSuitesDARE()})
decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:]})
if err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to decrypt io.Writer using object key"))
}
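The offset arithmetic above relies on DARE 2.0's fixed 64 KiB plaintext payload per package: whole packages are decrypted and the LimitedWriter trims the leading offset%PayloadSize bytes. A sketch of the package math (the helper name is hypothetical):

```
// Hypothetical helper: which DARE packages cover a plaintext byte range.
const payloadSize = 1 << 16 // 64 KiB of plaintext per DARE 2.0 package

func packageRange(offset, length int64) (first, last, skip int64) {
	first = offset / payloadSize               // first package to decrypt
	last = (offset + length - 1) / payloadSize // last package to decrypt
	skip = offset % payloadSize                // bytes trimmed from the first package
	return first, last, skip
}
```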

View File

@@ -22,7 +22,6 @@ import (
"time"
vault "github.com/hashicorp/vault/api"
"github.com/minio/minio/pkg/kms"
)
var (
@@ -191,15 +190,22 @@ func (v *vaultService) authenticate() (err error) {
return
}
// DefaultKeyID returns the default key ID that should be
// used for SSE-S3 or SSE-KMS when the S3 client does not
// provide an explicit key ID.
func (v *vaultService) DefaultKeyID() string {
return v.config.Key.Name
}
// Info returns some information about the Vault
// configuration - like the endpoints or authentication
// method.
func (v *vaultService) Stat() (kms.Status, error) {
return kms.Status{
Endpoints: []string{v.config.Endpoint},
Name: "Hashicorp Vault",
DefaultKey: v.config.Key.Name,
}, nil
func (v *vaultService) Info() KMSInfo {
return KMSInfo{
Endpoints: []string{v.config.Endpoint},
Name: v.DefaultKeyID(),
AuthType: v.config.Auth.Type,
}
}
// CreateKey is a stub that exists such that the Vault
@@ -216,13 +222,10 @@ func (v *vaultService) CreateKey(keyID string) error {
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
if keyID == "" {
keyID = v.config.Key.Name
}
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
context, err := ctx.MarshalText()
if err != nil {
return kms.DEK{}, err
return key, sealedKey, err
}
payload := map[string]interface{}{
@@ -230,27 +233,24 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
}
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
if err != nil {
return kms.DEK{}, Errorf("crypto: client error %w", err)
return key, sealedKey, Errorf("crypto: client error %w", err)
}
sealKey, ok := s.Data["ciphertext"].(string)
if !ok {
return kms.DEK{}, Errorf("crypto: incorrect 'ciphertext' key type %v", s.Data["ciphertext"])
return key, sealedKey, Errorf("crypto: incorrect 'ciphertext' key type %v", s.Data["ciphertext"])
}
plainKeyB64, ok := s.Data["plaintext"].(string)
if !ok {
return kms.DEK{}, Errorf("crypto: incorrect 'plaintext' key type %v", s.Data["plaintext"])
return key, sealedKey, Errorf("crypto: incorrect 'plaintext' key type %v", s.Data["plaintext"])
}
plainKey, err := base64.StdEncoding.DecodeString(plainKeyB64)
if err != nil {
return kms.DEK{}, Errorf("crypto: invalid base64 key %w", err)
return key, sealedKey, Errorf("crypto: invalid base64 key %w", err)
}
return kms.DEK{
KeyID: keyID,
Plaintext: plainKey,
Ciphertext: []byte(sealKey),
}, nil
copy(key[:], plainKey)
return key, []byte(sealKey), nil
}
// UnsealKey returns the decrypted sealedKey as plaintext key.
@@ -260,10 +260,10 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (kms.DEK, error) {
//
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) DecryptKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) {
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
context, err := ctx.MarshalText()
if err != nil {
return nil, err
return key, err
}
payload := map[string]interface{}{
@@ -273,17 +273,18 @@ func (v *vaultService) DecryptKey(keyID string, sealedKey []byte, ctx Context) (
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
if err != nil {
return nil, Errorf("crypto: client error %w", err)
return key, Errorf("crypto: client error %w", err)
}
base64Key, ok := s.Data["plaintext"].(string)
if !ok {
return nil, Errorf("crypto: incorrect 'plaintext' key type %v", s.Data["plaintext"])
return key, Errorf("crypto: incorrect 'plaintext' key type %v", s.Data["plaintext"])
}
plainKey, err := base64.StdEncoding.DecodeString(base64Key)
if err != nil {
return nil, Errorf("crypto: invalid base64 key %w", err)
return key, Errorf("crypto: invalid base64 key %w", err)
}
return plainKey, nil
copy(key[:], plainKey)
return key, nil
}
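For context, the transit decrypt call above posts the sealed key together with a base64-encoded context and reads a base64 plaintext back; an illustrative request shape following Vault's transit API conventions (the exact payload fields are assumptions, values hypothetical):

```
// Illustrative payload for /transit/decrypt/<key>; the sealed key is the
// "vault:v1:..." ciphertext returned earlier by the datakey endpoint.
payload := map[string]interface{}{
	"ciphertext": string(sealedKey),                          // e.g. "vault:v1:abcd..."
	"context":    base64.StdEncoding.EncodeToString(context), // same context as GenerateKey
}
// On success, s.Data["plaintext"] holds the base64 of the 256-bit data key.
```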

View File

@@ -44,6 +44,7 @@ import (
const (
dataScannerSleepPerFolder = time.Millisecond // Time to wait between folders.
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.
dataUsageUpdateDirCycles = 16 // Visit all folders every n cycles.
healDeleteDangling = true
@@ -58,7 +59,6 @@ var (
dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
// Sleeper values are updated when config is loaded.
scannerSleeper = newDynamicSleeper(10, 10*time.Second)
scannerCycle = &safeDuration{}
)
// initDataScanner will start the scanner in the background.
@@ -66,23 +66,6 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
go runDataScanner(ctx, objAPI)
}
type safeDuration struct {
sync.Mutex
t time.Duration
}
func (s *safeDuration) Update(t time.Duration) {
s.Lock()
defer s.Unlock()
s.t = t
}
func (s *safeDuration) Get() time.Duration {
s.Lock()
defer s.Unlock()
return s.t
}
// runDataScanner will start a data scanner.
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
@@ -94,7 +77,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
for {
ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
if err != nil {
time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
time.Sleep(time.Duration(r.Float64() * float64(dataScannerStartDelay)))
continue
}
break
@@ -118,7 +101,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
br.Close()
}
scannerTimer := time.NewTimer(scannerCycle.Get())
scannerTimer := time.NewTimer(dataScannerStartDelay)
defer scannerTimer.Stop()
for {
@@ -127,14 +110,14 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
return
case <-scannerTimer.C:
// Reset the timer for next cycle.
scannerTimer.Reset(scannerCycle.Get())
scannerTimer.Reset(dataScannerStartDelay)
if intDataUpdateTracker.debug {
console.Debugln("starting scanner cycle")
}
// Wait before starting next cycle and wait on startup.
results := make(chan madmin.DataUsageInfo, 1)
results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results)
bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
logger.LogIf(ctx, err)
@@ -419,13 +402,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
// Parse
entName = pathClean(path.Join(folder.name, entName))
if entName == "" {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
}
return errDoneForNow
}
entName = path.Clean(path.Join(folder.name, entName))
bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
if bucket == "" {
if f.dataUsageScannerDebug {
@@ -796,8 +773,6 @@ type sizeSummary struct {
pendingSize int64
failedSize int64
replicaSize int64
pendingCount uint64
failedCount uint64
}
type getSizeFn func(item scannerItem) (sizeSummary, error)
@@ -1113,13 +1088,11 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
}
switch oi.ReplicationStatus {
case replication.Pending:
sizeS.pendingCount++
sizeS.pendingSize += oi.Size
globalReplicationPool.queueReplicaTask(ctx, ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
globalReplicationPool.queueReplicaTask(oi)
case replication.Failed:
sizeS.failedSize += oi.Size
sizeS.failedCount++
globalReplicationPool.queueReplicaTask(ctx, ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
globalReplicationPool.queueReplicaTask(oi)
case replication.Completed, "COMPLETE":
sizeS.replicatedSize += oi.Size
case replication.Replica:
@@ -1138,7 +1111,7 @@ func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer,
} else {
versionID = oi.VersionID
}
globalReplicationPool.queueReplicaDeleteTask(ctx, DeletedObjectVersionInfo{
globalReplicationPool.queueReplicaDeleteTask(DeletedObjectVersionInfo{
DeletedObject: DeletedObject{
ObjectName: oi.Name,
DeleteMarkerVersionID: dmVersionID,

View File

@@ -31,9 +31,11 @@ import (
"sync"
"time"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/env"
"github.com/willf/bloom"
)
@@ -78,7 +80,7 @@ func newDataUpdateTracker() *dataUpdateTracker {
Current: dataUpdateFilter{
idx: 1,
},
debug: serverDebugLog,
debug: env.Get(envDataUsageScannerDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
input: make(chan string, dataUpdateTrackerQueueSize),
save: make(chan struct{}, 1),
saveExited: make(chan struct{}),
@@ -638,7 +640,7 @@ func (d *dataUpdateTracker) cycleFilter(ctx context.Context, req bloomFilterRequ
// splitPathDeterministic will split the provided relative path
// deterministically and return up to the first 3 elements of the path.
// slash and dot prefixes are removed.
// Slash and dot prefixes are removed.
// Trailing slashes are removed.
// Returns 0 length if no parts are found after trimming.
func splitPathDeterministic(in string) []string {
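Based on that description, the behavior would be along these lines (illustrative, not exhaustive):

```
splitPathDeterministic("bucket/prefix/object")          // ["bucket", "prefix", "object"]
splitPathDeterministic("/bucket/prefix/object/deeper/") // ["bucket", "prefix", "object"]
splitPathDeterministic("")                              // zero-length result
```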

View File

@@ -32,7 +32,6 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
"github.com/tinylib/msgp/msgp"
)
@@ -46,39 +45,6 @@ type sizeHistogram [dataUsageBucketLen]uint64
//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
Children dataUsageHashMap
// These fields do not include any children.
Size int64
Objects uint64
ObjSizes sizeHistogram
ReplicationStats replicationStats
}
//msgp:tuple replicationStats
type replicationStats struct {
PendingSize uint64
ReplicatedSize uint64
FailedSize uint64
ReplicaSize uint64
FailedCount uint64
PendingCount uint64
MissedThresholdSize uint64
AfterThresholdSize uint64
MissedThresholdCount uint64
AfterThresholdCount uint64
}
//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
// These fields do not include any children.
Size int64
Objects uint64
ObjSizes sizeHistogram
Children dataUsageHashMap
}
//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
// These fields do not include any children.
Size int64
ReplicatedSize uint64
@@ -90,27 +56,29 @@ type dataUsageEntryV3 struct {
Children dataUsageHashMap
}
// dataUsageCache contains a cache of data usage entries, latest version 4.
type dataUsageCache struct {
Info dataUsageCacheInfo
Cache map[string]dataUsageEntry
Disks []string
//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
// These fields do not include any children.
Size int64
Objects uint64
ObjSizes sizeHistogram
Children dataUsageHashMap
}
// dataUsageCacheV2 contains a cache of data usage entries version 2.
// dataUsageCache contains a cache of data usage entries, latest version 3.
type dataUsageCache struct {
Info dataUsageCacheInfo
Disks []string
Cache map[string]dataUsageEntry
}
// dataUsageCacheV2 contains a cache of data usage entries, version 2.
type dataUsageCacheV2 struct {
Info dataUsageCacheInfo
Disks []string
Cache map[string]dataUsageEntryV2
}
// dataUsageCacheV3 contains a cache of data usage entries, version 3.
type dataUsageCacheV3 struct {
Info dataUsageCacheInfo
Disks []string
Cache map[string]dataUsageEntryV3
}
//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
Name string
@@ -121,8 +89,8 @@ type dataUsageEntryInfo struct {
type dataUsageCacheInfo struct {
// Name of the bucket. Also root element.
Name string
NextCycle uint32
LastUpdate time.Time
NextCycle uint32
// indicates if the disk is being healed and the scanner
// should skip healing the disk
SkipHealing bool
@@ -132,25 +100,20 @@ type dataUsageCacheInfo struct {
func (e *dataUsageEntry) addSizes(summary sizeSummary) {
e.Size += summary.totalSize
e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
e.ReplicationStats.FailedSize += uint64(summary.failedSize)
e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
e.ReplicationStats.PendingCount += uint64(summary.pendingCount)
e.ReplicationStats.FailedCount += uint64(summary.failedCount)
e.ReplicatedSize += uint64(summary.replicatedSize)
e.ReplicationFailedSize += uint64(summary.failedSize)
e.ReplicationPendingSize += uint64(summary.pendingSize)
e.ReplicaSize += uint64(summary.replicaSize)
}
// merge other data usage entry into this, excluding children.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
e.Objects += other.Objects
e.Size += other.Size
e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount
e.ReplicationPendingSize += other.ReplicationPendingSize
e.ReplicationFailedSize += other.ReplicationFailedSize
e.ReplicatedSize += other.ReplicatedSize
e.ReplicaSize += other.ReplicaSize
for i, v := range other.ObjSizes[:] {
e.ObjSizes[i] += v
@@ -275,27 +238,25 @@ func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) {
}
}
// dui converts the flattened version of the path to madmin.DataUsageInfo.
// dui converts the flattened version of the path to DataUsageInfo.
// As a side effect d will be flattened, use a clone if this is not ok.
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) madmin.DataUsageInfo {
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
e := d.find(path)
if e == nil {
// No entry found, return empty.
return madmin.DataUsageInfo{}
return DataUsageInfo{}
}
flat := d.flatten(*e)
return madmin.DataUsageInfo{
LastUpdate: d.Info.LastUpdate,
ObjectsTotalCount: flat.Objects,
ObjectsTotalSize: uint64(flat.Size),
ReplicatedSize: flat.ReplicationStats.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationStats.FailedSize,
ReplicationPendingSize: flat.ReplicationStats.PendingSize,
ReplicaSize: flat.ReplicationStats.ReplicaSize,
ReplicationPendingCount: flat.ReplicationStats.PendingCount,
ReplicationFailedCount: flat.ReplicationStats.FailedCount,
BucketsCount: uint64(len(e.Children)),
BucketsUsage: d.bucketsUsageInfo(buckets),
return DataUsageInfo{
LastUpdate: d.Info.LastUpdate,
ObjectsTotalCount: flat.Objects,
ObjectsTotalSize: uint64(flat.Size),
ReplicatedSize: flat.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationFailedSize,
ReplicationPendingSize: flat.ReplicationPendingSize,
ReplicaSize: flat.ReplicaSize,
BucketsCount: uint64(len(e.Children)),
BucketsUsage: d.bucketsUsageInfo(buckets),
}
}
@@ -412,24 +373,22 @@ func (h *sizeHistogram) toMap() map[string]uint64 {
// bucketsUsageInfo returns the buckets usage info as a map, with
// key as bucket name
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmin.BucketUsageInfo {
var dst = make(map[string]madmin.BucketUsageInfo, len(buckets))
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
var dst = make(map[string]BucketUsageInfo, len(buckets))
for _, bucket := range buckets {
e := d.find(bucket.Name)
if e == nil {
continue
}
flat := d.flatten(*e)
dst[bucket.Name] = madmin.BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: flat.Objects,
ReplicationPendingSize: flat.ReplicationStats.PendingSize,
ReplicatedSize: flat.ReplicationStats.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationStats.FailedSize,
ReplicationPendingCount: flat.ReplicationStats.PendingCount,
ReplicationFailedCount: flat.ReplicationStats.FailedCount,
ReplicaSize: flat.ReplicationStats.ReplicaSize,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
dst[bucket.Name] = BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: flat.Objects,
ReplicationPendingSize: flat.ReplicationPendingSize,
ReplicatedSize: flat.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationFailedSize,
ReplicaSize: flat.ReplicaSize,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
}
}
return dst
@@ -437,22 +396,20 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmi
// bucketUsageInfo returns the buckets usage info.
// If not found all values returned are zero values.
func (d *dataUsageCache) bucketUsageInfo(bucket string) madmin.BucketUsageInfo {
func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
e := d.find(bucket)
if e == nil {
return madmin.BucketUsageInfo{}
return BucketUsageInfo{}
}
flat := d.flatten(*e)
return madmin.BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: flat.Objects,
ReplicationPendingSize: flat.ReplicationStats.PendingSize,
ReplicationPendingCount: flat.ReplicationStats.PendingCount,
ReplicatedSize: flat.ReplicationStats.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationStats.FailedSize,
ReplicationFailedCount: flat.ReplicationStats.FailedCount,
ReplicaSize: flat.ReplicationStats.ReplicaSize,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
return BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: flat.Objects,
ReplicationPendingSize: flat.ReplicationPendingSize,
ReplicatedSize: flat.ReplicatedSize,
ReplicationFailedSize: flat.ReplicationFailedSize,
ReplicaSize: flat.ReplicaSize,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
}
}
@@ -528,7 +485,7 @@ type objectIO interface {
// Only backend errors are returned as errors.
// If the object is not found or cannot be deserialized, d is cleared and a nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, noLock, ObjectOptions{})
if err != nil {
switch err.(type) {
case ObjectNotFound:
@@ -576,7 +533,6 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
dataUsageCacheVerV4 = 4
dataUsageCacheVerV3 = 3
dataUsageCacheVerV2 = 2
dataUsageCacheVerV1 = 1
@@ -585,7 +541,7 @@ const (
// serialize the contents of the cache.
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
// Add version and compress.
_, err := dst.Write([]byte{dataUsageCacheVerV4})
_, err := dst.Write([]byte{dataUsageCacheVerV3})
if err != nil {
return err
}
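serializeTo is truncated above; after the version byte it writes the zstd-compressed msgp encoding of the cache, mirroring the deserialize path below. A minimal sketch of the write side, assuming the zstd and msgp packages used elsewhere in this file:

```
// Sketch of the remainder of serializeTo: compress, then msgp-encode.
enc, err := zstd.NewWriter(dst, zstd.WithEncoderLevel(zstd.SpeedFastest))
if err != nil {
	return err
}
mw := msgp.NewWriter(enc)
if err := d.EncodeMsg(mw); err != nil {
	return err
}
if err := mw.Flush(); err != nil {
	return err
}
return enc.Close() // closes the zstd frame
```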
@@ -653,35 +609,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
return err
}
defer dec.Close()
dold := &dataUsageCacheV3{}
if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
return err
}
d.Info = dold.Info
d.Disks = dold.Disks
d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
for k, v := range dold.Cache {
d.Cache[k] = dataUsageEntry{
Size: v.Size,
Objects: v.Objects,
ObjSizes: v.ObjSizes,
Children: v.Children,
ReplicationStats: replicationStats{
ReplicatedSize: v.ReplicatedSize,
ReplicaSize: v.ReplicaSize,
FailedSize: v.ReplicationFailedSize,
PendingSize: v.ReplicationPendingSize,
},
}
}
return nil
case dataUsageCacheVerV4:
// Zstd compressed.
dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
if err != nil {
return err
}
defer dec.Close()
return d.DecodeMsg(msgp.NewReader(dec))
}

File diff suppressed because it is too large

View File

@@ -348,119 +348,6 @@ func BenchmarkDecodedataUsageCacheV2(b *testing.B) {
}
}
func TestMarshalUnmarshaldataUsageCacheV3(t *testing.T) {
v := dataUsageCacheV3{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgdataUsageCacheV3(b *testing.B) {
v := dataUsageCacheV3{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgdataUsageCacheV3(b *testing.B) {
v := dataUsageCacheV3{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshaldataUsageCacheV3(b *testing.B) {
v := dataUsageCacheV3{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodedataUsageCacheV3(t *testing.T) {
v := dataUsageCacheV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodedataUsageCacheV3 Msgsize() is inaccurate")
}
vn := dataUsageCacheV3{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodedataUsageCacheV3(b *testing.B) {
v := dataUsageCacheV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodedataUsageCacheV3(b *testing.B) {
v := dataUsageCacheV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshaldataUsageEntry(t *testing.T) {
v := dataUsageEntry{}
bts, err := v.MarshalMsg(nil)
@@ -687,232 +574,6 @@ func BenchmarkDecodedataUsageEntryV2(b *testing.B) {
}
}
func TestMarshalUnmarshaldataUsageEntryV3(t *testing.T) {
v := dataUsageEntryV3{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgdataUsageEntryV3(b *testing.B) {
v := dataUsageEntryV3{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgdataUsageEntryV3(b *testing.B) {
v := dataUsageEntryV3{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshaldataUsageEntryV3(b *testing.B) {
v := dataUsageEntryV3{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodedataUsageEntryV3(t *testing.T) {
v := dataUsageEntryV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodedataUsageEntryV3 Msgsize() is inaccurate")
}
vn := dataUsageEntryV3{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodedataUsageEntryV3(b *testing.B) {
v := dataUsageEntryV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodedataUsageEntryV3(b *testing.B) {
v := dataUsageEntryV3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalreplicationStats(t *testing.T) {
v := replicationStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgreplicationStats(b *testing.B) {
v := replicationStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgreplicationStats(b *testing.B) {
v := replicationStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalreplicationStats(b *testing.B) {
v := replicationStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodereplicationStats(t *testing.T) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodereplicationStats Msgsize() is inaccurate")
}
vn := replicationStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodereplicationStats(b *testing.B) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodereplicationStats(b *testing.B) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalsizeHistogram(t *testing.T) {
v := sizeHistogram{}
bts, err := v.MarshalMsg(nil)

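All of the generated tests above check the same msgp contract: a value must survive the MarshalMsg/UnmarshalMsg and Encode/Decode round trips with no bytes left over. A minimal hand-written sketch of that contract, using a hypothetical `point` type (not a type from this repo, whose equivalents are produced by `go generate` via tinylib/msgp):

```
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

// point is a stand-in for a generated type such as dataUsageCacheV3.
type point struct{ X, Y int64 }

// EncodeMsg would normally be generated; encode as a 2-element array.
func (p *point) EncodeMsg(w *msgp.Writer) error {
	if err := w.WriteArrayHeader(2); err != nil {
		return err
	}
	if err := w.WriteInt64(p.X); err != nil {
		return err
	}
	return w.WriteInt64(p.Y)
}

// DecodeMsg is the matching hand-written decoder.
func (p *point) DecodeMsg(r *msgp.Reader) error {
	if _, err := r.ReadArrayHeader(); err != nil {
		return err
	}
	var err error
	if p.X, err = r.ReadInt64(); err != nil {
		return err
	}
	p.Y, err = r.ReadInt64()
	return err
}

func main() {
	in := point{X: 1, Y: 2}
	var buf bytes.Buffer
	if err := msgp.Encode(&buf, &in); err != nil { // same helper the tests use
		panic(err)
	}
	var out point
	if err := msgp.Decode(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out == in) // true: the round trip is lossless
}
```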

@@ -25,10 +25,11 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
)
const (
envDataUsageScannerDebug = "MINIO_DISK_USAGE_SCANNER_DEBUG"
dataUsageRoot = SlashSeparator
dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
@@ -38,7 +39,7 @@ const (
)
// storeDataUsageInBackend will store all objects sent on the dui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan madmin.DataUsageInfo) {
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
for dataUsageInfo := range dui {
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
@@ -58,27 +59,27 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
}
}
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (madmin.DataUsageInfo, error) {
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
r, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageObjName, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
return madmin.DataUsageInfo{}, nil
return DataUsageInfo{}, nil
}
return madmin.DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
}
defer r.Close()
var dataUsageInfo madmin.DataUsageInfo
var dataUsageInfo DataUsageInfo
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
return madmin.DataUsageInfo{}, err
return DataUsageInfo{}, err
}
// For backward compatibility with older releases that only persisted BucketSizes, populate BucketsUsage from it.
if len(dataUsageInfo.BucketsUsage) == 0 {
dataUsageInfo.BucketsUsage = make(map[string]madmin.BucketUsageInfo, len(dataUsageInfo.BucketSizes))
dataUsageInfo.BucketsUsage = make(map[string]BucketUsageInfo, len(dataUsageInfo.BucketSizes))
for bucket, size := range dataUsageInfo.BucketSizes {
dataUsageInfo.BucketsUsage[bucket] = madmin.BucketUsageInfo{Size: size}
dataUsageInfo.BucketsUsage[bucket] = BucketUsageInfo{Size: size}
}
}

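The `BucketsUsage`/`BucketSizes` branch above is a compatibility shim: older releases persisted only per-bucket sizes, so newer readers synthesize the richer map when the new field is empty. A stand-alone sketch of just that upgrade step, with the types trimmed down for illustration:

```
package main

import "fmt"

// Trimmed-down stand-ins for the real types: BucketSizes is the
// legacy field, BucketsUsage the current one.
type bucketUsage struct{ Size uint64 }

type usageInfo struct {
	BucketSizes  map[string]uint64      // written by older releases
	BucketsUsage map[string]bucketUsage // written by current releases
}

// upgrade mirrors the loadDataUsageFromBackend branch above: populate
// the new field from the legacy one only when it is missing.
func upgrade(d *usageInfo) {
	if len(d.BucketsUsage) != 0 {
		return // already in the current format
	}
	d.BucketsUsage = make(map[string]bucketUsage, len(d.BucketSizes))
	for bucket, size := range d.BucketSizes {
		d.BucketsUsage[bucket] = bucketUsage{Size: size}
	}
}

func main() {
	old := usageInfo{BucketSizes: map[string]uint64{"photos": 1 << 20}}
	upgrade(&old)
	fmt.Println(old.BucketsUsage["photos"].Size) // 1048576
}
```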

@@ -39,8 +39,6 @@ import (
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/fips"
"github.com/minio/minio/pkg/kms"
"github.com/minio/sio"
)
@@ -663,7 +661,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
return nil, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
if err != nil {
return nil, crypto.ErrInvalidCustomerKey
}
@@ -674,14 +672,14 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
if globalCacheKMS == nil {
return nil, errKMSNotConfigured
}
key, err := globalCacheKMS.GenerateKey("", kms.Context{bucket: pathJoin(bucket, object)})
key, encKey, err := globalCacheKMS.GenerateKey(globalCacheKMS.DefaultKeyID(), crypto.Context{bucket: pathJoin(bucket, object)})
if err != nil {
return nil, err
}
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, globalCacheKMS.DefaultKeyID(), encKey, sealedKey)
if etag, ok := metadata["etag"]; ok {
metadata["etag"] = hex.EncodeToString(objectKey.SealETag([]byte(etag)))

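These hunks swap between two shapes of the KMS GenerateKey call: one returns a struct carrying Plaintext/Ciphertext/KeyID, the other returns the plaintext data key and its sealed form as separate values. Either way the underlying pattern is envelope encryption: the KMS mints a fresh data key, the plaintext copy protects the object key, and only the sealed copy lands in metadata. A generic sketch of that pattern using stdlib AES-GCM as a stand-in KMS (this is not MinIO's crypto package or its API):

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

// fakeKMS stands in for the real KMS client: GenerateKey returns a
// fresh data key in both plaintext and sealed (KMS-encrypted) form.
type fakeKMS struct{ master []byte }

func (k fakeKMS) GenerateKey() (plain, sealed []byte, err error) {
	plain = make([]byte, 32)
	if _, err = rand.Read(plain); err != nil {
		return nil, nil, err
	}
	sealed, err = gcmSeal(k.master, plain)
	return plain, sealed, err
}

// UnsealKey recovers the plaintext data key from its sealed form.
func (k fakeKMS) UnsealKey(sealed []byte) ([]byte, error) {
	return gcmOpen(k.master, sealed)
}

func gcmSeal(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return aead.Seal(nonce, nonce, plaintext, nil), nil // nonce || ciphertext
}

func gcmOpen(key, sealed []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce, ct := sealed[:aead.NonceSize()], sealed[aead.NonceSize():]
	return aead.Open(nil, nonce, ct, nil)
}

func main() {
	kms := fakeKMS{master: make([]byte, 32)} // zero master key, sketch only
	plain, sealed, err := kms.GenerateKey()
	if err != nil {
		panic(err)
	}
	// Only `sealed` would be persisted in object metadata.
	got, err := kms.UnsealKey(sealed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(plain), string(got) == string(plain)) // 32 true
}
```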

@@ -245,10 +245,11 @@ func decryptCacheObjectETag(info *ObjectInfo) error {
return errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(info.UserDefined)
if err != nil {
return err
}
extKey, err := globalCacheKMS.DecryptKey(keyID, kmsKey, crypto.Context{info.Bucket: path.Join(info.Bucket, info.Name)})
extKey, err := globalCacheKMS.UnsealKey(keyID, kmsKey, crypto.Context{info.Bucket: path.Join(info.Bucket, info.Name)})
if err != nil {
return err
}


@@ -34,7 +34,6 @@ import (
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/fips"
"github.com/minio/sio"
)
@@ -126,7 +125,9 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(oldKey, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
var extKey [32]byte
copy(extKey[:], oldKey)
if err = objectKey.Unseal(extKey, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
if subtle.ConstantTimeCompare(oldKey, newKey) == 1 {
return errInvalidSSEParameters // AWS returns special error for equal but invalid keys.
}
@@ -136,7 +137,8 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
if subtle.ConstantTimeCompare(oldKey, newKey) == 1 && sealedKey.Algorithm == crypto.SealAlgorithm {
return nil // don't rotate on equal keys if seal algorithm is latest
}
sealedKey = objectKey.Seal(newKey, sealedKey.IV, crypto.SSEC.String(), bucket, object)
copy(extKey[:], newKey)
sealedKey = objectKey.Seal(extKey, sealedKey.IV, crypto.SSEC.String(), bucket, object)
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return nil
case crypto.S3.IsEncrypted(metadata):
@@ -147,7 +149,7 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
if err != nil {
return err
}
oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
oldKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
@@ -156,12 +158,12 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
return err
}
newKey, err := GlobalKMS.GenerateKey("", crypto.Context{bucket: path.Join(bucket, object)})
newKey, encKey, err := GlobalKMS.GenerateKey(GlobalKMS.DefaultKeyID(), crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
sealedKey = objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey)
sealedKey = objectKey.Seal(newKey, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, GlobalKMS.DefaultKeyID(), encKey, sealedKey)
return nil
}
}
@@ -172,18 +174,20 @@ func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]s
if GlobalKMS == nil {
return crypto.ObjectKey{}, errKMSNotConfigured
}
key, err := GlobalKMS.GenerateKey("", crypto.Context{bucket: path.Join(bucket, object)})
key, encKey, err := GlobalKMS.GenerateKey(GlobalKMS.DefaultKeyID(), crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return crypto.ObjectKey{}, err
}
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, GlobalKMS.DefaultKeyID(), encKey, sealedKey)
return objectKey, nil
}
objectKey := crypto.GenerateKey(key, rand.Reader)
sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
var extKey [32]byte
copy(extKey[:], key)
objectKey := crypto.GenerateKey(extKey, rand.Reader)
sealedKey = objectKey.Seal(extKey, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return objectKey, nil
}
@@ -194,7 +198,7 @@ func newEncryptReader(content io.Reader, key []byte, bucket, object string, meta
return nil, crypto.ObjectKey{}, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
if err != nil {
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
}
@@ -271,8 +275,12 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
if err != nil {
return nil, err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(key, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
var (
objectKey crypto.ObjectKey
extKey [32]byte
)
copy(extKey[:], key)
if err = objectKey.Unseal(extKey, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
return nil, err
}
return objectKey[:], nil
@@ -325,7 +333,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
reader, err := sio.DecryptReader(client, sio.Config{
Key: objectEncryptionKey,
SequenceNumber: seqNumber,
CipherSuites: fips.CipherSuitesDARE(),
})
if err != nil {
return nil, crypto.ErrInvalidCustomerKey

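A detail worth calling out in the hunks above: the Seal/Unseal/GenerateKey variants that take a `[32]byte` force the key length at compile time, hence the recurring `var extKey [32]byte; copy(extKey[:], key)` conversions from the request-supplied `[]byte`. The rotate path also compares old and new keys with `subtle.ConstantTimeCompare` so the check leaks no timing information. A small sketch of both pieces (names here are illustrative, not the repo's):

```
package main

import (
	"crypto/subtle"
	"errors"
	"fmt"
)

var errInvalidSSEParameters = errors.New("rotation keys equal but invalid")

// toExtKey mirrors the `var extKey [32]byte; copy(extKey[:], key)`
// pattern: the caller's []byte is validated, then copied into the
// fixed-size array the sealing APIs expect.
func toExtKey(key []byte) (extKey [32]byte, err error) {
	if len(key) != 32 {
		return extKey, errors.New("SSE-C key must be exactly 32 bytes")
	}
	copy(extKey[:], key)
	return extKey, nil
}

// checkRotation sketches the rotateKey guard: if unsealing with the
// old key failed but old == new, AWS mandates a distinct error.
func checkRotation(oldKey, newKey []byte, unsealFailed bool) error {
	if subtle.ConstantTimeCompare(oldKey, newKey) == 1 && unsealFailed {
		return errInvalidSSEParameters
	}
	return nil
}

func main() {
	k := make([]byte, 32)
	ext, err := toExtKey(k)
	fmt.Println(len(ext), err)              // 32 <nil>
	fmt.Println(checkRotation(k, k, true))  // rotation keys equal but invalid
	fmt.Println(checkRotation(k, k, false)) // <nil>
}
```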

@@ -761,7 +761,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
// the first element from the set of peers which indicate that
// they are local. There is always one entry that is local
// even with repeated server endpoints.
func GetLocalPeer(endpointServerPools EndpointServerPools, host, port string) (localPeer string) {
func GetLocalPeer(endpointServerPools EndpointServerPools) (localPeer string) {
peerSet := set.NewStringSet()
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
@@ -776,11 +776,11 @@ func GetLocalPeer(endpointServerPools EndpointServerPools, host, port string) (l
if peerSet.IsEmpty() {
// Local peer can be empty in FS or Erasure coded mode.
// If so, return globalMinioHost + globalMinioPort value.
if host != "" {
return net.JoinHostPort(host, port)
if globalMinioHost != "" {
return net.JoinHostPort(globalMinioHost, globalMinioPort)
}
return net.JoinHostPort("127.0.0.1", port)
return net.JoinHostPort("127.0.0.1", globalMinioPort)
}
return peerSet.ToSlice()[0]
}


@@ -334,6 +334,15 @@ func TestCreateEndpoints(t *testing.T) {
// So it means that if you have say localhost:9000 and localhost:9001 as endpointArgs then localhost:9001
// is considered a remote service from localhost:9000 perspective.
func TestGetLocalPeer(t *testing.T) {
tempGlobalMinioAddr := globalMinioAddr
tempGlobalMinioPort := globalMinioPort
defer func() {
globalMinioAddr = tempGlobalMinioAddr
globalMinioPort = tempGlobalMinioPort
}()
globalMinioAddr = ":9000"
globalMinioPort = "9000"
testCases := []struct {
endpointArgs []string
expectedResult string
@@ -354,9 +363,9 @@ func TestGetLocalPeer(t *testing.T) {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
}
localPeer := GetLocalPeer(zendpoints, "", "9000")
if localPeer != testCase.expectedResult {
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, localPeer)
remotePeer := GetLocalPeer(zendpoints)
if remotePeer != testCase.expectedResult {
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, remotePeer)
}
}
}


@@ -17,15 +17,9 @@
package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"os"
"reflect"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger"
)
@@ -147,69 +141,3 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
}
return tillOffset
}
// erasureSelfTest performs a self-test to ensure that erasure
// algorithms compute expected erasure codes. If any algorithm
// produces an incorrect value, it fails with a hard error.
//
// erasureSelfTest tries to catch any issue in the erasure implementation
// early instead of silently corrupting data.
func erasureSelfTest() {
// Approx runtime ~1ms
var testConfigs [][2]uint8
for total := uint8(4); total < 16; total++ {
for data := total / 2; data < total; data++ {
parity := total - data
testConfigs = append(testConfigs, [2]uint8{data, parity})
}
}
got := make(map[[2]uint8]map[ErasureAlgo]uint64, len(testConfigs))
// Copied from output of fmt.Printf("%#v", got) at the end.
want := map[[2]uint8]map[ErasureAlgo]uint64{{0x2, 0x2}: {0x1: 0x23fb21be2496f5d3}, {0x2, 0x3}: {0x1: 0xa5cd5600ba0d8e7c}, {0x3, 0x1}: {0x1: 0x60ab052148b010b4}, {0x3, 0x2}: {0x1: 0xe64927daef76435a}, {0x3, 0x3}: {0x1: 0x672f6f242b227b21}, {0x3, 0x4}: {0x1: 0x571e41ba23a6dc6}, {0x4, 0x1}: {0x1: 0x524eaa814d5d86e2}, {0x4, 0x2}: {0x1: 0x62b9552945504fef}, {0x4, 0x3}: {0x1: 0xcbf9065ee053e518}, {0x4, 0x4}: {0x1: 0x9a07581dcd03da8}, {0x4, 0x5}: {0x1: 0xbf2d27b55370113f}, {0x5, 0x1}: {0x1: 0xf71031a01d70daf}, {0x5, 0x2}: {0x1: 0x8e5845859939d0f4}, {0x5, 0x3}: {0x1: 0x7ad9161acbb4c325}, {0x5, 0x4}: {0x1: 0xc446b88830b4f800}, {0x5, 0x5}: {0x1: 0xabf1573cc6f76165}, {0x5, 0x6}: {0x1: 0x7b5598a85045bfb8}, {0x6, 0x1}: {0x1: 0xe2fc1e677cc7d872}, {0x6, 0x2}: {0x1: 0x7ed133de5ca6a58e}, {0x6, 0x3}: {0x1: 0x39ef92d0a74cc3c0}, {0x6, 0x4}: {0x1: 0xcfc90052bc25d20}, {0x6, 0x5}: {0x1: 0x71c96f6baeef9c58}, {0x6, 0x6}: {0x1: 0x4b79056484883e4c}, {0x6, 0x7}: {0x1: 0xb1a0e2427ac2dc1a}, {0x7, 0x1}: {0x1: 0x937ba2b7af467a22}, {0x7, 0x2}: {0x1: 0x5fd13a734d27d37a}, {0x7, 0x3}: {0x1: 0x3be2722d9b66912f}, {0x7, 0x4}: {0x1: 0x14c628e59011be3d}, {0x7, 0x5}: {0x1: 0xcc3b39ad4c083b9f}, {0x7, 0x6}: {0x1: 0x45af361b7de7a4ff}, {0x7, 0x7}: {0x1: 0x456cc320cec8a6e6}, {0x7, 0x8}: {0x1: 0x1867a9f4db315b5c}, {0x8, 0x1}: {0x1: 0xbc5756b9a9ade030}, {0x8, 0x2}: {0x1: 0xdfd7d9d0b3e36503}, {0x8, 0x3}: {0x1: 0x72bb72c2cdbcf99d}, {0x8, 0x4}: {0x1: 0x3ba5e9b41bf07f0}, {0x8, 0x5}: {0x1: 0xd7dabc15800f9d41}, {0x8, 0x6}: {0x1: 0xb482a6169fd270f}, {0x8, 0x7}: {0x1: 0x50748e0099d657e8}, {0x9, 0x1}: {0x1: 0xc77ae0144fcaeb6e}, {0x9, 0x2}: {0x1: 0x8a86c7dbebf27b68}, {0x9, 0x3}: {0x1: 0xa64e3be6d6fe7e92}, {0x9, 0x4}: {0x1: 0x239b71c41745d207}, {0x9, 0x5}: {0x1: 0x2d0803094c5a86ce}, {0x9, 0x6}: {0x1: 0xa3c2539b3af84874}, {0xa, 0x1}: {0x1: 0x7d30d91b89fcec21}, {0xa, 0x2}: {0x1: 0xfa5af9aa9f1857a3}, {0xa, 0x3}: {0x1: 0x84bc4bda8af81f90}, {0xa, 0x4}: {0x1: 0x6c1cba8631de994a}, {0xa, 0x5}: {0x1: 0x4383e58a086cc1ac}, {0xb, 0x1}: {0x1: 0x4ed2929a2df690b}, {0xb, 0x2}: {0x1: 0xecd6f1b1399775c0}, {0xb, 0x3}: {0x1: 0xc78cfbfc0dc64d01}, {0xb, 0x4}: {0x1: 0xb2643390973702d6}, {0xc, 0x1}: {0x1: 0x3b2a88686122d082}, {0xc, 0x2}: {0x1: 0xfd2f30a48a8e2e9}, {0xc, 0x3}: {0x1: 0xd5ce58368ae90b13}, {0xd, 0x1}: {0x1: 0x9c88e2a9d1b8fff8}, {0xd, 0x2}: {0x1: 0xcb8460aa4cf6613}, {0xe, 0x1}: {0x1: 0x78a28bbaec57996e}}
var testData [256]byte
for i := range testData {
testData[i] = byte(i)
}
ok := true
for algo := invalidErasureAlgo + 1; algo < lastErasureAlgo; algo++ {
for _, conf := range testConfigs {
failOnErr := func(err error) {
if err != nil {
logger.Fatal(errSelfTestFailure, "%v: error on self-test [d:%d,p:%d]: %v. Unsafe to start server.\n", algo, conf[0], conf[1], err)
}
}
e, err := NewErasure(context.Background(), int(conf[0]), int(conf[1]), blockSizeV2)
failOnErr(err)
encoded, err := e.EncodeData(GlobalContext, testData[:])
failOnErr(err)
hash := xxhash.New()
for i, data := range encoded {
// Write index to keep track of sizes of each.
_, err = hash.Write([]byte{byte(i)})
failOnErr(err)
_, err = hash.Write(data)
failOnErr(err)
got[conf] = map[ErasureAlgo]uint64{algo: hash.Sum64()}
}
if a, b := want[conf], got[conf]; !reflect.DeepEqual(a, b) {
fmt.Fprintf(os.Stderr, "%v: error on self-test [d:%d,p:%d]: want %#v, got %#v\n", algo, conf[0], conf[1], a, b)
ok = false
continue
}
// Delete first shard and reconstruct...
first := encoded[0]
encoded[0] = nil
failOnErr(e.DecodeDataBlocks(encoded))
if a, b := first, encoded[0]; !bytes.Equal(a, b) {
fmt.Fprintf(os.Stderr, "%v: error on self-test [d:%d,p:%d]: want %#v, got %#v\n", algo, conf[0], conf[1], hex.EncodeToString(a), hex.EncodeToString(b))
ok = false
continue
}
}
}
if !ok {
logger.Fatal(errSelfTestFailure, "Erasure Coding self test failed")
}
}

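erasureSelfTest above boils down to an encode-hash-drop-reconstruct cycle: encode a known payload, hash every shard against golden values, then zero one shard and verify reconstruction restores it byte for byte. The same cycle can be reproduced directly against klauspost/reedsolomon, the library the erasure package wraps:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const data, parity = 4, 2
	enc, err := reedsolomon.New(data, parity)
	if err != nil {
		panic(err)
	}

	// Split the payload into data shards and compute the parity shards.
	payload := bytes.Repeat([]byte{'a'}, 1024)
	shards, err := enc.Split(payload)
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}

	// Drop the first shard and reconstruct it, as the self-test does.
	first := append([]byte(nil), shards[0]...)
	shards[0] = nil
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(first, shards[0])) // true
}
```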

@@ -139,7 +139,7 @@ func TestErasureEncode(t *testing.T) {
case *wholeBitrotWriter:
w.disk = badDisk{nil}
case *streamingBitrotWriter:
w.closeWithErr(errFaultyDisk)
w.iow.(*io.PipeWriter).CloseWithError(errFaultyDisk)
}
}
if test.offDisks > 0 {


@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"context"
"time"
@@ -25,11 +24,9 @@ import (
)
// commonTime returns the maximally occurring time from a list of times.
func commonTime(modTimes []time.Time, dataDirs []string) (modTime time.Time, dataDir string) {
func commonTime(modTimes []time.Time) (modTime time.Time, count int) {
var maxima int // Counter for remembering max occurrence of elements.
timeOccurenceMap := make(map[int64]int, len(modTimes))
dataDirOccurenceMap := make(map[string]int, len(dataDirs))
timeOccurenceMap := make(map[int64]int)
// Ignore the time sentinel and count the rest.
for _, time := range modTimes {
if time.Equal(timeSentinel) {
@@ -38,13 +35,6 @@ func commonTime(modTimes []time.Time, dataDirs []string) (modTime time.Time, dat
timeOccurenceMap[time.UnixNano()]++
}
for _, dataDir := range dataDirs {
if dataDir == "" {
continue
}
dataDirOccurenceMap[dataDir]++
}
// Find the common cardinality from the previously collected
// occurrences of elements.
for nano, count := range timeOccurenceMap {
@@ -55,18 +45,8 @@ func commonTime(modTimes []time.Time, dataDirs []string) (modTime time.Time, dat
}
}
// Find the common cardinality from the previously collected
// occurrences of elements.
var dmaxima int
for ddataDir, count := range dataDirOccurenceMap {
if count > dmaxima {
dmaxima = count
dataDir = ddataDir
}
}
// Return the collected common values.
return modTime, dataDir
return modTime, maxima
}
// The beginning of unix time is treated as a sentinel value here.
@@ -120,33 +100,24 @@ func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time
// - a slice of disks where disks having 'older' xl.meta (or nothing)
// are set to nil.
// - latest (in time) of the maximally occurring modTime(s).
func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time, dataDir string) {
func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
onlineDisks = make([]StorageAPI, len(disks))
// List all the file commit ids from parts metadata.
modTimes := listObjectModtimes(partsMetadata, errs)
dataDirs := make([]string, len(partsMetadata))
for idx, fi := range partsMetadata {
if errs[idx] != nil {
continue
}
dataDirs[idx] = fi.DataDir
}
// Reduce the list of modTimes to a single common value.
modTime, dataDir = commonTime(modTimes, dataDirs)
modTime, _ = commonTime(modTimes)
// Create a new online disks slice, which have common uuid.
for index, t := range modTimes {
if partsMetadata[index].IsValid() && t.Equal(modTime) && partsMetadata[index].DataDir == dataDir {
if t.Equal(modTime) {
onlineDisks[index] = disks[index]
} else {
onlineDisks[index] = nil
}
}
return onlineDisks, modTime, dataDir
return onlineDisks, modTime
}
// Returns the latest updated FileInfo files and error in case of failure.
@@ -159,24 +130,16 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
// List all the file commit ids from parts metadata.
modTimes := listObjectModtimes(partsMetadata, errs)
dataDirs := make([]string, len(partsMetadata))
for idx, fi := range partsMetadata {
if errs[idx] != nil {
continue
}
dataDirs[idx] = fi.DataDir
}
// Count all latest updated FileInfo values
var count int
var latestFileInfo FileInfo
// Reduce the list of modTimes to a single common value - i.e. the last updated time.
modTime, dataDir := commonTime(modTimes, dataDirs)
modTime, _ := commonTime(modTimes)
// Iterate through all the modTimes and count the FileInfo(s) with the latest time.
for index, t := range modTimes {
if partsMetadata[index].IsValid() && t.Equal(modTime) && dataDir == partsMetadata[index].DataDir {
if t.Equal(modTime) && partsMetadata[index].IsValid() {
latestFileInfo = partsMetadata[index]
count++
}
@@ -235,8 +198,8 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
dataErrs[i] = errDiskNotFound
continue
}
meta := partsMetadata[i]
if erasureDistributionReliable {
meta := partsMetadata[i]
if !meta.IsValid() {
continue
}
@@ -258,21 +221,6 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
}
}
// Always check data, if we got it.
if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 {
checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number)
dataErrs[i] = bitrotVerify(bytes.NewBuffer(meta.Data),
int64(len(meta.Data)),
meta.Erasure.ShardFileSize(meta.Size),
checksumInfo.Algorithm,
checksumInfo.Hash, meta.Erasure.ShardSize())
if dataErrs[i] == nil {
// All parts verified, mark it as all data available.
availableDisks[i] = onlineDisk
}
continue
}
switch scanMode {
case madmin.HealDeepScan:
// disk has a valid xl.meta but may not have all the

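Stripped of the dataDir bookkeeping this diff removes, commonTime reduces to a majority count over modification times: tally every non-sentinel time and return the one that occurs most often, plus its cardinality. A self-contained sketch of that reduction:

```
package main

import (
	"fmt"
	"time"
)

// The beginning of unix time stands in for "no xl.meta on this disk".
var timeSentinel = time.Unix(0, 0).UTC()

// mostCommonTime mirrors commonTime: count every non-sentinel modTime
// and return the maximally occurring one with its count.
func mostCommonTime(modTimes []time.Time) (modTime time.Time, count int) {
	occ := make(map[int64]int, len(modTimes))
	for _, t := range modTimes {
		if t.Equal(timeSentinel) {
			continue // ignore disks without metadata
		}
		occ[t.UnixNano()]++
	}
	for nano, c := range occ {
		if c > count {
			count = c
			modTime = time.Unix(0, nano).UTC()
		}
	}
	return modTime, count
}

func main() {
	three, four := time.Unix(3, 0).UTC(), time.Unix(4, 0).UTC()
	mt, n := mostCommonTime([]time.Time{three, four, four, timeSentinel})
	fmt.Println(mt.Unix(), n) // 4 2
}
```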

@@ -82,7 +82,7 @@ func TestCommonTime(t *testing.T) {
// common modtime. Tests fail if modtime does not match.
for i, testCase := range testCases {
// Obtain a common mod time from modTimes slice.
ctime, _ := commonTime(testCase.times, nil)
ctime, _ := commonTime(testCase.times)
if !testCase.time.Equal(ctime) {
t.Fatalf("Test case %d, expect to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
}
@@ -177,275 +177,85 @@ func TestListOnlineDisks(t *testing.T) {
}
object := "object"
data := bytes.Repeat([]byte("a"), smallFileThreshold*16)
data := bytes.Repeat([]byte("a"), 1024)
z := obj.(*erasureServerPools)
erasureDisks := z.serverPools[0].sets[0].getDisks()
for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
fi, err := getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
t.Fatalf("Failed to getLatestFileInfo %v", err)
}
partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
fi, err := getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
t.Fatalf("Failed to getLatestFileInfo %v", err)
}
for j := range partsMetadata {
if errs[j] != nil {
t.Fatalf("expected error to be nil: %s", errs[j])
for j := range partsMetadata {
if errs[j] != nil {
t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[j])
}
partsMetadata[j].ModTime = test.modTimes[j]
}
tamperedIndex := -1
switch test._tamperBackend {
case deletePart:
for index, err := range test.errs {
if err != nil {
continue
}
partsMetadata[j].ModTime = test.modTimes[j]
}
tamperedIndex := -1
switch test._tamperBackend {
case deletePart:
for index, err := range test.errs {
if err != nil {
continue
}
// Remove a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
if dErr != nil {
t.Fatalf("Failed to delete %s - %v", filepath.Join(object, "part.1"), dErr)
}
break
// Remove a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
if dErr != nil {
t.Fatalf("Test %d: Failed to delete %s - %v", i+1,
filepath.Join(object, "part.1"), dErr)
}
case corruptPart:
for index, err := range test.errs {
if err != nil {
continue
}
// Corrupt a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
if err != nil {
t.Fatalf("Failed to open %s: %s\n", filePath, err)
}
f.Write([]byte("oops")) // Will cause bitrot error
f.Close()
break
break
}
case corruptPart:
for index, err := range test.errs {
if err != nil {
continue
}
}
onlineDisks, modTime, dataDir := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
if !modTime.Equal(test.expectedTime) {
t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
test.expectedTime, modTime)
}
if fi.DataDir != dataDir {
t.Fatalf("Expected dataDir to be equal to %v but was found to be %v",
fi.DataDir, dataDir)
}
availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
test.errs = newErrs
if test._tamperBackend != noTamper {
if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
t.Fatalf("disk (%v) with part.1 missing is not a disk with available data",
erasureDisks[tamperedIndex])
// Corrupt a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
if err != nil {
t.Fatalf("Failed to open %s: %s\n", filePath, err)
}
}
})
}
}
// TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisksSmallObjects(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
if err != nil {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
defer obj.Shutdown(context.Background())
defer removeRoots(disks)
type tamperKind int
const (
noTamper tamperKind = iota
deletePart tamperKind = iota
corruptPart tamperKind = iota
)
timeSentinel := time.Unix(1, 0).UTC()
threeNanoSecs := time.Unix(3, 0).UTC()
fourNanoSecs := time.Unix(4, 0).UTC()
modTimesThreeNone := []time.Time{
threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
threeNanoSecs, threeNanoSecs, threeNanoSecs,
timeSentinel, timeSentinel, timeSentinel, timeSentinel,
timeSentinel, timeSentinel, timeSentinel, timeSentinel,
timeSentinel,
}
modTimesThreeFour := []time.Time{
threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
}
testCases := []struct {
modTimes []time.Time
expectedTime time.Time
errs []error
_tamperBackend tamperKind
}{
{
modTimes: modTimesThreeFour,
expectedTime: fourNanoSecs,
errs: []error{
nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil,
},
_tamperBackend: noTamper,
},
{
modTimes: modTimesThreeNone,
expectedTime: threeNanoSecs,
errs: []error{
// Disks that have a valid xl.meta.
nil, nil, nil, nil, nil, nil, nil,
// Majority of disks don't have xl.meta.
errFileNotFound, errFileNotFound,
errFileNotFound, errFileNotFound,
errFileNotFound, errDiskAccessDenied,
errDiskNotFound, errFileNotFound,
errFileNotFound,
},
_tamperBackend: deletePart,
},
{
modTimes: modTimesThreeNone,
expectedTime: threeNanoSecs,
errs: []error{
// Disks that have a valid xl.meta.
nil, nil, nil, nil, nil, nil, nil,
// Majority of disks don't have xl.meta.
errFileNotFound, errFileNotFound,
errFileNotFound, errFileNotFound,
errFileNotFound, errDiskAccessDenied,
errDiskNotFound, errFileNotFound,
errFileNotFound,
},
_tamperBackend: corruptPart,
},
}
bucket := "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatalf("Failed to make a bucket %v", err)
}
object := "object"
data := bytes.Repeat([]byte("a"), smallFileThreshold/2)
z := obj.(*erasureServerPools)
erasureDisks := z.serverPools[0].sets[0].getDisks()
for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
f.Write([]byte("oops")) // Will cause bitrot error
f.Close()
break
}
partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", true)
fi, err := getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
t.Fatalf("Failed to getLatestFileInfo %v", err)
}
}
for j := range partsMetadata {
if errs[j] != nil {
t.Fatalf("expected error to be nil: %s", errs[j])
}
partsMetadata[j].ModTime = test.modTimes[j]
}
onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
if !modTime.Equal(test.expectedTime) {
t.Fatalf("Test %d: Expected modTime to be equal to %v but was found to be %v",
i+1, test.expectedTime, modTime)
}
if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
t.Fatal(ctx, err)
}
tamperedIndex := -1
switch test._tamperBackend {
case deletePart:
for index, err := range test.errs {
if err != nil {
continue
}
// Remove a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
if dErr != nil {
t.Fatalf("Failed to delete %s - %v", pathJoin(object, xlStorageFormatFile), dErr)
}
break
}
case corruptPart:
for index, err := range test.errs {
if err != nil {
continue
}
// Corrupt a part from a disk
// which has a valid xl.meta,
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
filePath := pathJoin(erasureDisks[index].String(), bucket, object, xlStorageFormatFile)
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
if err != nil {
t.Fatalf("Failed to open %s: %s\n", filePath, err)
}
f.Write([]byte("oops")) // Will cause bitrot error
f.Close()
break
}
availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
test.errs = newErrs
if test._tamperBackend != noTamper {
if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
t.Fatalf("Test %d: disk (%v) with part.1 missing is not a disk with available data",
i+1, erasureDisks[tamperedIndex])
}
partsMetadata, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", true)
_, err = getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
t.Fatalf("Failed to getLatestFileInfo %v", err)
}
}
onlineDisks, modTime, dataDir := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
if !modTime.Equal(test.expectedTime) {
t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
test.expectedTime, modTime)
}
if fi.DataDir != dataDir {
t.Fatalf("Expected dataDir to be equal to %v but was found to be %v",
fi.DataDir, dataDir)
}
availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
test.errs = newErrs
if test._tamperBackend != noTamper {
if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
t.Fatalf("disk (%v) with part.1 missing is not a disk with available data",
erasureDisks[tamperedIndex])
}
}
})
}
}


@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
@@ -231,7 +230,8 @@ func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime t
}
// Heals an object by re-writing corrupt/missing erasure blocks.
func (er erasureObjects) healObject(ctx context.Context, bucket string, object string, versionID string, opts madmin.HealOpts) (result madmin.HealResultItem, err error) {
func (er erasureObjects) healObject(ctx context.Context, bucket string, object string,
versionID string, partsMetadata []FileInfo, errs []error, lfi FileInfo, opts madmin.HealOpts) (result madmin.HealResultItem, err error) {
dryRun := opts.DryRun
scanMode := opts.ScanMode
@@ -249,18 +249,17 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
DataBlocks: len(storageDisks) - er.defaultParityCount,
}
lk := er.NewNSLock(bucket, object)
if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
return result, err
if !opts.NoLock {
lk := er.NewNSLock(bucket, object)
if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
return result, err
}
defer lk.Unlock()
}
defer lk.Unlock()
// Re-read when we have lock...
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true)
// List of disks having latest version of the object er.meta
// (by modtime).
latestDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// List of disks having all parts as per latest er.meta.
availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)
@@ -350,7 +349,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
// Latest FileInfo for reference. If a valid metadata is not
// present, it is as good as object not found.
latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, result.DataBlocks)
if err != nil {
return result, toObjectErr(err, bucket, object, versionID)
}
@@ -359,7 +358,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
cleanFileInfo := func(fi FileInfo) FileInfo {
// Returns a copy of the 'fi' with checksums and parts nil'ed.
nfi := fi
nfi.Erasure.Index = 0
nfi.Erasure.Checksums = nil
nfi.Parts = nil
return nfi
@@ -369,28 +367,16 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
tmpID := mustGetUUID()
migrateDataDir := mustGetUUID()
copyPartsMetadata := make([]FileInfo, len(partsMetadata))
for i := range outDatedDisks {
if outDatedDisks[i] == nil {
continue
}
copyPartsMetadata[i] = partsMetadata[i]
partsMetadata[i] = cleanFileInfo(latestMeta)
}
// The source data dir will be empty in the XLV1 case;
// differentiate it from dstDataDir for readability.
// srcDataDir is the one used with newBitrotReader()
// to read existing content.
srcDataDir := latestMeta.DataDir
dstDataDir := latestMeta.DataDir
dataDir := latestMeta.DataDir
if latestMeta.XLV1 {
dstDataDir = migrateDataDir
}
var inlineBuffers []*bytes.Buffer
if len(latestMeta.Parts) <= 1 && latestMeta.Size < smallFileThreshold {
inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
dataDir = migrateDataDir
}
if !latestMeta.Deleted || latestMeta.TransitionStatus != lifecycle.TransitionComplete {
@@ -401,7 +387,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
latestDisks = shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)
// Heal each part. erasureHealFile() will write the healed
// part to .minio/tmp/uuid/ which needs to be renamed later to
@@ -413,7 +398,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
}
erasureInfo := latestMeta.Erasure
for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
partSize := latestMeta.Parts[partIndex].Size
partActualSize := latestMeta.Parts[partIndex].ActualSize
@@ -425,23 +409,21 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
if disk == OfflineDisk {
continue
}
checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber))
readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
if latestMeta.XLV1 {
partPath = pathJoin(object, fmt.Sprintf("part.%d", partNumber))
}
readers[i] = newBitrotReader(disk, nil, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
}
writers := make([]io.Writer, len(outDatedDisks))
for i, disk := range outDatedDisks {
if disk == OfflineDisk {
continue
}
partPath := pathJoin(tmpID, dstDataDir, fmt.Sprintf("part.%d", partNumber))
if len(inlineBuffers) > 0 {
inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, erasure.ShardFileSize(latestMeta.Size)))
writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
} else {
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize(), true)
}
partPath := pathJoin(tmpID, dataDir, fmt.Sprintf("part.%d", partNumber))
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize(), true)
}
err = erasure.Heal(ctx, readers, writers, partSize)
closeBitrotReaders(readers)
@@ -464,18 +446,13 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
continue
}
partsMetadata[i].DataDir = dstDataDir
partsMetadata[i].DataDir = dataDir
partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize)
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: partNumber,
Algorithm: checksumAlgo,
Hash: bitrotWriterSum(writers[i]),
})
if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
partsMetadata[i].Data = inlineBuffers[i].Bytes()
} else {
partsMetadata[i].Data = nil
}
}
// If all disks are having errors, we give up.
@@ -487,18 +464,24 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tmpID, len(storageDisks)/2+1)
// Generate and write `xl.meta` derived from the other disks.
outDatedDisks, err = writeUniqueFileInfo(ctx, outDatedDisks, minioMetaTmpBucket, tmpID,
partsMetadata, diskCount(outDatedDisks))
if err != nil {
return result, toObjectErr(err, bucket, object, versionID)
}
// Rename from tmp location to the actual location.
for i, disk := range outDatedDisks {
if disk == OfflineDisk {
continue
}
// record the index of the updated disks
partsMetadata[i].Erasure.Index = i + 1
// Attempt a rename now from healed data to final location.
if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
logger.LogIf(ctx, err)
if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i].DataDir, bucket, object); err != nil {
if err != errIsNotRegular && err != errFileNotFound {
logger.LogIf(ctx, err)
}
return result, toObjectErr(err, bucket, object)
}
@@ -847,12 +830,6 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
storageEndpoints := er.getEndpoints()
// Read metadata files from all the disks
// When versionID is empty, we read directly from the `null` versionID for healing.
if versionID == "" {
versionID = nullVersionID
}
partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
if isAllNotFound(errs) {
@@ -864,11 +841,11 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object, versionID, er.defaultParityCount), err
}
_, err = getLatestFileInfo(healCtx, partsMetadata, errs)
fi, err := getLatestFileInfo(healCtx, partsMetadata, errs)
if err != nil {
return er.purgeObjectDangling(healCtx, bucket, object, versionID, partsMetadata, errs, []error{}, opts)
}
// Heal the object.
return er.healObject(healCtx, bucket, object, versionID, opts)
return er.healObject(healCtx, bucket, object, versionID, partsMetadata, errs, fi, opts)
}

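One behavioral difference in the healObject hunks above: on the inline-data side of the change, small single-part objects (below smallFileThreshold) are healed into in-memory buffers and embedded in xl.meta as fi.Data, while everything else heals through per-disk bitrot writers. A hedged sketch of that writer selection (interfaces simplified; the threshold value is an assumption, not read from the repo):

```
package main

import (
	"bytes"
	"fmt"
	"io"
)

// Assumed value for illustration; the repo defines its own constant.
const smallFileThreshold = 128 << 10

// pickWriters mirrors the inlineBuffers branch above: small single-part
// objects heal into memory (later stored inline in xl.meta), larger
// objects heal through one writer per outdated disk.
func pickWriters(parts int, size int64, disks int, diskWriter func(i int) io.Writer) ([]io.Writer, []*bytes.Buffer) {
	writers := make([]io.Writer, disks)
	if parts <= 1 && size < smallFileThreshold {
		inline := make([]*bytes.Buffer, disks)
		for i := range writers {
			inline[i] = bytes.NewBuffer(make([]byte, 0, size))
			writers[i] = inline[i]
		}
		return writers, inline
	}
	for i := range writers {
		writers[i] = diskWriter(i)
	}
	return writers, nil
}

func main() {
	_, inline := pickWriters(1, 4096, 4, func(int) io.Writer { return io.Discard })
	fmt.Println(inline != nil) // true: a 4 KiB object heals inline
}
```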

@@ -135,8 +135,7 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
errFileVersionNotFound,
errDiskNotFound,
}...) {
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
disks[index], bucket, object, err),
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s returned an error (%w)", disks[index], err),
disks[index].String())
}
}
@@ -148,15 +147,11 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
return metadataArray, g.Wait()
}
// shuffleDisksAndPartsMetadataByIndex should always be used by the GetObjectNInfo()
// and CompleteMultipartUpload code paths; it is not meant for PutObject or
// NewMultipartUpload metadata shuffling.
func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, fi FileInfo) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
shuffledDisks = make([]StorageAPI, len(disks))
shuffledPartsMetadata = make([]FileInfo, len(disks))
distribution := fi.Erasure.Distribution
var inconsistent int
distribution := fi.Erasure.Distribution
for i, meta := range metaArr {
if disks[i] == nil {
// Assuming offline drives as inconsistent,
@@ -165,14 +160,6 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
inconsistent++
continue
}
if !meta.IsValid() {
inconsistent++
continue
}
if len(fi.Data) != len(meta.Data) {
inconsistent++
continue
}
// check if erasure distribution order matches the index
// position if this is not correct we discard the disk
// and move to collect others
@@ -192,36 +179,18 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
}
// fall back to original distribution based order.
return shuffleDisksAndPartsMetadata(disks, metaArr, fi)
return shuffleDisksAndPartsMetadata(disks, metaArr, distribution)
}
// Return shuffled partsMetadata depending on fi.Distribution.
// Additional validation is attempted, and invalid metadata is
// automatically skipped, only when fi.ModTime is non-zero,
// indicating that this is called during the read phase.
func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo, fi FileInfo) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
// Return shuffled partsMetadata depending on distribution.
func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo, distribution []int) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
if distribution == nil {
return disks, partsMetadata
}
shuffledDisks = make([]StorageAPI, len(disks))
shuffledPartsMetadata = make([]FileInfo, len(partsMetadata))
distribution := fi.Erasure.Distribution
init := fi.ModTime.IsZero()
// Shuffle slice xl metadata for expected distribution.
for index := range partsMetadata {
if disks[index] == nil {
continue
}
if !init && !partsMetadata[index].IsValid() {
// Check for parts metadata validity for only
// fi.ModTime is not empty - ModTime is always set,
// if object was ever written previously.
continue
}
if !init && len(fi.Data) != len(partsMetadata[index].Data) {
// Check for length of data parts only when
// fi.ModTime is not empty - ModTime is always set,
// if object was ever written previously.
continue
}
blockIndex := distribution[index]
shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
shuffledDisks[blockIndex-1] = disks[index]

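Every shuffle helper in the file above applies one indexing rule: `distribution[i]` is the 1-based erasure block index for slot i, so element i lands at `distribution[i]-1`, and a nil distribution means "leave the order alone". The rule in isolation:

```
package main

import "fmt"

// shuffleByDistribution places element i at distribution[i]-1, the
// same rule shuffleDisksAndPartsMetadata applies to disks and metadata.
func shuffleByDistribution(items []string, distribution []int) []string {
	if distribution == nil {
		return items
	}
	shuffled := make([]string, len(items))
	for i, blockIndex := range distribution {
		shuffled[blockIndex-1] = items[i]
	}
	return shuffled
}

func main() {
	disks := []string{"d0", "d1", "d2", "d3"}
	// A made-up distribution; real ones come from fi.Erasure.Distribution.
	fmt.Println(shuffleByDistribution(disks, []int{2, 1, 4, 3}))
	// Output: [d1 d0 d3 d2]
}
```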

@@ -138,10 +138,7 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
objInfo.UserTags = fi.Metadata[xhttp.AmzObjectTagging]
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
@@ -232,17 +229,15 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn
return 0, 0, InvalidRange{}
}
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
if meta.IsValid() && meta.ModTime.Equal(modTime) {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that the length of Data is the same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
@@ -280,8 +275,39 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
// pickValidFileInfo - picks one valid FileInfo content and returns from a
// slice of FileInfo.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, quorum)
}
// Rename metadata content to destination location for each disk concurrently.
func renameFileInfo(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) {
ignoredErr := []error{errFileNotFound}
g := errgroup.WithNErrs(len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
if err := disks[index].RenameData(ctx, srcBucket, srcEntry, "", dstBucket, dstEntry); err != nil {
if !IsErrIgnored(err, ignoredErr...) {
return err
}
}
return nil
}, index)
}
// Wait for all renames to finish.
errs := g.Wait()
// We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, errs), err
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
@@ -296,12 +322,8 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
files[index].Erasure.Index = index + 1
return disks[index].WriteMetadata(ctx, bucket, prefix, files[index])
}, index)
}

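findFileInfoInQuorum above votes by fingerprint: each candidate's distinguishing fields (part numbers, erasure distribution, and, in one variant, the inline-data length) are hashed, identical hashes are counted, and a candidate wins only if its hash reaches the quorum. The counting step on its own, with the fields flattened to strings for brevity:

```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
)

var errReadQuorum = errors.New("read quorum not met")

// quorumIndex hashes each candidate's distinguishing fields, tallies
// identical hashes, and returns the first index meeting the quorum.
func quorumIndex(fields []string, quorum int) (int, error) {
	hashes := make([]string, len(fields))
	counts := make(map[string]int, len(fields))
	for i, f := range fields {
		if f == "" {
			continue // invalid metadata casts no vote
		}
		sum := sha256.Sum256([]byte(f))
		hashes[i] = hex.EncodeToString(sum[:])
		counts[hashes[i]]++
	}
	for i, h := range hashes {
		if h != "" && counts[h] >= quorum {
			return i, nil
		}
	}
	return -1, errReadQuorum
}

func main() {
	fields := []string{"part.1|dist", "part.1|dist", "part.1|other", ""}
	i, err := quorumIndex(fields, 2)
	fmt.Println(i, err) // 0 <nil>
}
```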

@@ -157,11 +157,10 @@ func TestObjectToPartOffset(t *testing.T) {
}
func TestFindFileInfoInQuorum(t *testing.T) {
getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
getNFInfo := func(n int, quorum int, t int64) []FileInfo {
fi := newFileInfo("test", 8, 8)
fi.AddObjectPart(1, "etag", 100, 100)
fi.ModTime = time.Unix(t, 0)
fi.DataDir = dataDir
fis := make([]FileInfo, n)
for i := range fis {
fis[i] = fi
@@ -177,19 +176,16 @@ func TestFindFileInfoInQuorum(t *testing.T) {
tests := []struct {
fis []FileInfo
modTime time.Time
dataDir string
expectedErr error
}{
{
fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
fis: getNFInfo(16, 16, 1603863445),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: nil,
},
{
fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
fis: getNFInfo(16, 7, 1603863445),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: errErasureReadQuorum,
},
}
@@ -197,7 +193,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, 8)
if err != test.expectedErr {
t.Errorf("Expected %s, got %s", test.expectedErr, err)
}


@@ -44,13 +44,7 @@ func (er erasureObjects) getMultipartSHADir(bucket, object string) string {
}
// checkUploadIDExists - verify if a given uploadID exists and is valid.
func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) {
defer func() {
if err == errFileNotFound {
err = errUploadIDNotFound
}
}()
func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error {
disks := er.getDisks()
// Read metadata associated with the object from all disks.
@@ -62,14 +56,14 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
}
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
return reducedErr
return toObjectErr(reducedErr, bucket, object)
}
// List all online disks.
_, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
_, modTime := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
_, err = pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
_, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
return err
}
@@ -283,63 +277,74 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
// operation(s) on the object.
func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
onlineDisks := er.getDisks()
parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
if parityDrives <= 0 {
parityDrives = er.defaultParityCount
parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = er.defaultParityCount
}
dataDrives := len(onlineDisks) - parityDrives
dataBlocks := len(onlineDisks) - parityBlocks
fi := newFileInfo(pathJoin(bucket, object), dataBlocks, parityBlocks)
// we now know the number of blocks this object needs for data and parity.
// establish the writeQuorum using this data
writeQuorum := dataDrives
if dataDrives == parityDrives {
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
// Initialize parts metadata
partsMetadata := make([]FileInfo, len(onlineDisks))
if opts.UserDefined["content-type"] == "" {
contentType := mimedb.TypeByExtension(path.Ext(object))
opts.UserDefined["content-type"] = contentType
}
fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
// Calculate the version to be saved.
if opts.Versioned {
fi.VersionID = opts.VersionID
if fi.VersionID == "" {
fi.VersionID = mustGetUUID()
}
}
fi.DataDir = mustGetUUID()
// Initialize erasure metadata.
for index := range partsMetadata {
partsMetadata[index] = fi
}
// Guess content-type from the extension if possible.
if opts.UserDefined["content-type"] == "" {
opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
}
modTime := opts.MTime
if opts.MTime.IsZero() {
modTime = UTCNow()
}
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(onlineDisks, partsMetadata, fi)
// Fill all the necessary metadata.
// Update `xl.meta` content on each disks.
for index := range partsMetadata {
partsMetadata[index].Metadata = opts.UserDefined
partsMetadata[index].ModTime = modTime
}
fi.ModTime = UTCNow()
fi.Metadata = cloneMSS(opts.UserDefined)
uploadID := mustGetUUID()
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
tempUploadIDPath := uploadID
// Delete the tmp path later in case we fail to commit (ignore
// returned errors) - this will be a no-op in case of a commit
// success.
var online int
defer func() {
if online != len(onlineDisks) {
er.deleteObject(context.Background(), minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
}
}()
var partsMetadata = make([]FileInfo, len(onlineDisks))
for i := range onlineDisks {
partsMetadata[i] = fi
}
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(onlineDisks, partsMetadata, fi.Erasure.Distribution)
var err error
// Write updated `xl.meta` to all disks.
if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum)
if err != nil {
return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
}
// Attempt to rename the temp upload object to the actual upload path
_, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, true, writeQuorum, nil)
if err != nil {
return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
online = countOnlineDisks(onlineDisks)
// Return success.
return uploadID, nil
}
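The quorum arithmetic at the top of newMultipartUpload deserves spelling out: with N disks and parity P, the data block count is D = N-P, and writeQuorum is D, bumped to D+1 when D equals P so that two conflicting writers can never both reach quorum on an even split. As a worked example:

```
package main

import "fmt"

// writeQuorum reproduces the rule above: quorum = dataBlocks, plus one
// when data and parity are split evenly (e.g. 8 data + 8 parity).
func writeQuorum(disks, parityBlocks int) int {
	dataBlocks := disks - parityBlocks
	q := dataBlocks
	if dataBlocks == parityBlocks {
		q++
	}
	return q
}

func main() {
	fmt.Println(writeQuorum(16, 4)) // 12
	fmt.Println(writeQuorum(16, 8)) // 9: the even split needs an extra vote
}
```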
@@ -424,10 +429,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
}
// List all online disks.
onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return pi, err
}
@@ -534,10 +539,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
}
// Get current highest version based on re-read partsMetadata.
- onlineDisks, modTime, dataDir = listOnlineDisks(onlineDisks, partsMetadata, errs)
+ onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
- fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
+ fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return pi, err
}
@@ -622,10 +627,10 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
}
- _, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+ _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
- fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, readQuorum)
+ fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
if err != nil {
return result, err
}
@@ -671,10 +676,10 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
}
- _, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+ _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
- fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
+ fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return result, err
}
@@ -776,13 +781,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
return oi, toObjectErr(reducedErr, bucket, object)
}
- onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
- // Pick one from the first valid metadata.
- fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
- if err != nil {
- return oi, err
- }
+ onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Calculate full object size.
var objectSize int64
@@ -790,6 +789,12 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
// Calculate consolidated actual size.
var objectActualSize int64
// Pick one from the first valid metadata.
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return oi, err
}
// Order online disks in accordance with distribution order.
// Order parts metadata in accordance with distribution order.
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)
@@ -901,7 +906,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
// Rename the multipart object to final location.
if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
- partsMetadata, bucket, object, writeQuorum); err != nil {
+ fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
return oi, toObjectErr(err, bucket, object)
}
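// Note (editorial assumption, not from the diff): the renameData call above
// is the commit point of CompleteMultipartUpload; until the staged upload is
// renamed onto the final object path, readers continue to see the previous
// object version.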


@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
@@ -72,7 +71,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
// Read metadata associated with the object from all disks.
storageDisks := er.getDisks()
- metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)
+ metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, false)
// get Quorum for this object
readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
@@ -81,10 +80,10 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
}
// List all online disks.
- onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, metaArr, errs)
+ onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)
// Pick latest valid metadata.
- fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+ fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
if err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
@@ -95,6 +94,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
}
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
versionID := srcInfo.VersionID
if srcInfo.versionOnly {
versionID = dstOpts.VersionID
@@ -122,11 +123,28 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
}
}
tempObj := mustGetUUID()
var online int
// Cleanup in case of xl.meta writing failure
defer func() {
if online != len(onlineDisks) {
er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
}
}()
// Write unique `xl.meta` for each disk.
- if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
+ if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Rename atomically `xl.meta` from tmp location to destination for each disk.
if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
online = countOnlineDisks(onlineDisks)
return fi.ToObjectInfo(srcBucket, srcObject), nil
}
@@ -421,10 +439,10 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
}
// List all online disks.
- onlineDisks, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+ onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
- fi, err = pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+ fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
if err != nil {
return fi, nil, nil, err
}
@@ -435,7 +453,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
missingBlocks++
continue
}
- if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) && metaArr[i].DataDir == fi.DataDir {
+ if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) {
continue
}
missingBlocks++
@@ -507,7 +525,8 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str
}
// Similar to rename but renames data from srcEntry to dstEntry at dataDir
- func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
+ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dataDir, dstBucket, dstEntry string, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
dataDir = retainSlash(dataDir)
defer ObjectPathUpdated(pathJoin(srcBucket, srcEntry))
defer ObjectPathUpdated(pathJoin(dstBucket, dstEntry))
@@ -520,16 +539,12 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
if disks[index] == nil {
return errDiskNotFound
}
- // Pick one FileInfo for a disk at index.
- fi := metadata[index]
- // Assign index when index is initialized
- if fi.Erasure.Index == 0 {
- fi.Erasure.Index = index + 1
+ if err := disks[index].RenameData(ctx, srcBucket, srcEntry, dataDir, dstBucket, dstEntry); err != nil {
+ if !IsErrIgnored(err, ignoredErr...) {
+ return err
+ }
}
- if fi.IsValid() {
- return disks[index].RenameData(ctx, srcBucket, srcEntry, fi, dstBucket, dstEntry)
- }
- return errFileCorrupt
+ return nil
}, index)
}
@@ -539,6 +554,23 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
// We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if err == errErasureWriteQuorum {
ug := errgroup.WithNErrs(len(disks))
for index, disk := range disks {
if disk == nil {
continue
}
index := index
ug.Go(func() error {
// Undo all the partial rename operations.
if errs[index] == nil {
_ = disks[index].RenameData(context.Background(), dstBucket, dstEntry, dataDir, srcBucket, srcEntry)
}
return nil
}, index)
}
ug.Wait()
}
return evalDisks(disks, errs), err
}
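// The undo pass above mirrors the forward pass: only disks whose rename
// succeeded (errs[index] == nil) receive the inverse RenameData call, so a
// write that misses quorum does not leave a partially renamed object behind.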
@@ -658,7 +690,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Order disks according to erasure distribution
var onlineDisks []StorageAPI
- onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)
+ onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi.Erasure.Distribution)
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
@@ -702,28 +734,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
}
}()
- shardFileSize := erasure.ShardFileSize(data.Size())
writers := make([]io.Writer, len(onlineDisks))
- var inlineBuffers []*bytes.Buffer
- if shardFileSize >= 0 {
- if !opts.Versioned && shardFileSize < smallFileThreshold {
- inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
- } else if shardFileSize < smallFileThreshold/8 {
- inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
- }
- }
for i, disk := range onlineDisks {
if disk == nil {
continue
}
- if len(inlineBuffers) > 0 {
- inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
- writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
- continue
- }
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj,
- shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize(), false)
+ erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
}
n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
@@ -753,11 +770,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
onlineDisks[i] = nil
continue
}
- if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
- partsMetadata[i].Data = inlineBuffers[i].Bytes()
- } else {
- partsMetadata[i].Data = nil
- }
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: 1,
@@ -787,9 +799,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
partsMetadata[index].ModTime = modTime
}
+ // Write unique `xl.meta` for each disk.
+ if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
+ return ObjectInfo{}, toObjectErr(err, bucket, object)
+ }
// Rename the successfully written temporary object to final location.
- if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
- logger.LogIf(ctx, err)
+ if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
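// Two-phase write, as elsewhere in this file: shards and xl.meta are staged
// under a tmp UUID, then renameData moves them into the final location, so a
// crash mid-upload leaves stray data only in the tmp bucket.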
@@ -1161,56 +1177,6 @@ func (er erasureObjects) addPartial(bucket, object, versionID string) {
}
}
func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
var err error
// Lock the object before updating tags.
lk := er.NewNSLock(bucket, object)
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
defer lk.Unlock()
disks := er.getDisks()
// Read metadata associated with the object from all disks.
metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// List all online disks.
_, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" {
return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
}
return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
}
for k, v := range opts.UserDefined {
fi.Metadata[k] = v
}
fi.ModTime = opts.MTime
fi.VersionID = opts.VersionID
if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
objInfo := fi.ToObjectInfo(bucket, object)
return objInfo, nil
}
// PutObjectTags - replace or add tags to an existing object
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
var err error
@@ -1227,16 +1193,16 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
// Read metadata associated with the object from all disks.
metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
- readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
+ readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// List all online disks.
- _, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+ onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
- fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+ fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@@ -1247,43 +1213,98 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
}
- fi.Metadata[xhttp.AmzObjectTagging] = tags
- for k, v := range opts.UserDefined {
- fi.Metadata[k] = v
+ onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
+ for i, metaFi := range metaArr {
+ if metaFi.IsValid() {
+ // clean fi.Meta of tag key, before updating the new tags
+ delete(metaFi.Metadata, xhttp.AmzObjectTagging)
+ // Don't update for empty tags
+ if tags != "" {
+ metaFi.Metadata[xhttp.AmzObjectTagging] = tags
+ }
+ for k, v := range opts.UserDefined {
+ metaFi.Metadata[k] = v
+ }
+ metaArr[i].Metadata = metaFi.Metadata
+ }
}
- if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
+ tempObj := mustGetUUID()
+ // Write unique `xl.meta` for each disk.
+ if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
- return fi.ToObjectInfo(bucket, object), nil
+ // Atomically rename metadata from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
+ return ObjectInfo{}, toObjectErr(err, bucket, object)
+ }
+ objInfo := fi.ToObjectInfo(bucket, object)
+ objInfo.UserTags = tags
+ return objInfo, nil
}
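// In the '+' version, PutObjectTags rewrites xl.meta on every online disk
// (tmp write, then renameFileInfo) instead of patching metadata in place via
// updateObjectMeta as the '-' version does.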
// updateObjectMeta will update the metadata of a file.
- func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo) error {
- if len(fi.Metadata) == 0 {
+ func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, meta map[string]string, opts ObjectOptions) error {
+ if len(meta) == 0 {
return nil
}
disks := er.getDisks()
- g := errgroup.WithNErrs(len(disks))
+ // Read metadata associated with the object from all disks.
+ metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
- // Start writing `xl.meta` to all disks in parallel.
- for index := range disks {
- index := index
- g.Go(func() error {
- if disks[index] == nil {
- return errDiskNotFound
- }
- return disks[index].UpdateMetadata(ctx, bucket, object, fi)
- }, index)
+ readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
+ if err != nil {
+ return toObjectErr(err, bucket, object)
}
- // Wait for all the routines.
- mErrs := g.Wait()
+ // List all online disks.
+ _, modTime := listOnlineDisks(disks, metaArr, errs)
- return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, getWriteQuorum(len(disks)))
+ // Pick latest valid metadata.
+ fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
+ if err != nil {
+ return toObjectErr(err, bucket, object)
+ }
+ // Update metadata
+ for k, v := range meta {
+ fi.Metadata[k] = v
+ }
+ if fi.Deleted {
+ if opts.VersionID == "" {
+ return toObjectErr(errFileNotFound, bucket, object)
+ }
+ return toObjectErr(errMethodNotAllowed, bucket, object)
+ }
+ for i := range metaArr {
+ if errs[i] != nil {
+ // Avoid disks where loading metadata fail
+ continue
+ }
+ metaArr[i].Metadata = fi.Metadata
+ }
+ tempObj := mustGetUUID()
+ // Write unique `xl.meta` for each disk.
+ if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
+ return toObjectErr(err, bucket, object)
+ }
+ // Atomically rename metadata from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
+ return toObjectErr(err, bucket, object)
+ }
+ return nil
}
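// Flow of the '+' updateObjectMeta above: read every xl.meta, establish
// quorum, pick the latest valid FileInfo, merge the caller's metadata into
// it, copy the result onto each readable disk's copy, then tmp-write and
// rename atomically.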
// DeleteObjectTags - delete object tags from an existing object


@@ -24,7 +24,6 @@ import (
"io"
"io/ioutil"
"os"
"strconv"
"testing"
humanize "github.com/dustin/go-humanize"
@@ -110,10 +109,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
for _, test := range testCases {
test := test
t.Run("", func(t *testing.T) {
_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
if err != nil {
t.Fatal("dir/obj not found before last test")
}
_, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{})
if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr)
@@ -302,7 +297,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
- buf := make([]byte, smallFileThreshold*16)
+ buf := make([]byte, 129<<10)
if _, err = io.ReadFull(crand.Reader, buf); err != nil {
t.Fatal(err)
}
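// Editorial note: 129<<10 keeps the test object just above the small-file
// threshold (assumed to be 128KiB here), so reads must touch the on-disk
// erasure-coded shards rather than any inlined data.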
@@ -467,7 +462,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
object := "object"
opts := ObjectOptions{}
// Create "object" under "bucket".
- _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts)
+ _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
@@ -475,8 +470,8 @@ func TestPutObjectNoQuorum(t *testing.T) {
// Make 9 disks offline, which leaves less than quorum number of disks
// in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method
- // invocations, where f - [0,4)
- for f := 0; f < 2; f++ {
+ // invocations, where f - [0,3)
+ for f := 0; f < 3; f++ {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
@@ -496,78 +491,13 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
z.serverPools[0].erasureDisksMu.Unlock()
// Upload new content to same object "object"
- _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts)
+ _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if !errors.Is(err, errErasureWriteQuorum) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
}
}
}
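// naughtyDisk is a test wrapper that starts failing StorageAPI calls per
// the scripted diskErrors map; with 9 of 16 disks wrapped, fewer healthy
// disks remain than the write quorum, which is what the assertions expect.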
func TestPutObjectNoQuorumSmall(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
// Create "object" under "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts)
if err != nil {
t.Fatal(err)
}
// Make 9 disks offline, which leaves less than quorum number of disks
// in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,2)
for f := 0; f < 2; f++ {
t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
}
erasureDisks := xl.getDisks()
for i := range erasureDisks[:9] {
switch diskType := erasureDisks[i].(type) {
case *naughtyDisk:
erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
default:
erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
}
}
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
// Upload new content to same object "object"
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts)
if !errors.Is(err, errErasureWriteQuorum) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
}
})
}
}
func TestObjectQuorumFromMeta(t *testing.T) {
ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
}


@@ -433,7 +433,7 @@ func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []er
return storageInfo, errs
}
- func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
+ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -448,7 +448,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
}
if len(allBuckets) == 0 {
- updates <- madmin.DataUsageInfo{} // no buckets found update data usage to reflect latest state
+ updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state
return nil
}
@@ -1035,23 +1035,10 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
}
- for idx, pool := range z.serverPools {
- result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
- if err != nil {
- return "", err
- }
- // If there is a multipart upload with the same bucket/object name,
- // create the new multipart in the same pool, this will avoid
- // creating two multiparts uploads in two different pools
- if len(result.Uploads) != 0 {
- return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
- }
- }
- // We multiply the size by 2 to account for erasure coding.
- idx := z.getAvailablePoolIdx(ctx, (1<<30)*2)
- if idx < 0 {
- return "", toObjectErr(errDiskFull)
+ // We don't know the exact size, so we ask for at least 1GiB file.
+ idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30)
+ if err != nil {
+ return "", err
}
return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
@@ -1193,13 +1180,20 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
- // Purge any existing object.
- for _, pool := range z.serverPools {
- _, err := pool.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
- if err == nil {
- pool.DeleteObject(ctx, bucket, object, opts)
- }
+ for _, pool := range z.serverPools {
+ result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
+ if err != nil {
+ return objInfo, err
+ }
+ if result.Lookup(uploadID) {
+ return pool.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
+ }
}
return objInfo, InvalidUploadID{
Bucket: bucket,
Object: object,
@@ -1784,22 +1778,6 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
}
}
// PutObjectMetadata - replace or add tags to an existing object
func (z *erasureServerPools) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
object = encodeDirObject(object)
if z.SinglePool() {
return z.serverPools[0].PutObjectMetadata(ctx, bucket, object, opts)
}
// We don't know the size here set 1GiB atleast.
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
if err != nil {
return ObjectInfo{}, err
}
return z.serverPools[idx].PutObjectMetadata(ctx, bucket, object, opts)
}
// PutObjectTags - replace or add tags to an existing object
func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
object = encodeDirObject(object)


@@ -355,7 +355,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
sets: make([]*erasureObjects, setCount),
erasureDisks: make([][]StorageAPI, setCount),
erasureLockers: make([][]dsync.NetLocker, setCount),
- erasureLockOwner: globalLocalNodeName,
+ erasureLockOwner: GetLocalPeer(globalEndpoints),
endpoints: endpoints,
endpointStrings: endpointStrings,
setCount: setCount,
@@ -612,7 +612,7 @@ func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
}
- errs := make([]error, 0, len(s.sets)*s.setDriveCount)
+ var errs []error
for i := range s.sets {
errs = append(errs, storageInfoErrs[i]...)
}
@@ -1318,12 +1318,6 @@ func (s *erasureSets) HealObject(ctx context.Context, bucket, object, versionID
return s.getHashedSet(object).HealObject(ctx, bucket, object, versionID, opts)
}
// PutObjectMetadata - replace or add metadata to an existing object/version
func (s *erasureSets) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
er := s.getHashedSet(object)
return er.PutObjectMetadata(ctx, bucket, object, opts)
}
// PutObjectTags - replace or add tags to an existing object
func (s *erasureSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
er := s.getHashedSet(object)


@@ -435,11 +435,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
}
}()
// Shuffle disks to ensure a total randomness of bucket/disk association to ensure
// that objects that are not present in all disks are accounted and ILM applied.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
r.Shuffle(len(disks), func(i, j int) { disks[i], disks[j] = disks[j], disks[i] })
// Start one scanner per disk
var wg sync.WaitGroup
wg.Add(len(disks))


@@ -663,7 +663,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
// Compute the local disks eligible for meta volumes (re)initialization
- disksToInit := make([]StorageAPI, 0, len(storageDisks))
+ var disksToInit []StorageAPI
for index := range storageDisks {
if formats[index] == nil || storageDisks[index] == nil || !storageDisks[index].IsLocal() {
// Ignore create meta volume on disks which are not found or not local.
@@ -913,7 +913,7 @@ func makeFormatErasureMetaVolumes(disk StorageAPI) error {
return errDiskNotFound
}
// Attempt to create MinIO internal buckets.
- return disk.MakeVolBulk(context.TODO(), minioMetaBucket, minioMetaTmpBucket, minioMetaMultipartBucket, minioMetaTmpDeletedBucket, dataUsageBucket, minioMetaTmpBucket+"-old")
+ return disk.MakeVolBulk(context.TODO(), minioMetaBucket, minioMetaTmpBucket, minioMetaMultipartBucket, dataUsageBucket)
}
// Initialize a new set of set formats which will be written to all disks.


@@ -1,28 +0,0 @@
// Code generated by "stringer -type=format -trimprefix=format untar.go"; DO NOT EDIT.
package cmd
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[formatUnknown-0]
_ = x[formatGzip-1]
_ = x[formatZstd-2]
_ = x[formatLZ4-3]
_ = x[formatS2-4]
_ = x[formatBZ2-5]
}
const _format_name = "UnknownGzipZstdLZ4S2BZ2"
var _format_index = [...]uint8{0, 7, 11, 15, 18, 20, 23}
func (i format) String() string {
if i < 0 || i >= format(len(_format_index)-1) {
return "format(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _format_name[_format_index[i]:_format_index[i+1]]
}


@@ -237,7 +237,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
}
// NSScanner returns data usage stats of the current FS deployment
- func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
+ func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
// Load bucket totals
var totalCache dataUsageCache
err := totalCache.load(ctx, fs, dataUsageCacheName)


@@ -47,17 +47,11 @@ func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo,
}
// NSScanner - scanner is not implemented for gateway
- func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
+ func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
logger.CriticalIf(ctx, errors.New("not implemented"))
return NotImplemented{}
}
// PutObjectMetadata - not implemented for gateway.
func (a GatewayUnsupported) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
logger.CriticalIf(ctx, errors.New("not implemented"))
return ObjectInfo{}, NotImplemented{}
}
// NewNSLock is a dummy stub for gateway.
func (a GatewayUnsupported) NewNSLock(bucket string, objects ...string) RWLocker {
logger.CriticalIf(context.Background(), errors.New("not implemented"))


@@ -407,20 +407,6 @@ func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object s
return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
}
func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects {
_, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
}
return dobjects, errs
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {


@@ -20,7 +20,6 @@ import (
"context"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/minio/minio-go/v7/pkg/set"
@@ -64,7 +63,6 @@ func setRequestHeaderSizeLimitHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if isHTTPHeaderSizeTooLarge(r.Header) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrMetadataTooLarge), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsHeader, 1)
return
}
h.ServeHTTP(w, r)
@@ -346,7 +344,6 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
// header, for all requests where Date header is not
// present we will reject such clients.
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
@@ -354,7 +351,6 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
}
@@ -362,6 +358,105 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
})
}
var supportedDummyBucketAPIs = map[string][]string{
"acl": {http.MethodPut, http.MethodGet},
"cors": {http.MethodGet},
"website": {http.MethodGet, http.MethodDelete},
"logging": {http.MethodGet},
"accelerate": {http.MethodGet},
"requestPayment": {http.MethodGet},
}
// List of not implemented bucket queries
var notImplementedBucketResourceNames = map[string]struct{}{
"cors": {},
"metrics": {},
"website": {},
"logging": {},
"inventory": {},
"accelerate": {},
"requestPayment": {},
"analytics": {},
"intelligent-tiering": {},
"ownershipControls": {},
"publicAccessBlock": {},
}
// Checks requests for not implemented Bucket resources
func ignoreNotImplementedBucketResources(req *http.Request) bool {
for name := range req.URL.Query() {
methods, ok := supportedDummyBucketAPIs[name]
if ok {
for _, method := range methods {
if method == req.Method {
return false
}
}
}
if _, ok := notImplementedBucketResourceNames[name]; ok {
return true
}
}
return false
}
var supportedDummyObjectAPIs = map[string][]string{
"acl": {http.MethodGet, http.MethodPut},
}
// List of not implemented object APIs
var notImplementedObjectResourceNames = map[string]struct{}{
"torrent": {},
}
// Checks requests for not implemented Object resources
func ignoreNotImplementedObjectResources(req *http.Request) bool {
for name := range req.URL.Query() {
methods, ok := supportedDummyObjectAPIs[name]
if ok {
for _, method := range methods {
if method == req.Method {
return false
}
}
}
if _, ok := notImplementedObjectResourceNames[name]; ok {
return true
}
}
return false
}
// setIgnoreResourcesHandler -
// Ignore resources handler is wrapper handler used for API request resource validation
// Since we do not support all the S3 queries, it is necessary for us to throw back a
// valid error message indicating that requested feature is not implemented.
func setIgnoreResourcesHandler(h http.Handler) http.Handler {
// Resource handler ServeHTTP() wrapper
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
bucketName, objectName := request2BucketObjectName(r)
// If bucketName is present and not objectName check for bucket level resource queries.
if bucketName != "" && objectName == "" {
if ignoreNotImplementedBucketResources(r) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
}
// If bucketName and objectName are present check for its resource queries.
if bucketName != "" && objectName != "" {
if ignoreNotImplementedObjectResources(r) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
}
// Serve HTTP.
h.ServeHTTP(w, r)
})
}
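// Worked example (hypothetical requests): GET /bucket?website matches a
// dummy bucket API above and passes through, while GET /bucket?inventory has
// no dummy method and is answered with ErrNotImplemented.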
// setHttpStatsHandler sets a http Stats handler to gather HTTP statistics
func setHTTPStatsHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -422,7 +517,6 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
// Check for bad components in URL path.
if hasBadPathComponent(r.URL.Path) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
return
}
// Check for bad components in URL query values.
@@ -430,14 +524,12 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
for _, v := range vv {
if hasBadPathComponent(v) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
return
}
}
}
if hasMultipleAuth(r) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
return
}
h.ServeHTTP(w, r)


@@ -18,7 +18,6 @@ package cmd
import (
"crypto/x509"
"errors"
"net/http"
"os"
"sync"
@@ -27,9 +26,8 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/pkg/bucket/bandwidth"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/kms"
"github.com/dustin/go-humanize"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/dns"
@@ -37,6 +35,7 @@ import (
"github.com/minio/minio/cmd/config/identity/openid"
"github.com/minio/minio/cmd/config/policy/opa"
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/auth"
etcd "go.etcd.io/etcd/clientv3"
@@ -182,9 +181,9 @@ var (
globalHTTPServerErrorCh = make(chan error)
globalOSSignalCh = make(chan os.Signal, 1)
- // global Trace system to send HTTP request/response
- // and Storage/OS calls info to registered listeners.
- globalTrace = pubsub.New()
+ // global Trace system to send HTTP request/response logs to
+ // registered listeners
+ globalHTTPTrace = pubsub.New()
// global Listen system to send S3 API events to registered listeners
globalHTTPListen = pubsub.New()
@@ -195,9 +194,6 @@ var (
globalEndpoints EndpointServerPools
// The name of this local node, fetched from arguments
globalLocalNodeName string
globalRemoteEndpoints map[string]Endpoint
// Global server's network statistics
@@ -233,7 +229,7 @@ var (
globalCacheConfig cache.Config
// Initialized KMS configuration for disk cache
- globalCacheKMS kms.KMS
+ globalCacheKMS crypto.KMS
// Allocated etcd endpoint for config and bucket DNS.
globalEtcdClient *etcd.Client
@@ -246,7 +242,7 @@ var (
globalDNSConfig dns.Store
// GlobalKMS initialized KMS configuration
- GlobalKMS kms.KMS
+ GlobalKMS crypto.KMS
// Auto-Encryption, if enabled, turns any non-SSE-C request
// into an SSE-S3 request. If enabled a valid, non-empty KMS
@@ -258,7 +254,7 @@ var (
globalCompressConfig compress.Config
// Some standard object extensions which we strictly dis-allow for compression.
- standardExcludeCompressExtensions = []string{".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov", ".jpg", ".png", ".gif"}
+ standardExcludeCompressExtensions = []string{".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov"}
// Some standard content-types which we strictly dis-allow for compression.
standardExcludeCompressContentTypes = []string{"video/*", "audio/*", "application/zip", "application/x-gzip", "application/x-zip-compressed", " application/x-compress", "application/x-spoon"}
@@ -296,8 +292,6 @@ var (
// Add new variable global values here.
)
var errSelfTestFailure = errors.New("self test failed. unsafe to start server")
// Returns minio global information, as a key value map.
// returned list of global values is not an exhaustive
// list. Feel free to add new relevant fields.


@@ -364,24 +364,24 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil
// Log headers and body.
func httpTraceAll(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
- if globalTrace.NumSubscribers() == 0 {
+ if globalHTTPTrace.NumSubscribers() == 0 {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, true, w, r)
- globalTrace.Publish(trace)
+ globalHTTPTrace.Publish(trace)
}
}
// Log only the headers.
func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
- if globalTrace.NumSubscribers() == 0 {
+ if globalHTTPTrace.NumSubscribers() == 0 {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, false, w, r)
- globalTrace.Publish(trace)
+ globalHTTPTrace.Publish(trace)
}
}
@@ -546,7 +546,7 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
// gets host name for current node
func getHostName(r *http.Request) (hostName string) {
if globalIsDistErasure {
- hostName = globalLocalNodeName
+ hostName = GetLocalPeer(globalEndpoints)
} else {
hostName = r.Host
}


@@ -35,7 +35,7 @@ import (
func getLocalCPUInfo(ctx context.Context, r *http.Request) madmin.ServerCPUInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
info, err := cpuhw.InfoWithContext(ctx)
@@ -106,7 +106,7 @@ func getLocalDrives(ctx context.Context, parallel bool, endpointServerPools Endp
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(endpointServerPools)
}
if parallel {
return madmin.ServerDrivesInfo{
@@ -123,7 +123,7 @@ func getLocalDrives(ctx context.Context, parallel bool, endpointServerPools Endp
func getLocalMemInfo(ctx context.Context, r *http.Request) madmin.ServerMemInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
swap, err := memhw.SwapMemoryWithContext(ctx)
@@ -152,7 +152,7 @@ func getLocalMemInfo(ctx context.Context, r *http.Request) madmin.ServerMemInfo
func getLocalProcInfo(ctx context.Context, r *http.Request) madmin.ServerProcInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
errProcInfo := func(tag string, err error) madmin.ServerProcInfo {


@@ -34,7 +34,7 @@ import (
func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
srvrOsInfo := madmin.ServerOsInfo{Addr: addr}
@@ -65,7 +65,7 @@ func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo {
func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskHwInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
parts, err := diskhw.PartitionsWithContext(ctx, true)


@@ -30,7 +30,7 @@ import (
func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskHwInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
return madmin.ServerDiskHwInfo{
@@ -42,7 +42,7 @@ func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskH
func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo {
addr := r.Host
if globalIsDistErasure {
- addr = globalLocalNodeName
+ addr = GetLocalPeer(globalEndpoints)
}
return madmin.ServerOsInfo{


@@ -137,15 +137,11 @@ func (stats *HTTPAPIStats) Load() map[string]int {
// HTTPStats holds statistics information about
// HTTP requests made by all clients
type HTTPStats struct {
- s3RequestsInQueue int32
- currentS3Requests HTTPAPIStats
- totalS3Requests HTTPAPIStats
- totalS3Errors HTTPAPIStats
- totalS3Canceled HTTPAPIStats
- rejectedRequestsAuth uint64
- rejectedRequestsTime uint64
- rejectedRequestsHeader uint64
- rejectedRequestsInvalid uint64
+ s3RequestsInQueue int32
+ currentS3Requests HTTPAPIStats
+ totalS3Requests HTTPAPIStats
+ totalS3Errors HTTPAPIStats
+ totalS3Canceled HTTPAPIStats
}
func (st *HTTPStats) addRequestsInQueue(i int32) {
@@ -156,10 +152,6 @@ func (st *HTTPStats) addRequestsInQueue(i int32) {
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{}
serverStats.S3RequestsInQueue = atomic.LoadInt32(&st.s3RequestsInQueue)
serverStats.TotalS3RejectedAuth = atomic.LoadUint64(&st.rejectedRequestsAuth)
serverStats.TotalS3RejectedTime = atomic.LoadUint64(&st.rejectedRequestsTime)
serverStats.TotalS3RejectedHeader = atomic.LoadUint64(&st.rejectedRequestsHeader)
serverStats.TotalS3RejectedInvalid = atomic.LoadUint64(&st.rejectedRequestsInvalid)
serverStats.CurrentS3Requests = ServerHTTPAPIStats{
APIStats: st.currentS3Requests.Load(),
}


@@ -124,26 +124,20 @@ func WebTrace(ri *jsonrpc.RequestInfo) trace.Info {
reqHeaders.Set("Transfer-Encoding", strings.Join(r.TransferEncoding, ","))
}
- now := time.Now().UTC()
- t := trace.Info{TraceType: trace.HTTP, FuncName: name, Time: now}
+ t := trace.Info{FuncName: name}
t.NodeName = r.Host
if globalIsDistErasure {
- t.NodeName = globalLocalNodeName
- }
- if t.NodeName == "" {
- t.NodeName = globalLocalNodeName
+ t.NodeName = GetLocalPeer(globalEndpoints)
}
- // strip only standard port from the host address
- if host, port, err := net.SplitHostPort(t.NodeName); err == nil {
- if port == "443" || port == "80" {
- t.NodeName = host
- }
+ // strip port from the host address
+ if host, _, err := net.SplitHostPort(t.NodeName); err == nil {
+ t.NodeName = host
}
vars := mux.Vars(r)
rq := trace.RequestInfo{
- Time: now,
+ Time: time.Now().UTC(),
Proto: r.Proto,
Method: r.Method,
Path: SlashSeparator + pathJoin(vars["bucket"], vars["object"]),
@@ -191,30 +185,20 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ
reqHeaders.Set("Transfer-Encoding", strings.Join(r.TransferEncoding, ","))
}
- reqBodyRecorder := &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders}
+ var reqBodyRecorder *recordRequest
+ t := trace.Info{FuncName: name}
+ reqBodyRecorder = &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders}
r.Body = ioutil.NopCloser(reqBodyRecorder)
- now := time.Now().UTC()
- t := trace.Info{TraceType: trace.HTTP, FuncName: name, Time: now}
t.NodeName = r.Host
if globalIsDistErasure {
- t.NodeName = globalLocalNodeName
+ t.NodeName = GetLocalPeer(globalEndpoints)
}
- if t.NodeName == "" {
- t.NodeName = globalLocalNodeName
+ // strip port from the host address
+ if host, _, err := net.SplitHostPort(t.NodeName); err == nil {
+ t.NodeName = host
}
- // strip only standard port from the host address
- if host, port, err := net.SplitHostPort(t.NodeName); err == nil {
- if port == "443" || port == "80" {
- t.NodeName = host
- }
- }
rq := trace.RequestInfo{
- Time: now,
+ Time: time.Now().UTC(),
Proto: r.Proto,
Method: r.Method,
Path: r.URL.Path,


@@ -121,8 +121,6 @@ func NewDNSCache(freq time.Duration, lookupTimeout time.Duration, loggerOnce fun
doneCh: make(chan struct{}),
}
- rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
timer := time.NewTimer(freq)
go func() {
defer timer.Stop()
@@ -130,10 +128,7 @@ func NewDNSCache(freq time.Duration, lookupTimeout time.Duration, loggerOnce fun
for {
select {
case <-timer.C:
- // Make sure that refreshes on DNS do not be attempted
- // at the same time, allows for reduced load on the
- // DNS servers.
- timer.Reset(time.Duration(rnd.Float64() * float64(freq)))
+ timer.Reset(freq)
r.Refresh()
case <-r.doneCh:


@@ -88,8 +88,6 @@ const (
AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold"
AmzObjectLockBypassGovernance = "X-Amz-Bypass-Governance-Retention"
AmzBucketReplicationStatus = "X-Amz-Replication-Status"
AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract"
// Multipart parts count
AmzMpPartsCount = "x-amz-mp-parts-count"


@@ -33,7 +33,6 @@ import (
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/pkg/certs"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/fips"
)
const (
@@ -160,6 +159,28 @@ func (srv *Server) Shutdown() error {
}
}
// Secure Go implementations of modern TLS ciphers
// The following ciphers are excluded because:
// - RC4 ciphers: RC4 is broken
// - 3DES ciphers: Because of the 64 bit blocksize of DES (Sweet32)
// - CBC-SHA256 ciphers: No countermeasures against Lucky13 timing attack
// - CBC-SHA ciphers: Legacy ciphers (SHA-1) and non-constant time
// implementation of CBC.
// (CBC-SHA ciphers can be enabled again if required)
// - RSA key exchange ciphers: Disabled because of dangerous PKCS1-v1.5 RSA
// padding scheme. See Bleichenbacher attacks.
var secureCipherSuites = []uint16{
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
// Go only provides constant-time implementations of Curve25519 and NIST P-256 curve.
var secureCurves = []tls.CurveID{tls.X25519, tls.CurveP256}
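A hedged sketch, assuming the surrounding package already imports crypto/tls and pkg/certs: the vars above slot into a server-side config roughly like this.

// exampleHardenedTLS is an editorial sketch, not code from this diff; it
// reuses the package-level secureCipherSuites and secureCurves above.
func exampleHardenedTLS(getCert certs.GetCertificateFunc) *tls.Config {
	return &tls.Config{
		PreferServerCipherSuites: true, // honor the server's cipher ordering
		MinVersion:               tls.VersionTLS12,
		NextProtos:               []string{"http/1.1", "h2"},
		GetCertificate:           getCert,
		CipherSuites:             secureCipherSuites,
		CurvePreferences:         secureCurves,
	}
}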
// NewServer - creates new HTTP server using given arguments.
func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificateFunc) *Server {
secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn
@@ -167,15 +188,17 @@ func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificat
var tlsConfig *tls.Config
if getCert != nil {
tlsConfig = &tls.Config{
+ // TLS hardening
PreferServerCipherSuites: true,
MinVersion: tls.VersionTLS12,
NextProtos: []string{"http/1.1", "h2"},
- GetCertificate: getCert,
}
- if secureCiphers || fips.Enabled() {
- tlsConfig.CipherSuites = fips.CipherSuitesTLS()
- tlsConfig.CurvePreferences = fips.EllipticCurvesTLS()
- }
+ tlsConfig.GetCertificate = getCert
}
+ if secureCiphers && tlsConfig != nil {
+ tlsConfig.CipherSuites = secureCipherSuites
+ tlsConfig.CurvePreferences = secureCurves
+ }
httpServer := &Server{


@@ -55,7 +55,7 @@ func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
// suffix := "config.json"
// result is foo
func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
- return pathClean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
+ return path.Clean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
}
// IAMEtcdStore implements IAMStorageAPI


@@ -30,6 +30,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -672,10 +673,8 @@ func (sys *IAMSys) DeletePolicy(policyName string) error {
if pset.Contains(policyName) {
cr, ok := sys.iamUsersMap[u]
if !ok {
- // This case can happen when an temporary account
- // is deleted or expired, removed it from userPolicyMap.
- delete(sys.iamUserPolicyMap, u)
- continue
+ // This case cannot happen
+ return errNoSuchUser
}
pset.Remove(policyName)
// User is from STS if the cred are temporary
@@ -842,31 +841,40 @@ func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyNa
return errServerNotInitialized
}
- sys.store.lock()
- defer sys.store.unlock()
ttl := int64(cred.Expiration.Sub(UTCNow()).Seconds())
// If OPA is not set we honor any policy claims for this
// temporary user which match with pre-configured canned
// policies for this server.
if globalPolicyOPA == nil && policyName != "" {
+ var availablePolicies []iampolicy.Policy
mp := newMappedPolicy(policyName)
- combinedPolicy := sys.GetCombinedPolicy(mp.toSlice()...)
- if combinedPolicy.IsEmpty() {
- return fmt.Errorf("specified policy %s, not found %w", policyName, errNoSuchPolicy)
+ for _, policy := range mp.toSlice() {
+ p, found := sys.iamPolicyDocsMap[policy]
+ if found {
+ availablePolicies = append(availablePolicies, p)
+ }
}
+ sys.store.lock()
+ defer sys.store.unlock()
+ combinedPolicy := availablePolicies[0]
+ for i := 1; i < len(availablePolicies); i++ {
+ combinedPolicy.Statements = append(combinedPolicy.Statements,
+ availablePolicies[i].Statements...)
+ }
+ if combinedPolicy.IsEmpty() {
+ delete(sys.iamUserPolicyMap, accessKey)
+ return nil
+ }
if err := sys.store.saveMappedPolicy(context.Background(), accessKey, stsUser, false, mp, options{ttl: ttl}); err != nil {
+ sys.store.unlock()
return err
}
sys.iamUserPolicyMap[accessKey] = mp
+ } else {
+ sys.store.lock()
+ defer sys.store.unlock()
}
u := newUserIdentity(cred)
@@ -1038,9 +1046,9 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
SecretKey: cred.SecretKey,
Status: func() string {
if status == madmin.AccountEnabled {
- return auth.AccountOn
+ return config.EnableOn
}
- return auth.AccountOff
+ return config.EnableOff
}(),
})
@@ -1052,25 +1060,19 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
return nil
}
- type newServiceAccountOpts struct {
- sessionPolicy *iampolicy.Policy
- accessKey string
- secretKey string
- }
// NewServiceAccount - create a new service account
- func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, groups []string, opts newServiceAccountOpts) (auth.Credentials, error) {
+ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, groups []string, sessionPolicy *iampolicy.Policy) (auth.Credentials, error) {
if !sys.Initialized() {
return auth.Credentials{}, errServerNotInitialized
}
var policyBuf []byte
- if opts.sessionPolicy != nil {
- err := opts.sessionPolicy.Validate()
+ if sessionPolicy != nil {
+ err := sessionPolicy.Validate()
if err != nil {
return auth.Credentials{}, err
}
- policyBuf, err = json.Marshal(opts.sessionPolicy)
+ policyBuf, err = json.Marshal(sessionPolicy)
if err != nil {
return auth.Credentials{}, err
}
@@ -1123,22 +1125,13 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
m[iamPolicyClaimNameSA()] = "inherited-policy"
}
- var (
- cred auth.Credentials
- err error
- )
- if len(opts.accessKey) > 0 {
- cred, err = auth.CreateNewCredentialsWithMetadata(opts.accessKey, opts.secretKey, m, globalActiveCred.SecretKey)
- } else {
- cred, err = auth.GetNewCredentialsWithMetadata(m, globalActiveCred.SecretKey)
- }
+ secret := globalActiveCred.SecretKey
+ cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
if err != nil {
return auth.Credentials{}, err
}
cred.ParentUser = parentUser
cred.Groups = groups
cred.Status = string(auth.AccountOn)
u := newUserIdentity(cred)
@@ -1151,69 +1144,8 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
return cred, nil
}
type updateServiceAccountOpts struct {
sessionPolicy *iampolicy.Policy
secretKey string
status string
}
// UpdateServiceAccount - edit a service account
func (sys *IAMSys) UpdateServiceAccount(ctx context.Context, accessKey string, opts updateServiceAccountOpts) error {
if !sys.Initialized() {
return errServerNotInitialized
}
sys.store.lock()
defer sys.store.unlock()
cr, ok := sys.iamUsersMap[accessKey]
if !ok || !cr.IsServiceAccount() {
return errNoSuchServiceAccount
}
if opts.secretKey != "" {
cr.SecretKey = opts.secretKey
}
if opts.status != "" {
cr.Status = opts.status
}
if opts.sessionPolicy != nil {
m := make(map[string]interface{})
err := opts.sessionPolicy.Validate()
if err != nil {
return err
}
policyBuf, err := json.Marshal(opts.sessionPolicy)
if err != nil {
return err
}
if len(policyBuf) > 16*humanize.KiByte {
return fmt.Errorf("Session policy should not exceed 16 KiB characters")
}
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf)
m[iamPolicyClaimNameSA()] = "embedded-policy"
m[parentClaim] = cr.ParentUser
cr.SessionToken, err = auth.JWTSignWithAccessKey(accessKey, m, globalActiveCred.SecretKey)
if err != nil {
return err
}
}
u := newUserIdentity(cr)
if err := sys.store.saveUserIdentity(context.Background(), u.Credentials.AccessKey, srvAccUser, u); err != nil {
return err
}
sys.iamUsersMap[u.Credentials.AccessKey] = u.Credentials
return nil
}
// ListServiceAccounts - lists all services accounts associated to a specific user
- func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]auth.Credentials, error) {
+ func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]string, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
@@ -1223,56 +1155,30 @@ func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([
sys.store.rlock()
defer sys.store.runlock()
- var serviceAccounts []auth.Credentials
- for _, v := range sys.iamUsersMap {
+ var serviceAccounts []string
+ for k, v := range sys.iamUsersMap {
if v.IsServiceAccount() && v.ParentUser == accessKey {
- // Hide secret key & session key here
- v.SecretKey = ""
- v.SessionToken = ""
- serviceAccounts = append(serviceAccounts, v)
+ serviceAccounts = append(serviceAccounts, k)
}
}
return serviceAccounts, nil
}
- // GetServiceAccount - gets information about a service account
- func (sys *IAMSys) GetServiceAccount(ctx context.Context, accessKey string) (auth.Credentials, *iampolicy.Policy, error) {
+ // GetServiceAccountParent - gets information about a service account
+ func (sys *IAMSys) GetServiceAccountParent(ctx context.Context, accessKey string) (string, error) {
if !sys.Initialized() {
- return auth.Credentials{}, nil, errServerNotInitialized
+ return "", errServerNotInitialized
}
sys.store.rlock()
defer sys.store.runlock()
sa, ok := sys.iamUsersMap[accessKey]
- if !ok || !sa.IsServiceAccount() {
- return auth.Credentials{}, nil, errNoSuchServiceAccount
+ if ok && sa.IsServiceAccount() {
+ return sa.ParentUser, nil
}
- var embeddedPolicy *iampolicy.Policy
- jwtClaims, err := auth.ExtractClaims(sa.SessionToken, globalActiveCred.SecretKey)
- if err == nil {
- pt, ptok := jwtClaims.Lookup(iamPolicyClaimNameSA())
- sp, spok := jwtClaims.Lookup(iampolicy.SessionPolicyName)
- if ptok && spok && pt == "embedded-policy" {
- policyBytes, err := base64.StdEncoding.DecodeString(sp)
- if err == nil {
- p, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))
- if err == nil {
- policy := iampolicy.Policy{}.Merge(*p)
- embeddedPolicy = &policy
- }
- }
- }
- }
- // Hide secret & session keys
- sa.SecretKey = ""
- sa.SessionToken = ""
- return sa, embeddedPolicy, nil
+ return "", nil
}
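
The removed `GetServiceAccount` body above shows the inverse direction: the embedded policy is recovered from the verified session-token claims by checking the policy-type claim, base64-decoding the policy claim, and parsing the JSON. A hedged sketch of that decode path with illustrative claim names:

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// extractSessionPolicy is a hypothetical inverse of the embedding step:
// given verified JWT claims, recover the policy document that was
// base64-encoded under the session-policy claim.
func extractSessionPolicy(claims map[string]interface{}) (map[string]interface{}, error) {
	pt, _ := claims["sa-policy"].(string) // claim names illustrative
	sp, ok := claims["sessionPolicy"].(string)
	if pt != "embedded-policy" || !ok {
		return nil, nil // no embedded policy: caller falls back to the parent's policy
	}
	raw, err := base64.StdEncoding.DecodeString(sp)
	if err != nil {
		return nil, err
	}
	var policy map[string]interface{}
	if err := json.Unmarshal(raw, &policy); err != nil {
		return nil, err
	}
	return policy, nil
}

func main() {
	claims := map[string]interface{}{
		"sa-policy":     "embedded-policy",
		"sessionPolicy": base64.StdEncoding.EncodeToString([]byte(`{"Version":"2012-10-17"}`)),
	}
	p, err := extractSessionPolicy(claims)
	fmt.Println(p, err)
}
```
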
// DeleteServiceAccount - delete a service account
@@ -1325,12 +1231,7 @@ func (sys *IAMSys) CreateUser(accessKey string, uinfo madmin.UserInfo) error {
u := newUserIdentity(auth.Credentials{
AccessKey: accessKey,
SecretKey: uinfo.SecretKey,
Status: func() string {
if uinfo.Status == madmin.AccountEnabled {
return auth.AccountOn
}
return auth.AccountOff
}(),
Status: string(uinfo.Status),
})
if err := sys.store.saveUserIdentity(context.Background(), accessKey, regularUser, u); err != nil {
@@ -1729,14 +1630,7 @@ func (sys *IAMSys) policyDBSet(name, policyName string, userType IAMUserType, is
// Handle policy mapping removal
if policyName == "" {
if sys.usersSysType == LDAPUsersSysType {
// Add a fallback removal towards previous content that may come back
// as a ghost user due to lack of delete; this change was
// introduced in PR #11840
sys.store.deleteMappedPolicy(context.Background(), name, regularUser, false)
}
err := sys.store.deleteMappedPolicy(context.Background(), name, userType, isGroup)
if err != nil && err != errNoSuchPolicy {
if err := sys.store.deleteMappedPolicy(context.Background(), name, userType, isGroup); err != nil && err != errNoSuchPolicy {
return err
}
if !isGroup {

View File

@@ -130,8 +130,8 @@ func (h *Target) startHTTPLogger() {
resp, err := h.client.Do(req)
cancel()
if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%w', please check your endpoint configuration",
h.endpoint, err), h.endpoint)
logger.LogIf(ctx, fmt.Errorf("%s returned '%w', please check your endpoint configuration\n",
h.endpoint, err))
continue
}
@@ -141,11 +141,11 @@ func (h *Target) startHTTPLogger() {
if resp.StatusCode != http.StatusOK {
switch resp.StatusCode {
case http.StatusForbidden:
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%s', please check if your auth token is correctly set",
h.endpoint, resp.Status), h.endpoint)
logger.LogIf(ctx, fmt.Errorf("%s returned '%s', please check if your auth token is correctly set",
h.endpoint, resp.Status))
default:
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%s', please check your endpoint configuration",
h.endpoint, resp.Status), h.endpoint)
logger.LogIf(ctx, fmt.Errorf("%s returned '%s', please check your endpoint configuration",
h.endpoint, resp.Status))
}
}
}
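
`LogOnceIf` takes the endpoint as a deduplication key so that a persistently failing webhook target logs one line instead of one per retry. A sketch of what a once-per-key logger of this shape can look like (an illustration of the pattern, not MinIO's actual logger internals):

```
package main

import (
	"fmt"
	"sync"
)

// onceLogger suppresses repeated messages for the same key until the
// message for that key changes.
type onceLogger struct {
	mu   sync.Mutex
	seen map[string]string // key -> last message logged
}

func (l *onceLogger) LogOnceIf(err error, key string) {
	if err == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.seen == nil {
		l.seen = make(map[string]string)
	}
	if l.seen[key] == err.Error() {
		return // same error for this key already logged; stay quiet
	}
	l.seen[key] = err.Error()
	fmt.Println("ERROR:", err)
}

func main() {
	var l onceLogger
	err := fmt.Errorf("http://logger:8080 returned '403 Forbidden', please check if your auth token is correctly set")
	l.LogOnceIf(err, "http://logger:8080") // logged
	l.LogOnceIf(err, "http://logger:8080") // suppressed
}
```
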

View File

@@ -128,7 +128,7 @@ func (e *metaCacheEntry) fileInfo(bucket string) (*FileInfo, error) {
}, nil
}
if e.cached == nil {
fi, err := getFileInfo(e.metadata, bucket, e.name, "", false)
fi, err := getFileInfo(e.metadata, bucket, e.name, "")
if err != nil {
return nil, err
}

View File

@@ -686,14 +686,7 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
// Update block 0 metadata.
var retries int
for {
meta := b.headerKV()
fi := FileInfo{
Metadata: make(map[string]string, len(meta)),
}
for k, v := range meta {
fi.Metadata[k] = v
}
err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), fi)
err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{})
if err == nil {
break
}
@@ -818,9 +811,6 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
if len(disks) == 0 {
return fmt.Errorf("listPathRaw: 0 drives provided")
}
// Cancel upstream if we finish before we expect.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
askDisks := len(disks)
readers := make([]*metacacheReader, askDisks)
@@ -831,8 +821,6 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
if err != nil {
return err
}
// Make sure we close the pipe so blocked writes don't stay around.
defer r.CloseWithError(context.Canceled)
// Send request to each disk.
go func() {
werr := d.WalkDir(ctx, WalkDirOptions{
@@ -844,10 +832,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
ForwardTo: opts.forwardTo,
}, w)
w.CloseWithError(werr)
if werr != io.EOF && werr != nil &&
werr.Error() != errFileNotFound.Error() &&
werr.Error() != errVolumeNotFound.Error() &&
!errors.Is(werr, context.Canceled) {
if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() {
logger.LogIf(ctx, werr)
}
}()
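
The `defer r.CloseWithError(context.Canceled)` added in this hunk is the crucial line: `WalkDir` streams entries into an `io.Pipe`, and a pipe write blocks until it is read, so a listing that returns early would otherwise strand the writer goroutine forever. Closing the read side with an error releases any blocked write, as this self-contained sketch shows:

```
package main

import (
	"context"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()

	done := make(chan error, 1)
	go func() {
		// This write blocks until the read side consumes it or is closed.
		_, err := w.Write([]byte("pending entry"))
		done <- err
	}()

	// Reader gives up early, e.g. because enough listing results arrived.
	r.CloseWithError(context.Canceled)

	// The blocked writer is released with the reader's error instead of
	// leaking as a stuck goroutine.
	fmt.Println("writer unblocked with:", <-done)
}
```
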

View File

@@ -55,7 +55,6 @@ type WalkDirOptions struct {
// WalkDir will traverse a directory and return all entries found.
// On success a sorted meta cache stream will be returned.
// Metadata has data stripped, if any.
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(opts.Bucket)
@@ -95,7 +94,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// behavior.
out <- metaCacheEntry{
name: opts.BaseDir,
metadata: xlMetaV2TrimData(metadata),
metadata: metadata,
}
} else {
if st, err := os.Lstat(pathJoin(volumeDir, opts.BaseDir, xlStorageFormatFile)); err == nil && st.Mode().IsRegular() {
@@ -108,9 +107,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
forward := opts.ForwardTo
var scanDir func(path string) error
scanDir = func(current string) error {
if contextCanceled(ctx) {
return ctx.Err()
}
entries, err := s.ListDir(ctx, opts.Bucket, current, -1)
if err != nil {
// Folder could have gone away in-between
@@ -146,9 +142,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Do not retain the file.
entries[i] = ""
if contextCanceled(ctx) {
return ctx.Err()
}
// If root was an object return it as such.
if HasSuffix(entry, xlStorageFormatFile) {
var meta metaCacheEntry
@@ -157,7 +150,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
logger.LogIf(ctx, err)
continue
}
meta.metadata = xlMetaV2TrimData(meta.metadata)
meta.name = strings.TrimSuffix(entry, xlStorageFormatFile)
meta.name = strings.TrimSuffix(meta.name, SlashSeparator)
meta.name = pathJoin(current, meta.name)
@@ -191,9 +183,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
if entry == "" {
continue
}
if contextCanceled(ctx) {
return ctx.Err()
}
meta := metaCacheEntry{name: PathJoin(current, entry)}
// If directory entry on stack before this, pop it now.
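
The `contextCanceled` checks threaded through `scanDir` bound how long an abandoned walk keeps doing I/O: because the scan recurses and loops over potentially large directories, the test has to run at the top of each recursion and inside the per-entry loop, not just once on entry. A minimal sketch of the same guard in a plain stdlib walker (the `walk` helper is illustrative):

```
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
)

// walk recursively lists dir, aborting promptly once ctx is canceled.
func walk(ctx context.Context, dir string, out func(string)) error {
	if err := ctx.Err(); err != nil {
		return err // canceled or deadline exceeded: stop before more I/O
	}
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if err := ctx.Err(); err != nil {
			return err // re-check inside the loop: directories can be large
		}
		p := filepath.Join(dir, e.Name())
		out(p)
		if e.IsDir() {
			if err := walk(ctx, p, out); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a caller that already gave up
	err := walk(ctx, ".", func(p string) { fmt.Println(p) })
	fmt.Println("walk stopped:", err)
}
```
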

View File

@@ -44,60 +44,53 @@ const (
healMetricNamespace MetricNamespace = "minio_heal"
interNodeMetricNamespace MetricNamespace = "minio_inter_node"
nodeMetricNamespace MetricNamespace = "minio_node"
minioMetricNamespace MetricNamespace = "minio"
minIOMetricNamespace MetricNamespace = "minio"
s3MetricNamespace MetricNamespace = "minio_s3"
)
const (
cacheSubsystem MetricSubsystem = "cache"
capacityRawSubsystem MetricSubsystem = "capacity_raw"
capacityUsableSubsystem MetricSubsystem = "capacity_usable"
diskSubsystem MetricSubsystem = "disk"
fileDescriptorSubsystem MetricSubsystem = "file_descriptor"
goRoutines MetricSubsystem = "go_routine"
ioSubsystem MetricSubsystem = "io"
nodesSubsystem MetricSubsystem = "nodes"
objectsSubsystem MetricSubsystem = "objects"
processSubsystem MetricSubsystem = "process"
replicationSubsystem MetricSubsystem = "replication"
requestsSubsystem MetricSubsystem = "requests"
requestsRejectedSubsystem MetricSubsystem = "requests_rejected"
timeSubsystem MetricSubsystem = "time"
trafficSubsystem MetricSubsystem = "traffic"
softwareSubsystem MetricSubsystem = "software"
sysCallSubsystem MetricSubsystem = "syscall"
usageSubsystem MetricSubsystem = "usage"
cacheSubsystem MetricSubsystem = "cache"
capacityRawSubsystem MetricSubsystem = "capacity_raw"
capacityUsableSubsystem MetricSubsystem = "capacity_usable"
diskSubsystem MetricSubsystem = "disk"
fileDescriptorSubsystem MetricSubsystem = "file_descriptor"
goRoutines MetricSubsystem = "go_routine"
ioSubsystem MetricSubsystem = "io"
nodesSubsystem MetricSubsystem = "nodes"
objectsSubsystem MetricSubsystem = "objects"
processSubsystem MetricSubsystem = "process"
replicationSubsystem MetricSubsystem = "replication"
requestsSubsystem MetricSubsystem = "requests"
timeSubsystem MetricSubsystem = "time"
trafficSubsystem MetricSubsystem = "traffic"
softwareSubsystem MetricSubsystem = "software"
sysCallSubsystem MetricSubsystem = "syscall"
usageSubsystem MetricSubsystem = "usage"
)
// MetricName is the name of an individual metric.
type MetricName string
const (
authTotal MetricName = "auth_total"
canceledTotal MetricName = "canceled_total"
errorsTotal MetricName = "errors_total"
headerTotal MetricName = "header_total"
healTotal MetricName = "heal_total"
hitsTotal MetricName = "hits_total"
inflightTotal MetricName = "inflight_total"
invalidTotal MetricName = "invalid_total"
limitTotal MetricName = "limit_total"
missedTotal MetricName = "missed_total"
waitingTotal MetricName = "waiting_total"
objectTotal MetricName = "object_total"
offlineTotal MetricName = "offline_total"
onlineTotal MetricName = "online_total"
openTotal MetricName = "open_total"
readTotal MetricName = "read_total"
timestampTotal MetricName = "timestamp_total"
writeTotal MetricName = "write_total"
total MetricName = "total"
total MetricName = "total"
errorsTotal MetricName = "error_total"
canceledTotal MetricName = "canceled_total"
healTotal MetricName = "heal_total"
hitsTotal MetricName = "hits_total"
inflightTotal MetricName = "inflight_total"
limitTotal MetricName = "limit_total"
missedTotal MetricName = "missed_total"
waitingTotal MetricName = "waiting_total"
objectTotal MetricName = "object_total"
offlineTotal MetricName = "offline_total"
onlineTotal MetricName = "online_total"
openTotal MetricName = "open_total"
readTotal MetricName = "read_total"
writeTotal MetricName = "write_total"
failedCount MetricName = "failed_count"
failedBytes MetricName = "failed_bytes"
freeBytes MetricName = "free_bytes"
pendingBytes MetricName = "pending_bytes"
pendingCount MetricName = "pending_count"
readBytes MetricName = "read_bytes"
rcharBytes MetricName = "rchar_bytes"
receivedBytes MetricName = "received_bytes"
@@ -358,16 +351,6 @@ func getNodeDiskTotalBytesMD() MetricDescription {
Type: gaugeMetric,
}
}
func getUsageLastScanActivityMD() MetricDescription {
return MetricDescription{
Namespace: minioMetricNamespace,
Subsystem: usageSubsystem,
Name: lastActivityTime,
Help: "Time elapsed (in nano seconds) since last scan activity. This is set to 0 until first scan cycle",
Type: gaugeMetric,
}
}
func getBucketUsageTotalBytesMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
@@ -422,24 +405,6 @@ func getBucketRepReceivedBytesMD() MetricDescription {
Type: gaugeMetric,
}
}
func getBucketRepPendingOperationsMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
Subsystem: replicationSubsystem,
Name: pendingCount,
Help: "Total number of objects pending replication",
Type: gaugeMetric,
}
}
func getBucketRepFailedOperationsMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
Subsystem: replicationSubsystem,
Name: failedCount,
Help: "Total number of objects which failed replication",
Type: gaugeMetric,
}
}
func getBucketObjectDistributionMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
@@ -540,42 +505,6 @@ func getS3RequestsCanceledMD() MetricDescription {
Type: counterMetric,
}
}
func getS3RejectedAuthRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: authTotal,
Help: "Total number S3 requests rejected for auth failure.",
Type: counterMetric,
}
}
func getS3RejectedHeaderRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: headerTotal,
Help: "Total number S3 requests rejected for invalid header.",
Type: counterMetric,
}
}
func getS3RejectedTimestampRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: timestampTotal,
Help: "Total number S3 requests rejected for invalid timestamp.",
Type: counterMetric,
}
}
func getS3RejectedInvalidRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: invalidTotal,
Help: "Total number S3 invalid requests.",
Type: counterMetric,
}
}
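
Each `MetricDescription` flattens to a fully-qualified Prometheus name of the form `namespace_subsystem_name`; the rejected-auth counter above, for instance, comes out as `minio_s3_requests_rejected_auth_total`. A quick sketch using the same `prometheus` client library these files import:

```
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// namespace_subsystem_name, mirroring the MetricDescription fields.
	fqName := prometheus.BuildFQName("minio_s3", "requests_rejected", "auth_total")
	fmt.Println(fqName) // minio_s3_requests_rejected_auth_total

	desc := prometheus.NewDesc(fqName,
		"Total number of S3 requests rejected for auth failure.",
		nil, nil)
	fmt.Println(desc.String())
}
```
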
func getCacheHitsTotalMD() MetricDescription {
return MetricDescription{
Namespace: minioNamespace,
@@ -696,7 +625,7 @@ func getNodeOfflineTotalMD() MetricDescription {
}
func getMinIOVersionMD() MetricDescription {
return MetricDescription{
Namespace: minioMetricNamespace,
Namespace: minIOMetricNamespace,
Subsystem: softwareSubsystem,
Name: versionInfo,
Help: "MinIO Release tag for the server",
@@ -705,7 +634,7 @@ func getMinIOVersionMD() MetricDescription {
}
func getMinIOCommitMD() MetricDescription {
return MetricDescription{
Namespace: minioMetricNamespace,
Namespace: minIOMetricNamespace,
Subsystem: softwareSubsystem,
Name: commitInfo,
Help: "Git commit hash for the MinIO release.",
@@ -1026,14 +955,13 @@ func getMinioHealingMetrics() MetricsGroup {
if !exists {
return
}
if bgSeq.lastHealActivity.IsZero() {
return
var dur time.Duration
if !bgSeq.lastHealActivity.IsZero() {
dur = time.Since(bgSeq.lastHealActivity)
}
metrics = append(metrics, Metric{
Description: getHealLastActivityTimeMD(),
Value: float64(time.Since(bgSeq.lastHealActivity)),
Value: float64(dur),
})
metrics = append(metrics, getObjectsScanned(bgSeq)...)
metrics = append(metrics, getScannedItems(bgSeq)...)
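
The rewritten hunk trades "skip the gauge until the first heal" for "always emit it, reporting 0 beforehand", which keeps the time series present from process start. The `IsZero` guard is what makes that safe: `time.Since` on a zero `time.Time` would report decades rather than zero. A tiny sketch of the guard:

```
package main

import (
	"fmt"
	"time"
)

// sinceOrZero returns the elapsed time since t, or 0 if t was never set.
// Without the IsZero guard, time.Since(time.Time{}) yields a duration of
// many centuries instead of zero.
func sinceOrZero(t time.Time) time.Duration {
	if t.IsZero() {
		return 0
	}
	return time.Since(t)
}

func main() {
	var never time.Time
	fmt.Println(sinceOrZero(never))                            // 0s
	fmt.Println(sinceOrZero(time.Now().Add(-5 * time.Second))) // ~5s
}
```
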
@@ -1144,22 +1072,6 @@ func getHTTPMetrics() MetricsGroup {
len(httpStats.CurrentS3Requests.APIStats)+
len(httpStats.TotalS3Requests.APIStats)+
len(httpStats.TotalS3Errors.APIStats))
metrics = append(metrics, Metric{
Description: getS3RejectedAuthRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedAuth),
})
metrics = append(metrics, Metric{
Description: getS3RejectedTimestampRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedTime),
})
metrics = append(metrics, Metric{
Description: getS3RejectedHeaderRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedHeader),
})
metrics = append(metrics, Metric{
Description: getS3RejectedInvalidRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedInvalid),
})
metrics = append(metrics, Metric{
Description: getS3RequestsInQueueMD(),
Value: float64(httpStats.S3RequestsInQueue),
@@ -1255,14 +1167,7 @@ func getBucketUsageMetrics() MetricsGroup {
return
}
metrics = append(metrics, Metric{
Description: getUsageLastScanActivityMD(),
Value: float64(time.Since(dataUsageInfo.LastUpdate)),
})
for bucket, usage := range dataUsageInfo.BucketsUsage {
stat := getLatestReplicationStats(bucket, usage)
metrics = append(metrics, Metric{
Description: getBucketUsageTotalBytesMD(),
Value: float64(usage.Size),
@@ -1275,35 +1180,25 @@ func getBucketUsageMetrics() MetricsGroup {
VariableLabels: map[string]string{"bucket": bucket},
})
if stat.hasReplicationUsage() {
if usage.hasReplicationUsage() {
metrics = append(metrics, Metric{
Description: getBucketRepPendingBytesMD(),
Value: float64(stat.PendingSize),
Value: float64(usage.ReplicationPendingSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepFailedBytesMD(),
Value: float64(stat.FailedSize),
Value: float64(usage.ReplicationFailedSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepSentBytesMD(),
Value: float64(stat.ReplicatedSize),
Value: float64(usage.ReplicatedSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepReceivedBytesMD(),
Value: float64(stat.ReplicaSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepPendingOperationsMD(),
Value: float64(stat.PendingCount),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepFailedOperationsMD(),
Value: float64(stat.FailedCount),
Value: float64(usage.ReplicaSize),
VariableLabels: map[string]string{"bucket": bucket},
})
}
@@ -1420,6 +1315,13 @@ func getClusterStorageMetrics() MetricsGroup {
}
}
func (b *BucketUsageInfo) hasReplicationUsage() bool {
return b.ReplicationPendingSize > 0 ||
b.ReplicationFailedSize > 0 ||
b.ReplicatedSize > 0 ||
b.ReplicaSize > 0
}
type minioClusterCollector struct {
desc *prometheus.Desc
}
@@ -1504,7 +1406,7 @@ func ReportMetrics(ctx context.Context, generators func() []MetricsGenerator) <-
if m.VariableLabels == nil {
m.VariableLabels = make(map[string]string)
}
m.VariableLabels[serverName] = globalLocalNodeName
m.VariableLabels[serverName] = GetLocalPeer(globalEndpoints)
for {
select {
case ch <- m:
@@ -1551,7 +1453,7 @@ func (c *minioCollectorV2) Collect(ch chan<- prometheus.Metric) {
populateAndPublish(c.generator, func(metric Metric) bool {
labels, values := getOrderedLabelValueArrays(metric.VariableLabels)
values = append(values, globalLocalNodeName)
values = append(values, GetLocalPeer(globalEndpoints))
labels = append(labels, serverName)
if metric.Description.Type == histogramMetric {

View File

@@ -23,7 +23,6 @@ import (
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@@ -431,67 +430,6 @@ func networkMetricsPrometheus(ch chan<- prometheus.Metric) {
)
}
// getLatestReplicationStats returns the most current of the in-memory replication stats and the crawler's data usage info.
func getLatestReplicationStats(bucket string, u madmin.BucketUsageInfo) (s BucketReplicationStats) {
bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
replStats := BucketReplicationStats{}
for _, bucketStat := range bucketStats {
replStats.FailedCount += bucketStat.ReplicationStats.FailedCount
replStats.FailedSize += bucketStat.ReplicationStats.FailedSize
replStats.PendingCount += bucketStat.ReplicationStats.PendingCount
replStats.PendingSize += bucketStat.ReplicationStats.PendingSize
replStats.ReplicaSize += bucketStat.ReplicationStats.ReplicaSize
replStats.ReplicatedSize += bucketStat.ReplicationStats.ReplicatedSize
}
usageStat := globalReplicationStats.GetInitialUsage(bucket)
replStats.FailedCount += usageStat.FailedCount
replStats.FailedSize += usageStat.FailedSize
replStats.PendingCount += usageStat.PendingCount
replStats.PendingSize += usageStat.PendingSize
replStats.ReplicaSize += usageStat.ReplicaSize
replStats.ReplicatedSize += usageStat.ReplicatedSize
// Use in-memory replication stats if they are ahead of the usage info.
if replStats.ReplicatedSize >= u.ReplicatedSize {
s.ReplicatedSize = replStats.ReplicatedSize
} else {
s.ReplicatedSize = u.ReplicatedSize
}
if replStats.PendingSize > u.ReplicationPendingSize {
s.PendingSize = replStats.PendingSize
} else {
s.PendingSize = u.ReplicationPendingSize
}
if replStats.FailedSize > u.ReplicationFailedSize {
s.FailedSize = replStats.FailedSize
} else {
s.FailedSize = u.ReplicationFailedSize
}
if replStats.ReplicaSize > u.ReplicaSize {
s.ReplicaSize = replStats.ReplicaSize
} else {
s.ReplicaSize = u.ReplicaSize
}
if replStats.PendingCount > u.ReplicationPendingCount {
s.PendingCount = replStats.PendingCount
} else {
s.PendingCount = u.ReplicationPendingCount
}
if replStats.FailedCount > u.ReplicationFailedCount {
s.FailedCount = replStats.FailedCount
} else {
s.FailedCount = u.ReplicationFailedCount
}
return s
}
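
`getLatestReplicationStats` reconciles two sources, peer-reported in-memory stats and the crawler's persisted usage, by taking whichever value is larger field by field, on the assumption that replication counters only grow and the larger figure is therefore the fresher one. The repeated if/else blocks are an element-wise max; a hypothetical helper makes the shape obvious:

```
package main

import "fmt"

// maxUint64 is a hypothetical helper: replication counters are monotonic,
// so the larger of two observations is taken to be the more recent one.
func maxUint64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

// replStats holds a subset of the fields merged above, for illustration.
type replStats struct {
	ReplicatedSize, PendingSize, FailedSize uint64
}

func merge(inMemory, crawled replStats) replStats {
	return replStats{
		ReplicatedSize: maxUint64(inMemory.ReplicatedSize, crawled.ReplicatedSize),
		PendingSize:    maxUint64(inMemory.PendingSize, crawled.PendingSize),
		FailedSize:     maxUint64(inMemory.FailedSize, crawled.FailedSize),
	}
}

func main() {
	fmt.Println(merge(
		replStats{ReplicatedSize: 100},
		replStats{ReplicatedSize: 80, PendingSize: 5},
	)) // {100 5 0}
}
```
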
// Populates prometheus with bucket usage metrics; these metrics
// are only collected if the scanner is enabled.
func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
@@ -509,13 +447,13 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
if err != nil {
return
}
// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return
}
for bucket, usageInfo := range dataUsageInfo.BucketsUsage {
stat := getLatestReplicationStats(bucket, usageInfo)
// Total space used by bucket
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
@@ -541,7 +479,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity pending to be replicated",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.PendingSize),
float64(usageInfo.ReplicationPendingSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(
@@ -550,7 +488,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity failed to replicate at least once",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.FailedSize),
float64(usageInfo.ReplicationFailedSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(
@@ -559,7 +497,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity replicated to destination",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.ReplicatedSize),
float64(usageInfo.ReplicatedSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(
@@ -568,25 +506,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity replicated to this instance",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.ReplicaSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("bucket", "replication", "pending_count"),
"Total replication operations pending",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.PendingCount),
bucket,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("bucket", "replication", "failed_count"),
"Total replication operations failed",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.FailedCount),
float64(usageInfo.ReplicaSize),
bucket,
)
for k, v := range usageInfo.ObjectSizesHistogram {
@@ -618,7 +538,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
}
server := getLocalServerProperty(globalEndpoints, &http.Request{
Host: globalLocalNodeName,
Host: GetLocalPeer(globalEndpoints),
})
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(server.Disks)

View File

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016-2021 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016, 2017, 2018, 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -211,18 +211,16 @@ func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeou
lockSource := getSource(2)
start := UTCNow()
const readLock = false
success := make([]int, len(li.paths))
var success []int
for i, path := range li.paths {
if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
timeout.LogFailure()
for si, sint := range success {
if sint == 1 {
li.ns.unlock(li.volume, li.paths[si], readLock)
}
for _, sint := range success {
li.ns.unlock(li.volume, li.paths[sint], readLock)
}
return nil, OperationTimedOut{}
}
success[i] = 1
success = append(success, i)
}
timeout.LogSuccess(UTCNow().Sub(start))
return ctx, nil
@@ -241,18 +239,16 @@ func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeo
lockSource := getSource(2)
start := UTCNow()
const readLock = true
success := make([]int, len(li.paths))
var success []int
for i, path := range li.paths {
if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
timeout.LogFailure()
for si, sint := range success {
if sint == 1 {
li.ns.unlock(li.volume, li.paths[si], readLock)
}
for _, sint := range success {
li.ns.unlock(li.volume, li.paths[sint], readLock)
}
return nil, OperationTimedOut{}
}
success[i] = 1
success = append(success, i)
}
timeout.LogSuccess(UTCNow().Sub(start))
return ctx, nil
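
Both sides of this hunk implement the same all-or-nothing acquisition: lock each path in order and, on the first failure, release exactly the locks already taken before returning `OperationTimedOut`. Appending acquired indices to a slice (the newer shape) replaces the fixed 0/1 flag array. A distilled sketch of the pattern, using a map of held paths as a stand-in for the namespace lock:

```
package main

import "fmt"

// tryLockAll acquires locks for every path or none: on failure it unlocks
// the already-acquired subset in the order it was taken.
func tryLockAll(paths []string, tryLock func(string) bool, unlock func(string)) bool {
	var acquired []int // indices of successfully locked paths
	for i, p := range paths {
		if !tryLock(p) {
			for _, j := range acquired {
				unlock(paths[j]) // roll back the partial acquisition
			}
			return false
		}
		acquired = append(acquired, i)
	}
	return true
}

func main() {
	held := map[string]bool{"b": true} // "b" is already locked elsewhere
	tryLock := func(p string) bool {
		if held[p] {
			return false
		}
		held[p] = true
		return true
	}
	unlock := func(p string) { delete(held, p) }

	fmt.Println(tryLockAll([]string{"a", "b", "c"}, tryLock, unlock)) // false
	fmt.Println(held)                                                // map[b:true] — "a" rolled back
}
```
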

Some files were not shown because too many files have changed in this diff.