Merge branch 'master' into release

This commit is contained in:
Minio Trusted
2016-12-12 10:50:37 -08:00
84 changed files with 2108 additions and 1401 deletions

View File

@@ -13,6 +13,7 @@ env:
- ARCH=i686
script:
- make
- make test GOFLAGS="-race"
- make coverage
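
The new make test GOFLAGS="-race" step runs the suite under Go's race detector. A minimal, hypothetical sketch (not from this repo) of the kind of bug that flag catches; go run -race reports a data race on counter:

package main

import "fmt"

func main() {
    counter := 0
    done := make(chan struct{})
    go func() {
        counter++ // unsynchronized write, racing with the one below
        done <- struct{}{}
    }()
    counter++ // second unsynchronized write
    <-done
    fmt.Println(counter)
}
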

View File

@@ -83,8 +83,8 @@ fmt:
lint:
@echo "Running $@:"
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint github.com/minio/minio/cmd...
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint github.com/minio/minio/pkg...
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
@GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
ineffassign:
@echo "Running $@:"

View File

@@ -33,7 +33,7 @@ import (
// Enforces bucket policies for a bucket for a given action.
func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error APIErrorCode) {
// Verify if bucket actually exists
if err := isBucketExist(bucket, newObjectLayerFn()); err != nil {
if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
err = errorCause(err)
switch err.(type) {
case BucketNameInvalid:
@@ -170,11 +170,15 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
}
// ListBuckets does not have any bucket action.
if s3Error := checkRequestAuthType(r, "", "", "us-east-1"); s3Error != ErrNone {
s3Error := checkRequestAuthType(r, "", "", "us-east-1")
if s3Error == ErrInvalidRegion {
// Clients like boto3 sign the listBuckets() call with the configured region.
s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion())
}
if s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// Invoke the list buckets.
bucketsInfo, err := objectAPI.ListBuckets()
if err != nil {
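
The fallback above retries authentication with the server's configured region when the default-region check fails. A self-contained sketch of the same two-step pattern; verify is a hypothetical stand-in for checkRequestAuthType, which in the repo returns an APIErrorCode rather than an error:

package main

import (
    "errors"
    "fmt"
)

var errInvalidRegion = errors.New("invalid region")

// checkAuthWithFallback verifies a signature against the default region
// first, then retries once with the server's configured region, mirroring
// the ListBucketsHandler change above.
func checkAuthWithFallback(verify func(region string) error, configured string) error {
    err := verify("us-east-1")
    if errors.Is(err, errInvalidRegion) {
        // Clients like boto3 sign listBuckets() with the configured region.
        err = verify(configured)
    }
    return err
}

func main() {
    verify := func(region string) error {
        if region != "us-west-2" {
            return errInvalidRegion
        }
        return nil
    }
    fmt.Println(checkAuthWithFallback(verify, "us-west-2")) // <nil>
}
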
@@ -330,6 +334,10 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Proceed to creating a bucket.
err := objectAPI.MakeBucket(bucket)
if err != nil {
@@ -370,13 +378,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
bucket := mux.Vars(r)["bucket"]
formValues["Bucket"] = bucket
object := formValues["Key"]
if fileName != "" && strings.Contains(object, "${filename}") {
if fileName != "" && strings.Contains(formValues["Key"], "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
object = strings.Replace(object, "${filename}", fileName, -1)
formValues["Key"] = strings.Replace(formValues["Key"], "${filename}", fileName, -1)
}
object := formValues["Key"]
// Verify policy signature.
apiErr := doesPolicySignatureMatch(formValues)
@@ -427,6 +435,10 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
sha256sum := ""
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
objInfo, err := objectAPI.PutObject(bucket, object, -1, fileBody, metadata, sha256sum)
if err != nil {
errorIf(err, "Unable to create object.")
@@ -474,6 +486,10 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.RLock()
defer bucketLock.RUnlock()
if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -499,6 +515,10 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Attempt to delete bucket.
if err := objectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "Unable to delete a bucket.")
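
The handlers above now wrap bucket mutations in a namespace lock: write locks for Put/Delete/PostPolicy, a read lock for Head. A standalone sketch of that shape, using one sync.RWMutex per bucket name as a simplified stand-in for globalNSMutex.NewNSLock(bucket, ""), which in the repo is distributed-lock aware:

package main

import (
    "fmt"
    "sync"
)

// nsLock hands out one RWMutex per bucket name.
type nsLock struct {
    mu    sync.Mutex
    locks map[string]*sync.RWMutex
}

func (n *nsLock) forBucket(bucket string) *sync.RWMutex {
    n.mu.Lock()
    defer n.mu.Unlock()
    if n.locks[bucket] == nil {
        n.locks[bucket] = &sync.RWMutex{}
    }
    return n.locks[bucket]
}

func main() {
    ns := &nsLock{locks: map[string]*sync.RWMutex{}}
    l := ns.forBucket("mybucket")
    l.Lock() // write lock, as in PutBucketHandler/DeleteBucketHandler
    // ... create or delete the bucket ...
    l.Unlock()
    l.RLock() // read lock, as in HeadBucketHandler
    fmt.Println("bucket info read under RLock")
    l.RUnlock()
}
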

View File

@@ -16,7 +16,10 @@
package cmd
import "encoding/json"
import (
"encoding/json"
"net/rpc"
)
// BucketMetaState - Interface to update bucket metadata in-memory
// state.
@@ -34,6 +37,11 @@ type BucketMetaState interface {
SendEvent(args *EventArgs) error
}
// BucketUpdater - Interface implementer calls one of BucketMetaState's methods.
type BucketUpdater interface {
BucketUpdate(client BucketMetaState) error
}
// Type that implements BucketMetaState for local node.
type localBucketMetaState struct {
ObjectAPI func() ObjectLayer
@@ -104,26 +112,62 @@ type remoteBucketMetaState struct {
// change to remote peer via RPC call.
func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error {
reply := GenericReply{}
return rc.Call("S3.SetBucketNotificationPeer", args, &reply)
err := rc.Call("S3.SetBucketNotificationPeer", args, &reply)
// Check for network error and retry once.
if err != nil && err == rpc.ErrShutdown {
// Close the underlying connection to attempt once more.
rc.Close()
// Attempt again and proceed.
err = rc.Call("S3.SetBucketNotificationPeer", args, &reply)
}
return err
}
// remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to
// remote peer via RPC call.
func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error {
reply := GenericReply{}
return rc.Call("S3.SetBucketListenerPeer", args, &reply)
err := rc.Call("S3.SetBucketListenerPeer", args, &reply)
// Check for network error and retry once.
if err != nil && err == rpc.ErrShutdown {
// Close the underlying connection to attempt once more.
rc.Close()
// Attempt again and proceed.
err = rc.Call("S3.SetBucketListenerPeer", args, &reply)
}
return err
}
// remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote
// peer via RPC call.
func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error {
reply := GenericReply{}
return rc.Call("S3.SetBucketPolicyPeer", args, &reply)
err := rc.Call("S3.SetBucketPolicyPeer", args, &reply)
// Check for network error and retry once.
if err != nil && err == rpc.ErrShutdown {
// Close the underlying connection to attempt once more.
rc.Close()
// Attempt again and proceed.
err = rc.Call("S3.SetBucketPolicyPeer", args, &reply)
}
return err
}
// remoteBucketMetaState.SendEvent - sends event for bucket listener to remote
// peer via RPC call.
func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error {
reply := GenericReply{}
return rc.Call("S3.Event", args, &reply)
err := rc.Call("S3.Event", args, &reply)
// Check for network error and retry once.
if err != nil && err == rpc.ErrShutdown {
// Close the underlying connection to attempt once more.
rc.Close()
// Attempt again and proceed.
err = rc.Call("S3.Event", args, &reply)
}
return err
}
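
The four methods above repeat the same retry-once-on-rpc.ErrShutdown dance. A hedged sketch of how that repetition could be factored into a single helper; caller is a hypothetical interface covering the subset of the RPC client these methods use:

package main

import (
    "fmt"
    "net/rpc"
)

// caller is the subset of the RPC client used by remoteBucketMetaState.
type caller interface {
    Call(serviceMethod string, args interface{}, reply interface{}) error
    Close() error
}

// callWithRetry issues an RPC and, on rpc.ErrShutdown, closes the stale
// connection and retries exactly once, mirroring the pattern above.
func callWithRetry(c caller, method string, args, reply interface{}) error {
    err := c.Call(method, args, reply)
    if err == rpc.ErrShutdown {
        c.Close()
        err = c.Call(method, args, reply)
    }
    return err
}

// fakeCaller always reports a shut-down connection, so the helper retries
// once and then surfaces the error.
type fakeCaller struct{ calls int }

func (f *fakeCaller) Call(method string, args, reply interface{}) error {
    f.calls++
    return rpc.ErrShutdown
}

func (f *fakeCaller) Close() error { return nil }

func main() {
    f := &fakeCaller{}
    err := callWithRetry(f, "S3.Event", nil, nil)
    fmt.Println(f.calls, err) // 2 connection is shut down
}
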

View File

@@ -23,7 +23,6 @@ import (
"fmt"
"io"
"net/http"
"path"
"time"
"github.com/gorilla/mux"
@@ -174,7 +173,7 @@ func PutBucketNotificationConfig(bucket string, ncfg *notificationConfig, objAPI
// Acquire a write lock on bucket before modifying its
// configuration.
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
// Release lock after notifying peers
defer bucketLock.Unlock()
@@ -381,7 +380,7 @@ func AddBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectL
// Acquire a write lock on bucket before modifying its
// configuration.
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
// Release lock after notifying peers
defer bucketLock.Unlock()
@@ -423,7 +422,7 @@ func RemoveBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI Obje
// Acquire a write lock on bucket before modifying its
// configuration.
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
// Release lock after notifying peers
defer bucketLock.Unlock()
@@ -441,14 +440,3 @@ func RemoveBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI Obje
// peers (including local)
S3PeersUpdateBucketListener(bucket, updatedLcfgs)
}
// Removes notification.xml for a given bucket, only used during DeleteBucket.
func removeNotificationConfig(bucket string, objAPI ObjectLayer) error {
// Verify bucket is valid.
if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket}
}
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
return objAPI.DeleteObject(minioMetaBucket, notificationConfigPath)
}

View File

@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"fmt"
"io"
"io/ioutil"
@@ -166,65 +165,17 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
// Parse bucket policy.
var policy = &bucketPolicy{}
err = parseBucketPolicy(bytes.NewReader(policyBytes), policy)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path)
return
}
// Parse check bucket policy.
if s3Error := checkBucketPolicyResources(bucket, policy); s3Error != ErrNone {
// Parse, validate, and save bucket policy.
if s3Error := parseAndPersistBucketPolicy(bucket, policyBytes, objAPI); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// Save bucket policy.
if err = persistAndNotifyBucketPolicyChange(bucket, policyChange{false, policy}, objAPI); err != nil {
switch err.(type) {
case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
default:
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
}
return
}
// Success.
writeSuccessNoContent(w)
}
// persistAndNotifyBucketPolicyChange - takes a policyChange argument,
// persists it to storage, and notify nodes in the cluster about the
// change. In-memory state is updated in response to the notification.
func persistAndNotifyBucketPolicyChange(bucket string, pCh policyChange, objAPI ObjectLayer) error {
// Acquire a write lock on bucket before modifying its
// configuration.
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock.Lock()
// Release lock after notifying peers
defer bucketLock.Unlock()
if pCh.IsRemove {
if err := removeBucketPolicy(bucket, objAPI); err != nil {
return err
}
} else {
if pCh.BktPolicy == nil {
return errInvalidArgument
}
if err := writeBucketPolicy(bucket, objAPI, pCh.BktPolicy); err != nil {
return err
}
}
// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketPolicy(bucket, pCh)
return nil
}
// DeleteBucketPolicyHandler - DELETE Bucket policy
// -----------------
// This implementation of the DELETE operation uses the policy

View File

@@ -144,6 +144,12 @@ func getOldBucketsConfigPath() (string, error) {
// if bucket policy is not found.
func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader io.Reader, err error) {
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
// Acquire a read lock on policy config before reading.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath)
objLock.RLock()
defer objLock.RUnlock()
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, policyPath)
if err != nil {
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
@@ -188,6 +194,10 @@ func readBucketPolicy(bucket string, objAPI ObjectLayer) (*bucketPolicy, error)
// if no policies are found.
func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
// Acquire a write lock on policy config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath)
objLock.Lock()
defer objLock.Unlock()
if err := objAPI.DeleteObject(minioMetaBucket, policyPath); err != nil {
errorIf(err, "Unable to remove bucket-policy on bucket %s.", bucket)
err = errorCause(err)
@@ -207,9 +217,70 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy *bucketPolicy) err
return err
}
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
// Acquire a write lock on policy config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath)
objLock.Lock()
defer objLock.Unlock()
if _, err := objAPI.PutObject(minioMetaBucket, policyPath, int64(len(buf)), bytes.NewReader(buf), nil, ""); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
}
return nil
}
func parseAndPersistBucketPolicy(bucket string, policyBytes []byte, objAPI ObjectLayer) APIErrorCode {
// Parse bucket policy.
var policy = &bucketPolicy{}
err := parseBucketPolicy(bytes.NewReader(policyBytes), policy)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
return ErrInvalidPolicyDocument
}
// Check bucket policy resources.
if s3Error := checkBucketPolicyResources(bucket, policy); s3Error != ErrNone {
return s3Error
}
// Acquire a write lock on bucket before modifying its configuration.
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
// Release lock after notifying peers
defer bucketLock.Unlock()
// Save bucket policy.
if err = persistAndNotifyBucketPolicyChange(bucket, policyChange{false, policy}, objAPI); err != nil {
switch err.(type) {
case BucketNameInvalid:
return ErrInvalidBucketName
case BucketNotFound:
return ErrNoSuchBucket
default:
errorIf(err, "Unable to save bucket policy.")
return ErrInternalError
}
}
return ErrNone
}
// persistAndNotifyBucketPolicyChange - takes a policyChange argument,
// persists it to storage, and notify nodes in the cluster about the
// change. In-memory state is updated in response to the notification.
func persistAndNotifyBucketPolicyChange(bucket string, pCh policyChange, objAPI ObjectLayer) error {
if pCh.IsRemove {
if err := removeBucketPolicy(bucket, objAPI); err != nil {
return err
}
} else {
if pCh.BktPolicy == nil {
return errInvalidArgument
}
if err := writeBucketPolicy(bucket, objAPI, pCh.BktPolicy); err != nil {
return err
}
}
// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketPolicy(bucket, pCh)
return nil
}
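
Note the lock nesting introduced here: parseAndPersistBucketPolicy holds the bucket lock while writeBucketPolicy/removeBucketPolicy additionally take the lock on the policy object path. A minimal sketch of that two-level ordering with plain mutexes (names hypothetical); as long as every writer acquires in the same order, the nesting cannot deadlock:

package main

import (
    "fmt"
    "sync"
)

var (
    bucketLock sync.RWMutex // stands in for NewNSLock(bucket, "")
    policyLock sync.RWMutex // stands in for NewNSLock(minioMetaBucket, policyPath)
)

// savePolicy always acquires the bucket lock before the policy-object
// lock, matching the order used above.
func savePolicy(write func() error) error {
    bucketLock.Lock()
    defer bucketLock.Unlock()
    policyLock.Lock()
    defer policyLock.Unlock()
    return write()
}

func main() {
    err := savePolicy(func() error {
        fmt.Println("policy.json written under both locks")
        return nil
    })
    fmt.Println(err)
}
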

View File

@@ -23,8 +23,11 @@ import (
"github.com/minio/minio/pkg/quick"
)
// Read Write mutex for safe access to ServerConfig.
var serverConfigMu sync.RWMutex
// serverConfigV10 server configuration version '10' which is like version '9'
// except it drops support of syslog config
// except it drops support of syslog config.
type serverConfigV10 struct {
Version string `json:"version"`
@@ -37,9 +40,6 @@ type serverConfigV10 struct {
// Notification queue configuration.
Notify notifier `json:"notify"`
// Read Write mutex.
rwMutex *sync.RWMutex
}
// initConfig - initialize server config and indicate if we are creating a new file or we are just loading
@@ -68,16 +68,18 @@ func initConfig() (bool, error) {
srvCfg.Notify.NATS["1"] = natsNotify{}
srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
srvCfg.rwMutex = &sync.RWMutex{}
// Create config path.
err := createConfigPath()
if err != nil {
return false, err
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()
// Save config into file.
return true, serverConfig.Save()
@@ -91,7 +93,6 @@ func initConfig() (bool, error) {
}
srvCfg := &serverConfigV10{}
srvCfg.Version = globalMinioConfigVersion
srvCfg.rwMutex = &sync.RWMutex{}
qc, err := quick.New(srvCfg)
if err != nil {
return false, err
@@ -99,8 +100,12 @@ func initConfig() (bool, error) {
if err := qc.Load(configFile); err != nil {
return false, err
}
// hold the mutex lock before a new config is assigned.
serverConfigMu.Lock()
// Save the loaded config globally.
serverConfig = srvCfg
serverConfigMu.Unlock()
// Set the version properly after the unmarshalled json is loaded.
serverConfig.Version = globalMinioConfigVersion
@@ -112,168 +117,191 @@ var serverConfig *serverConfigV10
// GetVersion get current config version.
func (s serverConfigV10) GetVersion() string {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Version
}
/// Logger related.
func (s *serverConfigV10) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Notify.AMQP[accountID] = amqpn
}
func (s serverConfigV10) GetAMQP() map[string]amqpNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.AMQP
}
// GetAMQPNotify get current AMQP logger.
func (s serverConfigV10) GetAMQPNotifyByID(accountID string) amqpNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.AMQP[accountID]
}
// SetNATSNotifyByID set new NATS notify by accountID.
func (s *serverConfigV10) SetNATSNotifyByID(accountID string, natsn natsNotify) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Notify.NATS[accountID] = natsn
}
func (s serverConfigV10) GetNATS() map[string]natsNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.NATS
}
// GetNATSNotify get current NATS logger.
func (s serverConfigV10) GetNATSNotifyByID(accountID string) natsNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.NATS[accountID]
}
func (s *serverConfigV10) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Notify.ElasticSearch[accountID] = esNotify
}
func (s serverConfigV10) GetElasticSearch() map[string]elasticSearchNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.ElasticSearch
}
// GetElasticSearchNotify get current ElasticSearch logger.
func (s serverConfigV10) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.ElasticSearch[accountID]
}
func (s *serverConfigV10) SetRedisNotifyByID(accountID string, rNotify redisNotify) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Notify.Redis[accountID] = rNotify
}
func (s serverConfigV10) GetRedis() map[string]redisNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.Redis
}
// GetRedisNotify get current Redis logger.
func (s serverConfigV10) GetRedisNotifyByID(accountID string) redisNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.Redis[accountID]
}
func (s *serverConfigV10) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Notify.PostgreSQL[accountID] = pgn
}
func (s serverConfigV10) GetPostgreSQL() map[string]postgreSQLNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.PostgreSQL
}
func (s serverConfigV10) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Notify.PostgreSQL[accountID]
}
// SetFileLogger set new file logger.
func (s *serverConfigV10) SetFileLogger(flogger fileLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Logger.File = flogger
}
// GetFileLogger get current file logger.
func (s serverConfigV10) GetFileLogger() fileLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Logger.File
}
// SetConsoleLogger set new console logger.
func (s *serverConfigV10) SetConsoleLogger(clogger consoleLogger) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Logger.Console = clogger
}
// GetConsoleLogger get current console logger.
func (s serverConfigV10) GetConsoleLogger() consoleLogger {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Logger.Console
}
// SetRegion set new region.
func (s *serverConfigV10) SetRegion(region string) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Region = region
}
// GetRegion get current region.
func (s serverConfigV10) GetRegion() string {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Region
}
// SetCredentials set new credentials.
func (s *serverConfigV10) SetCredential(creds credential) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
serverConfigMu.Lock()
defer serverConfigMu.Unlock()
s.Credential = creds
}
// GetCredentials get current credentials.
func (s serverConfigV10) GetCredential() credential {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
return s.Credential
}
// Save config.
func (s serverConfigV10) Save() error {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
serverConfigMu.RLock()
defer serverConfigMu.RUnlock()
// get config file.
configFile, err := getConfigFile()
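
Moving the mutex out of serverConfigV10 into the package-level serverConfigMu also protects reassignment of the serverConfig pointer itself, not just field access. A self-contained sketch of the getter/setter shape used above:

package main

import (
    "fmt"
    "sync"
)

type serverConfig struct{ Region string }

var (
    configMu sync.RWMutex
    config   *serverConfig
)

// setRegion takes the write lock, as the Set* methods above do.
func setRegion(region string) {
    configMu.Lock()
    defer configMu.Unlock()
    config.Region = region
}

// getRegion takes the read lock, allowing concurrent readers.
func getRegion() string {
    configMu.RLock()
    defer configMu.RUnlock()
    return config.Region
}

func main() {
    config = &serverConfig{Region: "us-east-1"}
    setRegion("us-west-2")
    fmt.Println(getRegion())
}
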

View File

@@ -29,7 +29,7 @@ import (
// all the disks, writes also calculate individual block's checksum
// for future bit-rot protection.
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, blockSize int64, dataBlocks int, parityBlocks int, algo string, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {
// Allocated blockSized buffer for reading.
// Allocate a blockSized buffer for reading from the incoming stream.
buf := make([]byte, blockSize)
hashWriters := newHashWriters(len(disks), algo)

View File

@@ -21,6 +21,7 @@ import (
"errors"
"hash"
"io"
"sync"
"github.com/klauspost/reedsolomon"
"github.com/minio/blake2b-simd"
@@ -47,13 +48,23 @@ func newHash(algo string) hash.Hash {
}
}
// Hash buffer pool is a pool of reusable
// buffers used while checksumming a stream.
var hashBufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, readSizeV1)
return &b
},
}
// hashSum calculates the hash of the entire path and returns.
func hashSum(disk StorageAPI, volume, path string, writer hash.Hash) ([]byte, error) {
// Allocate staging buffer of 128KiB for copyBuffer.
buf := make([]byte, readSizeV1)
// Fetch a new staging buffer from the pool.
bufp := hashBufferPool.Get().(*[]byte)
defer hashBufferPool.Put(bufp)
// Copy entire buffer to writer.
if err := copyBuffer(writer, disk, volume, path, buf); err != nil {
if err := copyBuffer(writer, disk, volume, path, *bufp); err != nil {
return nil, err
}
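
hashSum now draws its 128KiB staging buffer from a sync.Pool instead of allocating per call, which cuts allocation and GC pressure on hot checksum paths. A standalone sketch of the same pattern (readSize stands in for readSizeV1):

package main

import (
    "crypto/sha256"
    "fmt"
    "io"
    "strings"
    "sync"
)

const readSize = 128 * 1024 // stand-in for readSizeV1

// Pool of reusable staging buffers; storing *[]byte avoids an extra
// allocation when the slice header is boxed into interface{}.
var bufPool = sync.Pool{
    New: func() interface{} {
        b := make([]byte, readSize)
        return &b
    },
}

// hashReader checksums a stream using a pooled staging buffer.
func hashReader(r io.Reader) ([]byte, error) {
    bufp := bufPool.Get().(*[]byte)
    defer bufPool.Put(bufp)
    h := sha256.New()
    if _, err := io.CopyBuffer(h, r, *bufp); err != nil {
        return nil, err
    }
    return h.Sum(nil), nil
}

func main() {
    sum, _ := hashReader(strings.NewReader("hello"))
    fmt.Printf("%x\n", sum)
}
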

View File

@@ -301,8 +301,14 @@ func eventNotify(event eventData) {
// structured notification config.
func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationConfig, error) {
// Construct the notification config path.
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath)
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
// Acquire a read lock on notification config before reading.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath)
objLock.RLock()
defer objLock.RUnlock()
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, ncPath)
if err != nil {
// 'notification.xml' not found return
// 'errNoSuchNotifications'. This is default when no
@@ -315,7 +321,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
return nil, err
}
var buffer bytes.Buffer
err = objAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, &buffer)
err = objAPI.GetObject(minioMetaBucket, ncPath, 0, objInfo.Size, &buffer)
if err != nil {
// 'notification.xml' not found return
// 'errNoSuchNotifications'. This is default when no
@@ -350,8 +356,14 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
}
// Construct the notification config path.
listenerConfigPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, listenerConfigPath)
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
// Acquire a read lock on listener config before reading.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, lcPath)
objLock.RLock()
defer objLock.RUnlock()
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, lcPath)
if err != nil {
// 'listener.json' not found return
// 'errNoSuchNotifications'. This is default when no
@@ -364,7 +376,7 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
return nil, err
}
var buffer bytes.Buffer
err = objAPI.GetObject(minioMetaBucket, listenerConfigPath, 0, objInfo.Size, &buffer)
err = objAPI.GetObject(minioMetaBucket, lcPath, 0, objInfo.Size, &buffer)
if err != nil {
// 'notification.xml' not found return
// 'errNoSuchNotifications'. This is default when no
@@ -399,6 +411,11 @@ func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj Obje
// build path
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
// Acquire a write lock on notification config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath)
objLock.Lock()
defer objLock.Unlock()
// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, ncPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
@@ -419,6 +436,11 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer
// build path
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
// Acquire a write lock on listener config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, lcPath)
objLock.Lock()
defer objLock.Unlock()
// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, lcPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
@@ -428,12 +450,34 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer
return err
}
// Removes notification.xml for a given bucket, only used during DeleteBucket.
func removeNotificationConfig(bucket string, objAPI ObjectLayer) error {
// Verify bucket is valid.
if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket}
}
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
// Acquire a write lock on notification config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath)
objLock.Lock()
err := objAPI.DeleteObject(minioMetaBucket, ncPath)
objLock.Unlock()
return err
}
// Remove listener configuration from storage layer. Used when a bucket is deleted.
func removeListenerConfig(bucket string, objAPI ObjectLayer) error {
// make the path
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
// remove it
return objAPI.DeleteObject(minioMetaBucket, lcPath)
// Acquire a write lock on listener config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, lcPath)
objLock.Lock()
err := objAPI.DeleteObject(minioMetaBucket, lcPath)
objLock.Unlock()
return err
}
// Loads both notification and listener config.

View File

@@ -206,23 +206,24 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
return formatConfigs, sErrs
}
// genericFormatCheck - validates and returns error.
// genericFormatCheckFS - validates format config and returns an error if any.
func genericFormatCheckFS(formatConfig *formatConfigV1, sErr error) (err error) {
if sErr != nil {
return sErr
}
// Successfully read, validate if FS.
if !isFSFormat(formatConfig) {
return errFSDiskFormat
}
return nil
}
// genericFormatCheckXL - validates and returns error.
// if (no quorum) return error
// if (any disk is corrupt) return error // phase2
// if (jbod inconsistent) return error // phase2
// if (disks not recognized) // Always error.
func genericFormatCheck(formatConfigs []*formatConfigV1, sErrs []error) (err error) {
if len(formatConfigs) == 1 {
// Successfully read, validate further.
if sErrs[0] == nil {
if !isFSFormat(formatConfigs[0]) {
return errFSDiskFormat
}
return nil
} // Returns error here.
return sErrs[0]
}
func genericFormatCheckXL(formatConfigs []*formatConfigV1, sErrs []error) (err error) {
// Calculate the errors.
var (
errCorruptFormatCount = 0
@@ -248,12 +249,12 @@ func genericFormatCheck(formatConfigs []*formatConfigV1, sErrs []error) (err err
// Calculate read quorum.
readQuorum := len(formatConfigs) / 2
// Validate the err count under tolerant limit.
// Validate the err count under read quorum.
if errCount > len(formatConfigs)-readQuorum {
return errXLReadQuorum
}
// Check if number of corrupted format under quorum
// Check if number of corrupted format under read quorum
if errCorruptFormatCount > len(formatConfigs)-readQuorum {
return errCorruptedFormat
}
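
The checks above tolerate at most len(formatConfigs) - readQuorum failures, with readQuorum = len/2. For example, with 8 disks readQuorum is 4, so up to 4 read errors still pass but a 5th returns errXLReadQuorum. A tiny sketch of the arithmetic:

package main

import "fmt"

// withinReadQuorum reports whether errCount failed disks still leave a
// read quorum, matching the check in genericFormatCheckXL above.
func withinReadQuorum(totalDisks, errCount int) bool {
    readQuorum := totalDisks / 2
    return errCount <= totalDisks-readQuorum
}

func main() {
    fmt.Println(withinReadQuorum(8, 4)) // true: 4 healthy disks remain
    fmt.Println(withinReadQuorum(8, 5)) // false: quorum lost
}
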
@@ -793,8 +794,7 @@ func loadFormatXL(bootstrapDisks []StorageAPI, readQuorum int) (disks []StorageA
return reorderDisks(bootstrapDisks, formatConfigs)
}
// checkFormatXL - verifies if format.json format is intact.
func checkFormatXL(formatConfigs []*formatConfigV1) error {
func checkFormatXLValues(formatConfigs []*formatConfigV1) error {
for _, formatXL := range formatConfigs {
if formatXL == nil {
continue
@@ -813,6 +813,14 @@ func checkFormatXL(formatConfigs []*formatConfigV1) error {
return fmt.Errorf("Number of disks %d did not match the backend format %d", len(formatConfigs), len(formatXL.XL.JBOD))
}
}
return nil
}
// checkFormatXL - verifies if format.json format is intact.
func checkFormatXL(formatConfigs []*formatConfigV1) error {
if err := checkFormatXLValues(formatConfigs); err != nil {
return err
}
if err := checkJBODConsistency(formatConfigs); err != nil {
return err
}

View File

@@ -664,36 +664,58 @@ func TestReduceFormatErrs(t *testing.T) {
}
}
// Tests for genericFormatCheck()
func TestGenericFormatCheck(t *testing.T) {
// Tests for genericFormatCheckFS()
func TestGenericFormatCheckFS(t *testing.T) {
// Generate format configs for XL.
formatConfigs := genFormatXLInvalidJBOD()
// Validate that an XL disk format fails the FS check.
if err := genericFormatCheckFS(formatConfigs[0], nil); err != errFSDiskFormat {
t.Fatalf("Unexpected error, expected %s, got %s", errFSDiskFormat, err)
}
// Validate disk is unformatted, should fail.
if err := genericFormatCheckFS(nil, errUnformattedDisk); err != errUnformattedDisk {
t.Fatalf("Unexpected error, expected %s, got %s", errUnformattedDisk, err)
}
// Validate when disk is in FS format.
format := newFSFormatV1()
if err := genericFormatCheckFS(format, nil); err != nil {
t.Fatalf("Unexpected error should pass, failed with %s", err)
}
}
// Tests for genericFormatCheckXL()
func TestGenericFormatCheckXL(t *testing.T) {
var errs []error
formatConfigs := genFormatXLInvalidJBOD()
// Some disks have corrupted formats, one faulty disk
errs = []error{nil, nil, errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, errCorruptedFormat,
errCorruptedFormat, errFaultyDisk}
if err := genericFormatCheck(formatConfigs, errs); err != errCorruptedFormat {
if err := genericFormatCheckXL(formatConfigs, errs); err != errCorruptedFormat {
t.Fatal("Got unexpected err: ", err)
}
// Many faulty disks
errs = []error{nil, nil, errFaultyDisk, errFaultyDisk, errFaultyDisk, errFaultyDisk,
errCorruptedFormat, errFaultyDisk}
if err := genericFormatCheck(formatConfigs, errs); err != errXLReadQuorum {
if err := genericFormatCheckXL(formatConfigs, errs); err != errXLReadQuorum {
t.Fatal("Got unexpected err: ", err)
}
// All formats successfully loaded
errs = []error{nil, nil, nil, nil, nil, nil, nil, nil}
if err := genericFormatCheck(formatConfigs, errs); err == nil {
if err := genericFormatCheckXL(formatConfigs, errs); err == nil {
t.Fatalf("Should fail here")
}
errs = []error{nil}
if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, errs); err != nil {
t.Fatal("Got unexpected err: ", err)
if err := genericFormatCheckXL([]*formatConfigV1{genFormatFS()}, errs); err == nil {
t.Fatalf("Should fail here")
}
errs = []error{errFaultyDisk}
if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, errs); err == nil {
if err := genericFormatCheckXL([]*formatConfigV1{genFormatFS()}, errs); err == nil {
t.Fatalf("Should fail here")
}
}

View File

@@ -27,11 +27,9 @@ func fsCreateFile(disk StorageAPI, reader io.Reader, buf []byte, tmpBucket, temp
return 0, traceError(rErr)
}
bytesWritten += int64(n)
if n > 0 {
wErr := disk.AppendFile(tmpBucket, tempObj, buf[0:n])
if wErr != nil {
return 0, traceError(wErr)
}
wErr := disk.AppendFile(tmpBucket, tempObj, buf[0:n])
if wErr != nil {
return 0, traceError(wErr)
}
if rErr == io.EOF {
break
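
The hunk above drops the if n > 0 guard, so a zero-length final Read still appends an empty chunk before the io.EOF break, presumably so that zero-byte streams still create the destination file. A self-contained sketch of that loop, with appendFile standing in for disk.AppendFile:

package main

import (
    "fmt"
    "io"
    "strings"
)

// createFile copies r into appendFile chunk by chunk. Appending even a
// zero-length final read mirrors the change above.
func createFile(r io.Reader, buf []byte, appendFile func(p []byte) error) (int64, error) {
    var written int64
    for {
        n, rErr := r.Read(buf)
        if rErr != nil && rErr != io.EOF {
            return 0, rErr
        }
        written += int64(n)
        if wErr := appendFile(buf[:n]); wErr != nil {
            return 0, wErr
        }
        if rErr == io.EOF {
            break
        }
    }
    return written, nil
}

func main() {
    var out []byte
    n, err := createFile(strings.NewReader("hello"), make([]byte, 2), func(p []byte) error {
        out = append(out, p...)
        return nil
    })
    fmt.Println(n, err, string(out)) // 5 <nil> hello
}
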

View File

@@ -91,9 +91,9 @@ func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID stri
// Called on complete-multipart-upload. Returns nil if the required parts have been appended.
func (b *backgroundAppend) complete(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) error {
b.Lock()
defer b.Unlock()
info, ok := b.infoMap[uploadID]
delete(b.infoMap, uploadID)
b.Unlock()
if !ok {
return errPartsMissing
}
@@ -121,13 +121,15 @@ func (b *backgroundAppend) abort(uploadID string) {
return
}
delete(b.infoMap, uploadID)
close(info.abortCh)
info.abortCh <- struct{}{}
}
// This is run as a go-routine that appends the parts in the background.
func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID string, info bgAppendPartsInfo) {
// Holds the list of parts that is already appended to the "append" file.
appendMeta := fsMetaV1{}
// Allocate staging read buffer.
buf := make([]byte, readSizeV1)
for {
select {
case input := <-info.inputCh:
@@ -150,7 +152,7 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
}
break
}
if err := appendPart(disk, bucket, object, uploadID, part); err != nil {
if err := appendPart(disk, bucket, object, uploadID, part, buf); err != nil {
disk.DeleteFile(minioMetaTmpBucket, uploadID)
appendMeta.Parts = nil
input.errCh <- err
@@ -161,9 +163,11 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
case <-info.abortCh:
// abort-multipart-upload closed abortCh to end the appendParts go-routine.
disk.DeleteFile(minioMetaTmpBucket, uploadID)
close(info.timeoutCh) // So that any racing PutObjectPart does not leave a dangling go-routine.
return
case <-info.completeCh:
// complete-multipart-upload closed completeCh to end the appendParts go-routine.
close(info.timeoutCh) // So that any racing PutObjectPart does not leave a dangling go-routine.
return
case <-time.After(appendPartsTimeout):
// Timeout the goroutine to garbage collect its resources. This would happen if the client initiates
@@ -182,12 +186,11 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
// Appends the "part" to the append-file inside "tmp/" that finally gets moved to the actual location
// upon complete-multipart-upload.
func appendPart(disk StorageAPI, bucket, object, uploadID string, part objectPartInfo) error {
func appendPart(disk StorageAPI, bucket, object, uploadID string, part objectPartInfo, buf []byte) error {
partPath := pathJoin(bucket, object, uploadID, part.Name)
offset := int64(0)
totalLeft := part.Size
buf := make([]byte, readSizeV1)
for totalLeft > 0 {
curLeft := int64(readSizeV1)
if totalLeft < readSizeV1 {
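
Two channel-lifecycle details change above: abort now sends on abortCh instead of closing it (exactly one receiver consumes the signal), and appendParts closes timeoutCh on every exit path so a racing PutObjectPart never blocks on a dead goroutine. A stripped-down sketch of that select loop (a simplification of appendParts, with the close moved into a defer):

package main

import (
    "fmt"
    "time"
)

// appendLoop drains parts until aborted, completed, or timed out; on any
// exit it closes timeoutCh so racing senders are released.
func appendLoop(inputCh chan string, abortCh, completeCh, timeoutCh chan struct{}, timeout time.Duration) {
    defer close(timeoutCh)
    for {
        select {
        case part := <-inputCh:
            fmt.Println("appended", part)
        case <-abortCh:
            fmt.Println("aborted")
            return
        case <-completeCh:
            fmt.Println("completed")
            return
        case <-time.After(timeout):
            fmt.Println("timed out")
            return
        }
    }
}

func main() {
    inputCh := make(chan string)
    abortCh := make(chan struct{})
    completeCh := make(chan struct{})
    timeoutCh := make(chan struct{})
    go appendLoop(inputCh, abortCh, completeCh, timeoutCh, time.Second)
    inputCh <- "part.1"
    abortCh <- struct{}{} // send, not close: one receiver consumes it
    <-timeoutCh           // closed by the loop on exit
}
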

View File

@@ -27,20 +27,6 @@ func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
return err == nil
}
// Checks whether bucket exists.
func (fs fsObjects) isBucketExist(bucket string) bool {
// Check whether bucket exists.
_, err := fs.storage.StatVol(bucket)
if err != nil {
if err == errVolumeNotFound {
return false
}
errorIf(err, "Stat failed on bucket "+bucket+".")
return false
}
return true
}
// isUploadIDExists - verify if a given uploadID exists and is valid.
func (fs fsObjects) isUploadIDExists(bucket, object, uploadID string) bool {
uploadIDPath := path.Join(bucket, object, uploadID)

View File

@@ -23,39 +23,6 @@ import (
"time"
)
// TestFSIsBucketExist - complete test of isBucketExist
func TestFSIsBucketExist(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj := initFSObjects(disk, t)
fs := obj.(fsObjects)
bucketName := "bucket"
if err := obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
// Test with a valid bucket
if found := fs.isBucketExist(bucketName); !found {
t.Fatal("isBucketExist should true")
}
// Test with a inexistant bucket
if found := fs.isBucketExist("foo"); found {
t.Fatal("isBucketExist should false")
}
// Using a faulty disk
fsStorage := fs.storage.(*retryStorage)
naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk)
fs.storage = naughty
if found := fs.isBucketExist(bucketName); found {
t.Fatal("isBucketExist should return false because it is wired to a corrupted disk")
}
}
// TestFSIsUploadExists - complete test with valid and invalid cases
func TestFSIsUploadExists(t *testing.T) {
// Prepare for testing

View File

@@ -26,8 +26,6 @@ import (
"path"
"strings"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
)
// listMultipartUploads - lists all multipart uploads.
@@ -59,7 +57,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
var err error
var eof bool
if uploadIDMarker != "" {
keyMarkerLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
keyMarkerLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, keyMarker))
keyMarkerLock.RLock()
uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, fs.storage)
@@ -114,7 +112,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
var end bool
uploadIDMarker = ""
entryLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
entryLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, entry))
entryLock.RLock()
tmpUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, fs.storage)
@@ -172,45 +170,8 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// ListMultipartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
// Validate input arguments.
if !IsValidBucketName(bucket) {
return ListMultipartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
if !fs.isBucketExist(bucket) {
return ListMultipartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListMultipartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListMultipartsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
return ListMultipartsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: keyMarker,
Prefix: prefix,
})
}
if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) {
return ListMultipartsInfo{}, traceError(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return ListMultipartsInfo{}, traceError(err)
}
if id.IsZero() {
return ListMultipartsInfo{}, traceError(MalformedUploadID{
UploadID: uploadIDMarker,
})
}
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return ListMultipartsInfo{}, err
}
return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}
@@ -231,7 +192,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
// This lock needs to be held for any changes to the directory
// contents of ".minio.sys/multipart/object/"
objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object))
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
@@ -256,17 +217,8 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
//
// Implements S3 compatible initiate multipart API.
func (fs fsObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
// Verify if bucket name is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !fs.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
// Verify if object name is valid.
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkNewMultipartArgs(bucket, object, fs); err != nil {
return "", err
}
return fs.newMultipartUpload(bucket, object, meta)
}
@@ -290,21 +242,13 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo,
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for reach parts.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !fs.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return "", err
}
uploadIDPath := path.Join(bucket, object, uploadID)
preUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
preUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
preUploadIDLock.RLock()
// Just check if the uploadID exists to avoid copy if it doesn't.
uploadIDExists := fs.isUploadIDExists(bucket, object, uploadID)
@@ -357,6 +301,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
fs.storage.DeleteFile(minioMetaTmpBucket, tmpPartPath)
return "", toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
@@ -384,7 +329,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
// Hold write lock as we are updating fs.json
postUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
postUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
postUploadIDLock.Lock()
defer postUploadIDLock.Unlock()
@@ -403,7 +348,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
partPath := path.Join(bucket, object, uploadID, partSuffix)
// Lock the part so that another part upload with same part-number gets blocked
// while the part is getting appended in the background.
partLock := nsMutex.NewNSLock(minioMetaMultipartBucket, partPath)
partLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, partPath)
partLock.Lock()
err = fs.storage.RenameFile(minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath)
if err != nil {
@@ -416,9 +361,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
// Append the part in background.
errCh := fs.bgAppend.append(fs.storage, bucket, object, uploadID, fsMeta)
go func() {
// Append the part in background.
errCh := fs.bgAppend.append(fs.storage, bucket, object, uploadID, fsMeta)
// Also receive the error so that the appendParts go-routine does not block on send.
// But the error received is ignored as fs.PutObjectPart() would have already
// returned success to the client.
@@ -488,21 +433,13 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListPartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !fs.isBucketExist(bucket) {
return ListPartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return ListPartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkListPartsArgs(bucket, object, fs); err != nil {
return ListPartsInfo{}, err
}
// Hold lock so that there is no competing
// abort-multipart-upload or complete-multipart-upload.
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
uploadIDLock.Lock()
defer uploadIDLock.Unlock()
@@ -532,19 +469,8 @@ func (fs fsObjects) totalObjectSize(fsMeta fsMetaV1, parts []completePart) (int6
//
// Implements S3 compatible Complete multipart API.
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !fs.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil {
return "", err
}
uploadIDPath := path.Join(bucket, object, uploadID)
@@ -553,7 +479,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// 1) no one aborts this multipart upload
// 2) no one does a parallel complete-multipart-upload on this
// multipart upload
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
uploadIDLock.Lock()
defer uploadIDLock.Unlock()
@@ -574,6 +500,8 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
}
// This lock is held during rename of the appended tmp file to the actual
// location so that any competing GetObject/PutObject/DeleteObject do not race.
appendFallback := true // In case background-append did not append the required parts.
if isPartsSame(fsMeta.Parts, parts) {
err = fs.bgAppend.complete(fs.storage, bucket, object, uploadID, fsMeta)
@@ -686,7 +614,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Hold the lock so that two parallel
// complete-multipart-uploads do not leave a stale
// uploads.json behind.
objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object))
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
@@ -705,11 +633,12 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// the directory at '.minio.sys/multipart/bucket/object/uploadID' holding
// all the upload parts.
func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error {
// Signal appendParts routine to stop waiting for new parts to arrive.
fs.bgAppend.abort(uploadID)
// Cleanup all uploaded parts.
if err := cleanupUploadedParts(bucket, object, uploadID, fs.storage); err != nil {
return err
}
fs.bgAppend.abort(uploadID)
// remove entry from uploads.json with quorum
if err := fs.removeUploadID(bucket, object, uploadID); err != nil {
return toObjectErr(err, bucket, object)
@@ -732,20 +661,13 @@ func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error
// no affect and further requests to the same uploadID would not be
// honored.
func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
if !fs.isBucketExist(bucket) {
return traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkAbortMultipartArgs(bucket, object, fs); err != nil {
return err
}
// Hold lock so that there is no competing
// complete-multipart-upload or put-object-part.
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
uploadIDLock.Lock()
defer uploadIDLock.Unlock()
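
Throughout this file the repeated inline validation collapses into check*Args helpers (checkNewMultipartArgs, checkPutObjectPartArgs, checkListPartsArgs, and so on). The diff shows only their call sites, so the following is a hedged guess at the shape of one of them; the interface, regexp, and error text are illustrative, not the repo's actual implementation:

package main

import (
    "errors"
    "fmt"
    "regexp"
)

// bucketExister is the one capability the check needs; in the repo this
// role is played by the ObjectLayer passed in as fs.
type bucketExister interface {
    isBucketExist(bucket string) bool
}

var bucketNameRe = regexp.MustCompile(`^[a-z0-9][a-z0-9.\-]{1,61}[a-z0-9]$`)

// checkNewMultipartArgs validates bucket and object names and confirms
// the bucket exists, in the order the removed inline checks used.
func checkNewMultipartArgs(bucket, object string, layer bucketExister) error {
    if !bucketNameRe.MatchString(bucket) {
        return errors.New("bucket name invalid: " + bucket)
    }
    if !layer.isBucketExist(bucket) {
        return errors.New("bucket not found: " + bucket)
    }
    if len(object) == 0 || len(object) > 1024 {
        return errors.New("object name invalid: " + object)
    }
    return nil
}

type fakeLayer struct{}

func (fakeLayer) isBucketExist(bucket string) bool { return bucket == "mybucket" }

func main() {
    fmt.Println(checkNewMultipartArgs("mybucket", "photo.jpg", fakeLayer{})) // <nil>
    fmt.Println(checkNewMultipartArgs("Bad_Bucket", "photo.jpg", fakeLayer{}))
}
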

View File

@@ -59,6 +59,12 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
// TestPutObjectPartFaultyDisk - test PutObjectPart with faulty disks
func TestPutObjectPartFaultyDisk(t *testing.T) {
root, err := newTestConfig("us-east-1")
if err != nil {
t.Fatal(err)
}
defer removeAll(root)
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
@@ -69,7 +75,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
data := []byte("12345")
dataLen := int64(len(data))
if err := obj.MakeBucket(bucketName); err != nil {
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
@@ -97,7 +103,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
t.Fatal("Unexpected error ", err)
}
case 3:
case 2, 4, 5:
case 2, 4, 5, 6:
if !isSameType(errorCause(err), InvalidUploadID{}) {
t.Fatal("Unexpected error ", err)
}

View File

@@ -81,12 +81,20 @@ func newFSObjects(storage StorageAPI) (ObjectLayer, error) {
// Should be called when process shuts down.
func (fs fsObjects) Shutdown() error {
// List if there are any multipart entries.
_, err := fs.storage.ListDir(minioMetaBucket, mpartMetaPrefix)
if err != errFileNotFound {
// A nil err means that multipart directory is not empty hence do not remove '.minio.sys' volume.
prefix := ""
entries, err := fs.storage.ListDir(minioMetaMultipartBucket, prefix)
if err != nil {
// A non-nil err means that an unexpected error occurred
return toObjectErr(traceError(err))
}
if len(entries) > 0 {
// Should not remove .minio.sys if any multipart
// uploads were found.
return nil
}
if err = fs.storage.DeleteVol(minioMetaMultipartBucket); err != nil {
return toObjectErr(traceError(err))
}
// List if there are any bucket configuration entries.
_, err = fs.storage.ListDir(minioMetaBucket, bucketConfigPrefix)
if err != errFileNotFound {
@@ -94,11 +102,18 @@ func (fs fsObjects) Shutdown() error {
// A non-nil err means that an unexpected error occurred
return toObjectErr(traceError(err))
}
// Cleanup everything else.
prefix := ""
if err = cleanupDir(fs.storage, minioMetaBucket, prefix); err != nil {
// Cleanup and delete tmp bucket.
if err = cleanupDir(fs.storage, minioMetaTmpBucket, prefix); err != nil {
return err
}
if err = fs.storage.DeleteVol(minioMetaTmpBucket); err != nil {
return toObjectErr(traceError(err))
}
// Remove format.json and delete .minio.sys bucket
if err = fs.storage.DeleteFile(minioMetaBucket, fsFormatJSONFile); err != nil {
return toObjectErr(traceError(err))
}
if err = fs.storage.DeleteVol(minioMetaBucket); err != nil {
if err != errVolumeNotEmpty {
return toObjectErr(traceError(err))
@@ -204,13 +219,8 @@ func (fs fsObjects) DeleteBucket(bucket string) error {
// GetObject - get an object.
func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, writer io.Writer) (err error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err = checkGetObjArgs(bucket, object); err != nil {
return err
}
// Offset and length cannot be negative.
if offset < 0 || length < 0 {
@@ -236,11 +246,6 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
return traceError(InvalidRange{offset, length, fi.Size})
}
// Lock the object before reading.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
var totalLeft = length
bufSize := int64(readSizeV1)
if length > 0 && bufSize > length {
@@ -315,8 +320,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
}
}
// Guess content-type from the extension if possible.
return ObjectInfo{
objInfo := ObjectInfo{
Bucket: bucket,
Name: object,
ModTime: fi.ModTime,
@@ -325,34 +329,29 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
MD5Sum: fsMeta.Meta["md5Sum"],
ContentType: fsMeta.Meta["content-type"],
ContentEncoding: fsMeta.Meta["content-encoding"],
UserDefined: fsMeta.Meta,
}, nil
}
// md5Sum has already been extracted into objInfo.MD5Sum. We
// need to remove it from fsMeta.Meta to prevent it from appearing as
// part of response headers. e.g, X-Minio-* or X-Amz-*.
delete(fsMeta.Meta, "md5Sum")
objInfo.UserDefined = fsMeta.Meta
return objInfo, nil
}
// GetObjectInfo - get object info.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ObjectInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return ObjectInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
}
return fs.getObjectInfo(bucket, object)
}
// PutObject - create an object.
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ObjectInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return ObjectInfo{}, traceError(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
if err = checkPutObjectArgs(bucket, object, fs); err != nil {
return ObjectInfo{}, err
}
// No metadata is set, allocate a new one.
if metadata == nil {
@@ -388,45 +387,37 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
limitDataReader = data
}
if size == 0 {
// For size 0 we write a 0byte file.
err = fs.storage.AppendFile(minioMetaTmpBucket, tempObj, []byte(""))
// Prepare file to avoid disk fragmentation
if size > 0 {
err = fs.storage.PrepareFile(minioMetaTmpBucket, tempObj, size)
if err != nil {
fs.storage.DeleteFile(minioMetaTmpBucket, tempObj)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
}
} else {
// Prepare file to avoid disk fragmentation
if size > 0 {
err = fs.storage.PrepareFile(minioMetaTmpBucket, tempObj, size)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
}
// Allocate a buffer to Read() from request body
bufSize := int64(readSizeV1)
if size > 0 && bufSize > size {
bufSize = size
}
buf := make([]byte, int(bufSize))
teeReader := io.TeeReader(limitDataReader, multiWriter)
var bytesWritten int64
bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaTmpBucket, tempObj)
if err != nil {
fs.storage.DeleteFile(minioMetaTmpBucket, tempObj)
errorIf(err, "Failed to create object %s/%s", bucket, object)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
fs.storage.DeleteFile(minioMetaTmpBucket, tempObj)
return ObjectInfo{}, traceError(IncompleteBody{})
}
}
// Allocate a buffer to Read() from request body
bufSize := int64(readSizeV1)
if size > 0 && bufSize > size {
bufSize = size
}
buf := make([]byte, int(bufSize))
teeReader := io.TeeReader(limitDataReader, multiWriter)
var bytesWritten int64
bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaTmpBucket, tempObj)
if err != nil {
fs.storage.DeleteFile(minioMetaTmpBucket, tempObj)
errorIf(err, "Failed to create object %s/%s", bucket, object)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
fs.storage.DeleteFile(minioMetaTmpBucket, tempObj)
return ObjectInfo{}, traceError(IncompleteBody{})
}
// Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be
// nothing to delete.
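// A self-contained sketch of the hashing pattern above: io.TeeReader mirrors
// every Read from the (size-limited) body into the MD5 writer while the copy
// drains it into the temp object, so the digest is ready the moment the copy
// finishes. Names below are illustrative, not the minio internals.
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

func main() {
	body := bytes.NewReader([]byte("hello object"))
	sum := md5.New()
	tee := io.TeeReader(body, sum) // reads from body, writes a copy into sum

	var tempObj bytes.Buffer // stands in for the temp object on disk
	n, err := io.CopyBuffer(&tempObj, tee, make([]byte, 4))
	fmt.Println(n, err, hex.EncodeToString(sum.Sum(nil)))
}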
@@ -454,56 +445,44 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
}
}
// Lock the object before committing the object.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
err = fs.storage.RenameFile(minioMetaTmpBucket, tempObj, bucket, object)
if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
}
// Save additional metadata. Initialize `fs.json` values.
fsMeta := newFSMetaV1()
fsMeta.Meta = metadata
if bucket != minioMetaBucket {
// Save objects' metadata in `fs.json`.
// Skip creating fs.json if bucket is .minio.sys as the object would have been created
// by minio's S3 layer (ex. policy.json)
fsMeta := newFSMetaV1()
fsMeta.Meta = metadata
fsMetaPath := path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)
if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
fsMetaPath := path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)
if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
}
}
objInfo, err = fs.getObjectInfo(bucket, object)
if err == nil {
// If MINIO_ENABLE_FSMETA is not enabled objInfo.MD5Sum will be empty.
objInfo.MD5Sum = newMD5Hex
}
return objInfo, err
return fs.getObjectInfo(bucket, object)
}
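// A sketch of the staging discipline PutObject follows: write the payload
// fully under a temporary name, then commit it with a single rename, so a
// concurrent reader never observes a half-written object. On POSIX the rename
// is atomic within one filesystem; the paths below are illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "stage")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	tmp := filepath.Join(dir, ".tmp-object")
	final := filepath.Join(dir, "object")

	// Stage the complete payload first...
	if err := ioutil.WriteFile(tmp, []byte("payload"), 0644); err != nil {
		panic(err)
	}
	// ...then commit in one step.
	if err := os.Rename(tmp, final); err != nil {
		panic(err)
	}
	fmt.Println("committed", final)
}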
// DeleteObject - deletes an object from a bucket, this operation is destructive
// and there are no rollbacks supported.
func (fs fsObjects) DeleteObject(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkDelObjArgs(bucket, object); err != nil {
return err
}
// Lock the object before deleting so that an in progress GetObject does not return
// corrupt data or there is no race with a PutObject.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
err := fs.storage.DeleteFile(minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile))
if err != nil && err != errFileNotFound {
return toObjectErr(traceError(err), bucket, object)
if bucket != minioMetaBucket {
// We don't store fs.json for minio-S3-layer created files like policy.json,
// hence we don't try to delete fs.json for such files.
err := fs.storage.DeleteFile(minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile))
if err != nil && err != errFileNotFound {
return toObjectErr(traceError(err), bucket, object)
}
}
if err = fs.storage.DeleteFile(bucket, object); err != nil {
if err := fs.storage.DeleteFile(bucket, object); err != nil {
return toObjectErr(traceError(err), bucket, object)
}
return nil
@@ -526,31 +505,8 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
return
}
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket exists.
if !fs.isBucketExist(bucket) {
return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if marker != "" {
if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
}
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
return ListObjectsInfo{}, err
}
// With max keys of zero we have reached eof, return right here.


@@ -126,9 +126,8 @@ func TestFSShutdown(t *testing.T) {
}
removeAll(disk)
// FIXME: Check why Shutdown returns success when second posix call returns faulty disk error
// Test Shutdown with faulty disk
/* for i := 1; i <= 5; i++ {
for i := 1; i <= 5; i++ {
fs, disk := prepareTest()
fs.DeleteObject(bucketName, objectName)
fsStorage := fs.storage.(*retryStorage)
@@ -137,7 +136,7 @@ func TestFSShutdown(t *testing.T) {
t.Fatal(i, ", Got unexpected fs shutdown error: ", err)
}
removeAll(disk)
} */
}
}
// TestFSLoadFormatFS - test loadFormatFS with healthy and faulty disks


@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -65,38 +65,66 @@ func (h requestSizeLimitHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
h.handler.ServeHTTP(w, r)
}
// Adds redirect rules for incoming requests.
type redirectHandler struct {
handler http.Handler
locationPrefix string
}
// Reserved bucket.
const (
reservedBucket = "/minio"
)
// Adds redirect rules for incoming requests.
type redirectHandler struct {
handler http.Handler
}
func setBrowserRedirectHandler(h http.Handler) http.Handler {
return redirectHandler{handler: h, locationPrefix: reservedBucket}
return redirectHandler{handler: h}
}
// Fetch the redirect location if urlPath satisfies certain
// criteria. Some special names are considered to be
// redirectable; this is a purely internal function and
// serves only a limited purpose in the redirect handler for
// browser requests.
func getRedirectLocation(urlPath string) (rLocation string) {
if urlPath == reservedBucket {
rLocation = reservedBucket + "/"
}
if contains([]string{
"/",
"/webrpc",
"/login",
"/favicon.ico",
}, urlPath) {
rLocation = reservedBucket + urlPath
}
return rLocation
}
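// The contains helper invoked above is not shown in this diff; a minimal
// version consistent with the call site would be (assumed, not the repo's
// actual implementation):
func contains(list []string, s string) bool {
	for _, item := range list {
		if item == s {
			return true
		}
	}
	return false
}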
// guessIsBrowserReq - returns true if the request is browser.
// This implementation just validates user-agent and
// looks for the "Mozilla" string. This is in no way a certifiable
// method of knowing whether the request really came from a browser,
// since User-Agents can be arbitrary. It is just
// a best-effort function.
func guessIsBrowserReq(req *http.Request) bool {
if req == nil {
return false
}
return strings.Contains(req.Header.Get("User-Agent"), "Mozilla")
}
func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Re-direction handled specifically for browsers.
if strings.Contains(r.Header.Get("User-Agent"), "Mozilla") && !isRequestSignatureV4(r) {
switch r.URL.Path {
case "/", "/webrpc", "/login", "/favicon.ico":
// '/' is redirected to 'locationPrefix/'
// '/webrpc' is redirected to 'locationPrefix/webrpc'
// '/login' is redirected to 'locationPrefix/login'
location := h.locationPrefix + r.URL.Path
// Redirect to new location.
http.Redirect(w, r, location, http.StatusTemporaryRedirect)
return
case h.locationPrefix:
// locationPrefix is redirected to 'locationPrefix/'
location := h.locationPrefix + "/"
http.Redirect(w, r, location, http.StatusTemporaryRedirect)
return
aType := getRequestAuthType(r)
// Re-direct only for JWT and anonymous requests from browser.
if aType == authTypeJWT || aType == authTypeAnonymous {
// Re-direction is handled specifically for browser requests.
if guessIsBrowserReq(r) && globalIsBrowserEnabled {
// Fetch the redirect location if any.
redirectLocation := getRedirectLocation(r.URL.Path)
if redirectLocation != "" {
// Employ a temporary re-direct.
http.Redirect(w, r, redirectLocation, http.StatusTemporaryRedirect)
return
}
}
}
h.handler.ServeHTTP(w, r)
@@ -112,10 +140,11 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler {
}
func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" && strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
if r.Method == "GET" && guessIsBrowserReq(r) && globalIsBrowserEnabled {
// For all browser requests set appropriate Cache-Control policies
match, e := regexp.MatchString(reservedBucket+`/([^/]+\.js|favicon.ico)`, r.URL.Path)
if e != nil {
match, err := regexp.Match(reservedBucket+`/([^/]+\.js|favicon.ico)`, []byte(r.URL.Path))
if err != nil {
errorIf(err, "Unable to match incoming URL %s", r.URL)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}
@@ -143,13 +172,22 @@ func setPrivateBucketHandler(h http.Handler) http.Handler {
func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// For all non browser requests, reject access to 'reservedBucket'.
if !strings.Contains(r.Header.Get("User-Agent"), "Mozilla") && path.Clean(r.URL.Path) == reservedBucket {
if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == reservedBucket {
writeErrorResponse(w, r, ErrAllAccessDisabled, r.URL.Path)
return
}
h.handler.ServeHTTP(w, r)
}
type timeValidityHandler struct {
handler http.Handler
}
// setTimeValidityHandler to validate parsable time over http header
func setTimeValidityHandler(h http.Handler) http.Handler {
return timeValidityHandler{h}
}
// Supported Amz date formats.
var amzDateFormats = []string{
time.RFC1123,
@@ -158,23 +196,23 @@ var amzDateFormats = []string{
// Add new AMZ date formats here.
}
// parseAmzDate - parses date string into supported amz date formats.
func parseAmzDate(amzDateStr string) (amzDate time.Time, apiErr APIErrorCode) {
for _, dateFormat := range amzDateFormats {
amzDate, e := time.Parse(dateFormat, amzDateStr)
if e == nil {
return amzDate, ErrNone
}
}
return time.Time{}, ErrMalformedDate
}
// Supported Amz date headers.
var amzDateHeaders = []string{
"x-amz-date",
"date",
}
// parseAmzDate - parses date string into supported amz date formats.
func parseAmzDate(amzDateStr string) (amzDate time.Time, apiErr APIErrorCode) {
for _, dateFormat := range amzDateFormats {
amzDate, err := time.Parse(dateFormat, amzDateStr)
if err == nil {
return amzDate, ErrNone
}
}
return time.Time{}, ErrMalformedDate
}
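// A sketch of the try-each-layout fallback parseAmzDate implements:
// time.RFC1123 comes from the amzDateFormats list above, while the ISO 8601
// layout here is only an assumed extra entry for illustration.
package main

import (
	"fmt"
	"time"
)

var layouts = []string{time.RFC1123, "20060102T150405Z"}

func parse(s string) (time.Time, error) {
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t, nil
		}
	}
	return time.Time{}, fmt.Errorf("malformed date %q", s)
}

func main() {
	fmt.Println(parse("Mon, 02 Jan 2006 15:04:05 UTC")) // parses via RFC1123
	fmt.Println(parse("bogus"))                         // malformed date error
}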
// parseAmzDateHeader - parses supported amz date headers, in
// supported amz date formats.
func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) {
@@ -188,15 +226,6 @@ func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) {
return time.Time{}, ErrMissingDateHeader
}
type timeValidityHandler struct {
handler http.Handler
}
// setTimeValidityHandler to validate parsable time over http header
func setTimeValidityHandler(h http.Handler) http.Handler {
return timeValidityHandler{h}
}
func (h timeValidityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
@@ -243,47 +272,6 @@ func setIgnoreResourcesHandler(h http.Handler) http.Handler {
return resourceHandler{h}
}
// Resource handler ServeHTTP() wrapper
func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Skip the first element which is usually '/' and split the rest.
splits := strings.SplitN(r.URL.Path[1:], "/", 2)
// Save bucketName and objectName extracted from url Path.
var bucketName, objectName string
if len(splits) == 1 {
bucketName = splits[0]
}
if len(splits) == 2 {
bucketName = splits[0]
objectName = splits[1]
}
// If bucketName is present and objectName is not, check for bucket-level resource queries.
if bucketName != "" && objectName == "" {
if ignoreNotImplementedBucketResources(r) {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
}
// If both bucketName and objectName are present, check for object-level resource queries.
if bucketName != "" && objectName != "" {
if ignoreNotImplementedObjectResources(r) {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
}
// A put method on path "/" doesn't make sense, ignore it.
if r.Method == "PUT" && r.URL.Path == "/" {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
// Serve HTTP.
h.handler.ServeHTTP(w, r)
}
//// helpers
// Checks requests for not implemented Bucket resources
func ignoreNotImplementedBucketResources(req *http.Request) bool {
for name := range req.URL.Query() {
@@ -324,3 +312,42 @@ var notimplementedObjectResourceNames = map[string]bool{
"acl": true,
"policy": true,
}
// Resource handler ServeHTTP() wrapper
func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Skip the first element which is usually '/' and split the rest.
splits := strings.SplitN(r.URL.Path[1:], "/", 2)
// Save bucketName and objectName extracted from url Path.
var bucketName, objectName string
if len(splits) == 1 {
bucketName = splits[0]
}
if len(splits) == 2 {
bucketName = splits[0]
objectName = splits[1]
}
// If bucketName is present and objectName is not, check for bucket-level resource queries.
if bucketName != "" && objectName == "" {
if ignoreNotImplementedBucketResources(r) {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
}
// If both bucketName and objectName are present, check for object-level resource queries.
if bucketName != "" && objectName != "" {
if ignoreNotImplementedObjectResources(r) {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
}
// A put method on path "/" doesn't make sense, ignore it.
if r.Method == "PUT" && r.URL.Path == "/" {
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
return
}
// Serve HTTP.
h.handler.ServeHTTP(w, r)
}
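// A worked example of the path split above: the leading '/' is dropped and
// the remainder splits into at most two pieces, bucket and object key.
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.SplitN("mybucket/dir/obj.txt", "/", 2)) // [mybucket dir/obj.txt]
	fmt.Println(strings.SplitN("mybucket", "/", 2))             // [mybucket] (bucket-only)
}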


@@ -0,0 +1,90 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"testing"
)
// Tests getRedirectLocation function for all its criteria.
func TestRedirectLocation(t *testing.T) {
testCases := []struct {
urlPath string
location string
}{
{
// 1. When urlPath is '/minio'
urlPath: reservedBucket,
location: reservedBucket + "/",
},
{
// 2. When urlPath is '/'
urlPath: "/",
location: reservedBucket + "/",
},
{
// 3. When urlPath is '/webrpc'
urlPath: "/webrpc",
location: reservedBucket + "/webrpc",
},
{
// 4. When urlPath is '/login'
urlPath: "/login",
location: reservedBucket + "/login",
},
{
// 5. When urlPath is '/favicon.ico'
urlPath: "/favicon.ico",
location: reservedBucket + "/favicon.ico",
},
{
// 6. When urlPath is '/unknown'
urlPath: "/unknown",
location: "",
},
}
// Validate all conditions.
for i, testCase := range testCases {
loc := getRedirectLocation(testCase.urlPath)
if testCase.location != loc {
t.Errorf("Test %d: Unexpected location expected %s, got %s", i+1, testCase.location, loc)
}
}
}
// Tests browser request guess function.
func TestGuessIsBrowser(t *testing.T) {
if guessIsBrowserReq(nil) {
t.Fatal("Unexpected return for nil request")
}
r := &http.Request{
Header: http.Header{},
}
r.Header.Set("User-Agent", "Mozilla")
if !guessIsBrowserReq(r) {
t.Fatal("Test shouldn't fail for a possible browser request.")
}
r = &http.Request{
Header: http.Header{},
}
r.Header.Set("User-Agent", "mc")
if guessIsBrowserReq(r) {
t.Fatal("Test shouldn't report as browser for a non browser request.")
}
}


@@ -18,10 +18,14 @@ package cmd
import (
"crypto/x509"
"os"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/objcache"
)
@@ -53,13 +57,20 @@ const (
)
var (
globalQuiet = false // Quiet flag set via command line.
globalIsDistXL = false // "Is Distributed?" flag.
globalQuiet = false // quiet flag set via command line.
globalConfigDir = mustGetConfigPath() // config-dir flag set via command line
// Add new global flags here.
// Maximum cache size.
globalMaxCacheSize = uint64(maxCacheSize)
globalIsDistXL = false // "Is Distributed?" flag.
// This flag is set to 'true' by default, it is set to `false`
// when MINIO_BROWSER env is set to 'off'.
globalIsBrowserEnabled = !strings.EqualFold(os.Getenv("MINIO_BROWSER"), "off")
// Maximum cache size. Defaults to disabled.
// Caching is enabled only for RAM size > 8GiB.
globalMaxCacheSize = uint64(0)
// Cache expiry.
globalCacheExpiry = objcache.DefaultExpiry
// Minio local server address (in `host:port` format)
@@ -90,3 +101,19 @@ var (
colorBlue = color.New(color.FgBlue).SprintfFunc()
colorGreen = color.New(color.FgGreen).SprintfFunc()
)
// Parse command arguments and set global variables accordingly
func setGlobalsFromContext(c *cli.Context) {
// Set config dir
switch {
case c.IsSet("config-dir"):
globalConfigDir = c.String("config-dir")
case c.GlobalIsSet("config-dir"):
globalConfigDir = c.GlobalString("config-dir")
}
if globalConfigDir == "" {
console.Fatalf("Unable to get config file. Config directory is empty.")
}
// Set global quiet flag.
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")
}
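// A sketch of the case-insensitive toggle behind globalIsBrowserEnabled above:
// MINIO_BROWSER set to "off", "OFF" or "Off" disables the browser; anything
// else, including an unset variable, leaves it enabled.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	os.Setenv("MINIO_BROWSER", "OFF")
	enabled := !strings.EqualFold(os.Getenv("MINIO_BROWSER"), "off")
	fmt.Println(enabled) // false
}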


@@ -119,26 +119,26 @@ func verifyRPCLockInfoResponse(l lockStateCase, rpcLockInfoMap map[string]*Syste
}
}
// Asserts the lock counter from the global nsMutex inmemory lock with the expected one.
// Asserts the lock counter from the in-memory globalNSMutex lock with the expected one.
func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) {
nsMutex.lockMapMutex.Lock()
globalNSMutex.lockMapMutex.Lock()
// Verifying the lock stats.
if nsMutex.globalLockCounter != int64(l.expectedGlobalLockCount) {
if globalNSMutex.globalLockCounter != int64(l.expectedGlobalLockCount) {
t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
nsMutex.globalLockCounter)
globalNSMutex.globalLockCounter)
}
// verify the count for total blocked locks.
if nsMutex.blockedCounter != int64(l.expectedBlockedLockCount) {
if globalNSMutex.blockedCounter != int64(l.expectedBlockedLockCount) {
t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
nsMutex.blockedCounter)
globalNSMutex.blockedCounter)
}
// verify the count for total running locks.
if nsMutex.runningLockCounter != int64(l.expectedRunningLockCount) {
if globalNSMutex.runningLockCounter != int64(l.expectedRunningLockCount) {
t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
nsMutex.runningLockCounter)
globalNSMutex.runningLockCounter)
}
nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Unlock()
// Verifying again with the JSON response of the lock info.
// Verifying the lock stats.
sysLockState, err := getSystemLockState()
@@ -164,35 +164,35 @@ func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) {
// Verify the lock counter for entries of given <volume, path> pair.
func verifyLockStats(l lockStateCase, t *testing.T, testNum int) {
nsMutex.lockMapMutex.Lock()
defer nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Lock()
defer globalNSMutex.lockMapMutex.Unlock()
param := nsParam{l.volume, l.path}
// Verify the total locks (blocked+running) for given <vol,path> pair.
if nsMutex.debugLockMap[param].ref != int64(l.expectedVolPathLockCount) {
if globalNSMutex.debugLockMap[param].ref != int64(l.expectedVolPathLockCount) {
t.Errorf("Test %d: Expected the total lock count for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum,
param.volume, param.path, int64(l.expectedVolPathLockCount), nsMutex.debugLockMap[param].ref)
param.volume, param.path, int64(l.expectedVolPathLockCount), globalNSMutex.debugLockMap[param].ref)
}
// Verify the total running locks for given <volume, path> pair.
if nsMutex.debugLockMap[param].running != int64(l.expectedVolPathRunningCount) {
if globalNSMutex.debugLockMap[param].running != int64(l.expectedVolPathRunningCount) {
t.Errorf("Test %d: Expected the total running locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path,
int64(l.expectedVolPathRunningCount), nsMutex.debugLockMap[param].running)
int64(l.expectedVolPathRunningCount), globalNSMutex.debugLockMap[param].running)
}
// Verify the total blocked locks for the given <volume, path> pair.
if nsMutex.debugLockMap[param].blocked != int64(l.expectedVolPathBlockCount) {
if globalNSMutex.debugLockMap[param].blocked != int64(l.expectedVolPathBlockCount) {
t.Errorf("Test %d: Expected the total blocked locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path,
int64(l.expectedVolPathBlockCount), nsMutex.debugLockMap[param].blocked)
int64(l.expectedVolPathBlockCount), globalNSMutex.debugLockMap[param].blocked)
}
}
// verifyLockState - function which asserts the expected lock info in the system with the actual values in the nsMutex.
// verifyLockState - function which asserts the expected lock info in the system with the actual values in the globalNSMutex.
func verifyLockState(l lockStateCase, t *testing.T, testNum int) {
param := nsParam{l.volume, l.path}
verifyGlobalLockStats(l, t, testNum)
nsMutex.lockMapMutex.Lock()
globalNSMutex.lockMapMutex.Lock()
// Verifying the lock status fields.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if debugLockMap, ok := globalNSMutex.debugLockMap[param]; ok {
if lockInfo, ok := debugLockMap.lockInfo[l.opsID]; ok {
// Validating the lock type field in the debug lock information.
if l.readLock {
@@ -222,7 +222,7 @@ func verifyLockState(l lockStateCase, t *testing.T, testNum int) {
t.Errorf("Test case %d: Debug lock entry for volume: %s, path: %s doesn't exist", testNum, param.volume, param.path)
}
// verifyLockStats holds its own lock.
nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Unlock()
// verify the lock count.
verifyLockStats(l, t, testNum)
@@ -319,7 +319,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before initialization is done.
// Since the data structures for lock instrumentation are not yet
// initialized, the call should fail with LockInfoVolPathMissing.
actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockSource,
actualErr := globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,
testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
@@ -327,14 +327,14 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
}
nsMutex = &nsLockMap{
globalNSMutex = &nsLockMap{
// entries of <volume,path> -> stateInfo of locks, for instrumentation purpose.
debugLockMap: make(map[nsParam]*debugLockInfoPerVolumePath),
lockMap: make(map[nsParam]*nsLock),
}
// Entry for <volume, path> pair is set to nil. Should fail with `errLockNotInitialized`.
nsMutex.debugLockMap[param] = nil
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockSource,
globalNSMutex.debugLockMap[param] = nil
actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,
testCases[0].opsID, testCases[0].readLock)
if errorCause(actualErr) != errLockNotInitialized {
@@ -342,14 +342,14 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
}
// Setting the lock info the be `nil`.
nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
globalNSMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
lockInfo: nil, // setting the lockinfo to nil.
ref: 0,
blocked: 0,
running: 0,
}
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockSource,
actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,
testCases[0].opsID, testCases[0].readLock)
expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID}
@@ -359,7 +359,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
// Next case: checks whether an attempt to change the state of the lock to "Running",
// when the initial state is already "Running", fails as expected.
nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
globalNSMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
lockInfo: make(map[string]debugLockInfo),
ref: 0,
blocked: 0,
@@ -368,13 +368,13 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
// Setting the status of the lock to be "Running".
// The initial state of the lock should be set to "Blocked"; otherwise it's not possible to change the state from "Blocked" -> "Running".
nsMutex.debugLockMap[param].lockInfo[testCases[0].opsID] = debugLockInfo{
globalNSMutex.debugLockMap[param].lockInfo[testCases[0].opsID] = debugLockInfo{
lockSource: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
status: "Running", // State set to "Running". Should fail with `LockInfoStateNotBlocked`.
since: time.Now().UTC(),
}
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockSource,
actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,
testCases[0].opsID, testCases[0].readLock)
expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID}
@@ -390,22 +390,22 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
param := nsParam{testCase.volume, testCase.path}
// status of the lock to be set to "Blocked", before setting Blocked->Running.
if testCase.setBlocked {
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCase.lockSource, testCase.opsID, testCase.readLock)
globalNSMutex.lockMapMutex.Lock()
err := globalNSMutex.statusNoneToBlocked(param, testCase.lockSource, testCase.opsID, testCase.readLock)
if err != nil {
t.Fatalf("Test %d: Initializing the initial state to Blocked failed <ERROR> %s", i+1, err)
}
nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Unlock()
}
// invoking the method under test.
actualErr = nsMutex.statusBlockedToRunning(param, testCase.lockSource, testCase.opsID, testCase.readLock)
actualErr = globalNSMutex.statusBlockedToRunning(param, testCase.lockSource, testCase.opsID, testCase.readLock)
if errorCause(actualErr) != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
}
// In case of no error proceed with validating the lock state information.
if actualErr == nil {
// debug entry for given <volume, path> pair should exist.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if debugLockMap, ok := globalNSMutex.debugLockMap[param]; ok {
if lockInfo, ok := debugLockMap.lockInfo[testCase.opsID]; ok {
// Validating the lock type field in the debug lock information.
if testCase.readLock {
@@ -514,7 +514,7 @@ func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before initialization is done.
// Since the data structures for lock instrumentation are not yet
// initialized, the call should fail with LockInfoVolPathMissing.
actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockSource,
actualErr := globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource,
testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
@@ -524,13 +524,13 @@ func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
// Iterate over the cases and assert the result.
for i, testCase := range testCases {
nsMutex.lockMapMutex.Lock()
globalNSMutex.lockMapMutex.Lock()
param := nsParam{testCase.volume, testCase.path}
actualErr := nsMutex.statusNoneToBlocked(param, testCase.lockSource, testCase.opsID, testCase.readLock)
actualErr := globalNSMutex.statusNoneToBlocked(param, testCase.lockSource, testCase.opsID, testCase.readLock)
if actualErr != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
}
nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Unlock()
if actualErr == nil {
verifyLockState(testCase, t, i+1)
}
@@ -559,7 +559,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before initialization is done.
actualErr := nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr := globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr {
@@ -568,17 +568,17 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
// Case - 2.
// Lock state is set to Running and then an attempt to delete the info for a non-existent opsID is made.
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
globalNSMutex.lockMapMutex.Lock()
err := globalNSMutex.statusNoneToBlocked(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err)
}
nsMutex.lockMapMutex.Unlock()
err = nsMutex.statusBlockedToRunning(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
globalNSMutex.lockMapMutex.Unlock()
err = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err)
}
actualErr = nsMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID")
actualErr = globalNSMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID")
expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existent-OpsID"}
if errorCause(actualErr) != expectedOpsIDErr {
@@ -589,7 +589,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
// All metrics should be 0 after deleting the entry.
// Verify that the entry for the opsID exists.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if debugLockMap, ok := globalNSMutex.debugLockMap[param]; ok {
if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; !ok {
t.Fatalf("Entry for OpsID \"%s\" in <volume> %s, <path> %s should have existed. ", testCases[0].opsID, param.volume, param.path)
}
@@ -597,27 +597,27 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
}
actualErr = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr = globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
if actualErr != nil {
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
}
// Verify that the entry for the opsId doesn't exist.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if debugLockMap, ok := globalNSMutex.debugLockMap[param]; ok {
if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; ok {
t.Fatalf("The entry for opsID \"%s\" should have been deleted", testCases[0].opsID)
}
} else {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
}
if nsMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter)
if globalNSMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.runningLockCounter)
}
if nsMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter)
if globalNSMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.blockedCounter)
}
if nsMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter)
if globalNSMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.globalLockCounter)
}
}
@@ -643,7 +643,7 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
// Case where an attempt is made to delete the entry for a non-existent <volume, path> pair.
// Set the status of the lock to blocked and then to running.
param := nsParam{testCases[0].volume, testCases[0].path}
actualErr := nsMutex.deleteLockInfoEntryForVolumePath(param)
actualErr := globalNSMutex.deleteLockInfoEntryForVolumePath(param)
expectedNilErr := LockInfoVolPathMissing{param.volume, param.path}
if errorCause(actualErr) != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
@@ -654,39 +654,39 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
// All metrics should be 0 after deleting the entry.
// Registering the entry first.
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
globalNSMutex.lockMapMutex.Lock()
err := globalNSMutex.statusNoneToBlocked(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err)
}
nsMutex.lockMapMutex.Unlock()
err = nsMutex.statusBlockedToRunning(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
globalNSMutex.lockMapMutex.Unlock()
err = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err)
}
// Verify that the entry for the given <volume, path> pair exists.
if _, ok := nsMutex.debugLockMap[param]; !ok {
if _, ok := globalNSMutex.debugLockMap[param]; !ok {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path)
}
// first delete the entry for the operation ID.
_ = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param)
_ = globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr = globalNSMutex.deleteLockInfoEntryForVolumePath(param)
if actualErr != nil {
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
}
// Verify that the entry for the opsId doesn't exist.
if _, ok := nsMutex.debugLockMap[param]; ok {
if _, ok := globalNSMutex.debugLockMap[param]; ok {
t.Fatalf("Entry for <volume> %s, <path> %s should have been deleted. ", param.volume, param.path)
}
// The lock count values should be 0.
if nsMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter)
if globalNSMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.runningLockCounter)
}
if nsMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter)
if globalNSMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.blockedCounter)
}
if nsMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter)
if globalNSMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.globalLockCounter)
}
}


@@ -34,6 +34,7 @@ const lockCheckValidityInterval = 2 * time.Minute
// LockArgs besides lock name, holds Token and Timestamp for session
// authentication and validation server restart.
type LockArgs struct {
loginServer
Name string
Token string
Timestamp time.Time
@@ -125,25 +126,6 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
/// Distributed lock handlers
// LoginHandler - handles LoginHandler RPC call.
func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential())
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = time.Now().UTC()
reply.ServerVersion = Version
return nil
}
// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
l.mutex.Lock()


@@ -61,16 +61,16 @@ type OpsLockState struct {
// Read entire state of the locks in the system and return.
func getSystemLockState() (SystemLockState, error) {
nsMutex.lockMapMutex.Lock()
defer nsMutex.lockMapMutex.Unlock()
globalNSMutex.lockMapMutex.Lock()
defer globalNSMutex.lockMapMutex.Unlock()
lockState := SystemLockState{}
lockState.TotalBlockedLocks = nsMutex.blockedCounter
lockState.TotalLocks = nsMutex.globalLockCounter
lockState.TotalAcquiredLocks = nsMutex.runningLockCounter
lockState.TotalBlockedLocks = globalNSMutex.blockedCounter
lockState.TotalLocks = globalNSMutex.globalLockCounter
lockState.TotalAcquiredLocks = globalNSMutex.runningLockCounter
for param, debugLock := range nsMutex.debugLockMap {
for param, debugLock := range globalNSMutex.debugLockMap {
volLockInfo := VolumeLockInfo{}
volLockInfo.Bucket = param.volume
volLockInfo.Object = param.path

cmd/login-server.go Normal file

@@ -0,0 +1,41 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "time"
type loginServer struct {
}
// LoginHandler - Handles JWT based RPC logic.
func (b loginServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential())
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = time.Now().UTC()
reply.ServerVersion = Version
return nil
}
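// Pulling LoginHandler into loginServer works because Go promotes methods of
// an embedded struct: any RPC server type embedding loginServer exposes
// LoginHandler without redeclaring it. A generic sketch with illustrative
// types, not the actual minio servers:
package main

import "fmt"

type authBase struct{}

func (authBase) Login() string { return "ok" }

type lockSvc struct {
	authBase // embedded: Login is promoted onto lockSvc
}

func main() {
	fmt.Println(lockSvc{}.Login()) // prints "ok"
}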

cmd/login-server_test.go Normal file

@@ -0,0 +1,67 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "testing"
func TestLoginHandler(t *testing.T) {
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Failed to create test config - %v", err)
}
defer removeAll(rootPath)
creds := serverConfig.GetCredential()
ls := loginServer{}
testCases := []struct {
args RPCLoginArgs
expectedErr error
}{
// Valid username and password
{
args: RPCLoginArgs{Username: creds.AccessKeyID, Password: creds.SecretAccessKey},
expectedErr: nil,
},
// Invalid username length
{
args: RPCLoginArgs{Username: "aaa", Password: "minio123"},
expectedErr: errInvalidAccessKeyLength,
},
// Invalid password length
{
args: RPCLoginArgs{Username: "minio", Password: "aaa"},
expectedErr: errInvalidSecretKeyLength,
},
// Invalid username
{
args: RPCLoginArgs{Username: "aaaaa", Password: creds.SecretAccessKey},
expectedErr: errInvalidAccessKeyID,
},
// Invalid password
{
args: RPCLoginArgs{Username: creds.AccessKeyID, Password: "aaaaaaaa"},
expectedErr: errAuthentication,
},
}
for i, test := range testCases {
reply := RPCLoginReply{}
err := ls.LoginHandler(&test.args, &reply)
if err != test.expectedErr {
t.Errorf("Test %d: Expected error %v but received %v",
i+1, test.expectedErr, err)
}
}
}


@@ -17,7 +17,6 @@
package cmd
import (
"errors"
"fmt"
"os"
"sort"
@@ -148,72 +147,70 @@ func checkMainSyntax(c *cli.Context) {
}
}
// Check for updates and print a notification message
func checkUpdate() {
// Do not print update messages if the quiet flag is set.
if !globalQuiet {
updateMsg, _, err := getReleaseUpdate(minioUpdateStableURL, 1*time.Second)
if err != nil {
// Ignore any errors during getReleaseUpdate(), possibly
// because of network errors.
return
}
if updateMsg.Update {
console.Println(updateMsg)
}
}
}
// Generic Minio initialization to create/load config, prepare loggers, etc.
func minioInit() {
// Sets new config directory.
setGlobalConfigPath(globalConfigDir)
// Migrate any old version of config / state files to newer format.
migrate()
// Initialize config.
configCreated, err := initConfig()
if err != nil {
console.Fatalf("Unable to initialize minio config. Err: %s.\n", err)
}
if configCreated {
console.Println("Created minio configuration file at " + mustGetConfigPath())
}
// Enable all loggers by now so we can use errorIf() and fatalIf()
enableLoggers()
// Fetch access keys from environment variables and update the config.
accessKey := os.Getenv("MINIO_ACCESS_KEY")
secretKey := os.Getenv("MINIO_SECRET_KEY")
if accessKey != "" && secretKey != "" {
// Set new credentials.
serverConfig.SetCredential(credential{
AccessKeyID: accessKey,
SecretAccessKey: secretKey,
})
}
if !isValidAccessKey(serverConfig.GetCredential().AccessKeyID) {
fatalIf(errInvalidArgument, "Invalid access key. Accept only a string starting with a alphabetic and containing from 5 to 20 characters.")
}
if !isValidSecretKey(serverConfig.GetCredential().SecretAccessKey) {
fatalIf(errInvalidArgument, "Invalid secret key. Accept only a string containing from 8 to 40 characters.")
}
// Init the error tracing module.
initError()
}
// Main main for minio server.
func Main() {
app := registerApp()
app.Before = func(c *cli.Context) error {
configDir := c.GlobalString("config-dir")
if configDir == "" {
fatalIf(errors.New("Config directory is empty"), "Unable to get config file.")
}
// Sets new config directory.
setGlobalConfigPath(configDir)
// Valid input arguments to main.
checkMainSyntax(c)
// Migrate any old version of config / state files to newer format.
migrate()
// Initialize config.
configCreated, err := initConfig()
if err != nil {
console.Fatalf("Unable to initialize minio config. Err: %s.\n", err)
}
if configCreated {
console.Println("Created minio configuration file at " + mustGetConfigPath())
}
// Enable all loggers by now so we can use errorIf() and fatalIf()
enableLoggers()
// Fetch access keys from environment variables and update the config.
accessKey := os.Getenv("MINIO_ACCESS_KEY")
secretKey := os.Getenv("MINIO_SECRET_KEY")
if accessKey != "" && secretKey != "" {
// Set new credentials.
serverConfig.SetCredential(credential{
AccessKeyID: accessKey,
SecretAccessKey: secretKey,
})
}
if !isValidAccessKey(serverConfig.GetCredential().AccessKeyID) {
fatalIf(errInvalidArgument, "Invalid access key. Accept only a string starting with a alphabetic and containing from 5 to 20 characters.")
}
if !isValidSecretKey(serverConfig.GetCredential().SecretAccessKey) {
fatalIf(errInvalidArgument, "Invalid secret key. Accept only a string containing from 8 to 40 characters.")
}
// Init the error tracing module.
initError()
// Set global quiet flag.
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")
// Do not print update messages if the quiet flag is set.
if !globalQuiet {
if c.Args().Get(0) != "update" {
updateMsg, _, err := getReleaseUpdate(minioUpdateStableURL, 1*time.Second)
if err != nil {
// Ignore any errors during getReleaseUpdate(), possibly
// because of network errors.
return nil
}
if updateMsg.Update {
console.Println(updateMsg)
}
}
}
return nil
}


@@ -26,7 +26,7 @@ import (
)
// Global name space lock.
var nsMutex *nsLockMap
var globalNSMutex *nsLockMap
// Initialize distributed locking only in case of distributed setup.
// Returns if the setup is distributed or not on success.
@@ -57,15 +57,15 @@ func initDsyncNodes(eps []*url.URL) error {
}
// initNSLock - initialize name space lock map.
func initNSLock(isDist bool) {
nsMutex = &nsLockMap{
isDist: isDist,
lockMap: make(map[nsParam]*nsLock),
func initNSLock(isDistXL bool) {
globalNSMutex = &nsLockMap{
isDistXL: isDistXL,
lockMap: make(map[nsParam]*nsLock),
}
// Initialize nsLockMap with entry for instrumentation information.
// Entries of <volume,path> -> stateInfo of locks
nsMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath)
globalNSMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath)
}
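// A sketch of the construction-time choice initNSLock records: behind a
// read-write locker interface (see RWLocker below), a standalone server gets
// a plain sync.RWMutex while a distributed setup would get dsync.NewDRWMutex,
// as in the hunk that follows. The minimal interface here is assumed for
// illustration.
package main

import (
	"fmt"
	"sync"
)

type rwLocker interface {
	Lock()
	Unlock()
	RLock()
	RUnlock()
}

func newLocker(isDistXL bool) rwLocker {
	if isDistXL {
		// A distributed mutex would be returned here; this sketch stays local.
	}
	return &sync.RWMutex{}
}

func main() {
	l := newLocker(false)
	l.RLock() // multiple readers may hold the lock concurrently
	l.RUnlock()
	fmt.Println("ok")
}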
// RWLocker - interface that any read-write locking library should implement.
@@ -98,7 +98,7 @@ type nsLockMap struct {
// Indicates whether the locking service is part
// of a distributed setup or not.
isDist bool
isDistXL bool
lockMap map[nsParam]*nsLock
lockMapMutex sync.Mutex
}
@@ -113,8 +113,8 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
if !found {
nsLk = &nsLock{
RWLocker: func() RWLocker {
if n.isDist {
return dsync.NewDRWMutex(pathutil.Join(volume, path))
if n.isDistXL {
return dsync.NewDRWMutex(pathJoin(volume, path))
}
return &sync.RWMutex{}
}(),
@@ -126,7 +126,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
// Change the state of the lock to be blocked for the given
// pair of <volume, path> and <OperationID> till the lock
// unblocks. The lock for accessing `nsMutex` is held inside
// unblocks. The lock for accessing `globalNSMutex` is held inside
// the function itself.
if err := n.statusNoneToBlocked(param, lockSource, opsID, readLock); err != nil {
errorIf(err, "Failed to set lock state to blocked")
@@ -226,7 +226,7 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
defer n.lockMapMutex.Unlock()
// Clarification on operation:
// - In case of FS or XL we call ForceUnlock on the local nsMutex
// - In case of FS or XL we call ForceUnlock on the local globalNSMutex
// (since there is only a single server) which will cause the 'stuck'
// mutex to be removed from the map. Existing operations for this
// will continue to be blocked (and timeout). New operations on this
@@ -238,9 +238,8 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
// that participated in granting the lock. Any pending dsync locks that
// are blocking can now proceed as normal and any new locks will also
// participate normally.
if n.isDist { // For distributed mode, broadcast ForceUnlock message.
dsync.NewDRWMutex(pathutil.Join(volume, path)).ForceUnlock()
if n.isDistXL { // For distributed mode, broadcast ForceUnlock message.
dsync.NewDRWMutex(pathJoin(volume, path)).ForceUnlock()
}
param := nsParam{volume, path}


@@ -37,22 +37,22 @@ func TestNamespaceLockTest(t *testing.T) {
shouldPass bool
}{
{
lk: nsMutex.Lock,
unlk: nsMutex.Unlock,
lk: globalNSMutex.Lock,
unlk: globalNSMutex.Unlock,
lockedRefCount: 1,
unlockedRefCount: 0,
shouldPass: true,
},
{
rlk: nsMutex.RLock,
runlk: nsMutex.RUnlock,
rlk: globalNSMutex.RLock,
runlk: globalNSMutex.RUnlock,
lockedRefCount: 4,
unlockedRefCount: 2,
shouldPass: true,
},
{
rlk: nsMutex.RLock,
runlk: nsMutex.RUnlock,
rlk: globalNSMutex.RLock,
runlk: globalNSMutex.RUnlock,
lockedRefCount: 1,
unlockedRefCount: 0,
shouldPass: true,
@@ -64,7 +64,7 @@ func TestNamespaceLockTest(t *testing.T) {
// Write lock tests.
testCase := testCases[0]
testCase.lk("a", "b", "c") // lock once.
nsLk, ok := nsMutex.lockMap[nsParam{"a", "b"}]
nsLk, ok := globalNSMutex.lockMap[nsParam{"a", "b"}]
if !ok && testCase.shouldPass {
t.Errorf("Lock in map missing.")
}
@@ -76,7 +76,7 @@ func TestNamespaceLockTest(t *testing.T) {
if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 1, testCase.unlockedRefCount, nsLk.ref)
}
_, ok = nsMutex.lockMap[nsParam{"a", "b"}]
_, ok = globalNSMutex.lockMap[nsParam{"a", "b"}]
if ok && !testCase.shouldPass {
t.Errorf("Lock map found after unlock.")
}
@@ -87,7 +87,7 @@ func TestNamespaceLockTest(t *testing.T) {
testCase.rlk("a", "b", "c") // lock second time.
testCase.rlk("a", "b", "c") // lock third time.
testCase.rlk("a", "b", "c") // lock fourth time.
nsLk, ok = nsMutex.lockMap[nsParam{"a", "b"}]
nsLk, ok = globalNSMutex.lockMap[nsParam{"a", "b"}]
if !ok && testCase.shouldPass {
t.Errorf("Lock in map missing.")
}
@@ -101,7 +101,7 @@ func TestNamespaceLockTest(t *testing.T) {
if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 2, testCase.unlockedRefCount, nsLk.ref)
}
_, ok = nsMutex.lockMap[nsParam{"a", "b"}]
_, ok = globalNSMutex.lockMap[nsParam{"a", "b"}]
if !ok && testCase.shouldPass {
t.Errorf("Lock map not found.")
}
@@ -110,7 +110,7 @@ func TestNamespaceLockTest(t *testing.T) {
testCase = testCases[2]
testCase.rlk("a", "c", "d") // lock once.
nsLk, ok = nsMutex.lockMap[nsParam{"a", "c"}]
nsLk, ok = globalNSMutex.lockMap[nsParam{"a", "c"}]
if !ok && testCase.shouldPass {
t.Errorf("Lock in map missing.")
}
@@ -122,7 +122,7 @@ func TestNamespaceLockTest(t *testing.T) {
if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 3, testCase.unlockedRefCount, nsLk.ref)
}
_, ok = nsMutex.lockMap[nsParam{"a", "c"}]
_, ok = globalNSMutex.lockMap[nsParam{"a", "c"}]
if ok && !testCase.shouldPass {
t.Errorf("Lock map not found.")
}
@@ -303,7 +303,7 @@ func TestLockStats(t *testing.T) {
// hold 10 read locks.
for i := 0; i < 10; i++ {
nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
globalNSMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
}
// expected lock info.
expectedLockStats := expectedResult[0]
@@ -311,7 +311,7 @@ func TestLockStats(t *testing.T) {
verifyLockState(expectedLockStats, t, 1)
// unlock 5 readlock.
for i := 0; i < 5; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i))
globalNSMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i))
}
expectedLockStats = expectedResult[1]
@@ -323,14 +323,14 @@ func TestLockStats(t *testing.T) {
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(10))
globalNSMutex.Lock("my-bucket", "my-object", strconv.Itoa(10))
// Once the above attempt to lock is unblocked/acquired, we verify the stats and release the lock.
expectedWLockStats := expectedResult[2]
// Since the write lock acquired here, the number of blocked locks should reduce by 1 and
// count of running locks should increase by 1.
verifyLockState(expectedWLockStats, t, 3)
// release the write lock.
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10))
globalNSMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10))
// The number of running locks should decrease by 1.
// expectedWLockStats = expectedResult[3]
// verifyLockState(expectedWLockStats, t, 4)
@@ -348,14 +348,14 @@ func TestLockStats(t *testing.T) {
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(11))
globalNSMutex.Lock("my-bucket", "my-object", strconv.Itoa(11))
// Once the above attempt to lock is unblocked/acquired, we release the lock.
// Unlock the second write lock only after the lock stats for the first write-lock release are taken.
<-syncChan
// The number of running locks should decrease by 1.
expectedWLockStats := expectedResult[4]
verifyLockState(expectedWLockStats, t, 5)
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(11))
globalNSMutex.Unlock("my-bucket", "my-object", strconv.Itoa(11))
}()
expectedLockStats = expectedResult[5]
@@ -366,7 +366,7 @@ func TestLockStats(t *testing.T) {
// unlock 4 out of remaining 5 read locks.
for i := 0; i < 4; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i+5))
globalNSMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i+5))
}
// verify the entry for one remaining read lock and count of blocked write locks.
@@ -375,7 +375,7 @@ func TestLockStats(t *testing.T) {
verifyLockState(expectedLockStats, t, 7)
// Releasing the last read lock.
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(9))
globalNSMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(9))
wg.Wait()
expectedLockStats = expectedResult[7]
// verify the actual lock info with the expected one.
@@ -386,16 +386,16 @@ func TestLockStats(t *testing.T) {
func TestNamespaceForceUnlockTest(t *testing.T) {
// Create lock.
lock := nsMutex.NewNSLock("bucket", "object")
lock := globalNSMutex.NewNSLock("bucket", "object")
lock.Lock()
// Forcefully unlock lock.
nsMutex.ForceUnlock("bucket", "object")
globalNSMutex.ForceUnlock("bucket", "object")
ch := make(chan struct{}, 1)
go func() {
// Try to claim lock again.
anotherLock := nsMutex.NewNSLock("bucket", "object")
anotherLock := globalNSMutex.NewNSLock("bucket", "object")
anotherLock.Lock()
// And signal success.
ch <- struct{}{}
@@ -412,5 +412,5 @@ func TestNamespaceForceUnlockTest(t *testing.T) {
}
// Clean up lock.
nsMutex.ForceUnlock("bucket", "object")
globalNSMutex.ForceUnlock("bucket", "object")
}


@@ -276,15 +276,3 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
err := delFunc(retainSlash(pathJoin(dirPath)))
return err
}
// Checks whether bucket exists.
func isBucketExist(bucket string, obj ObjectLayer) error {
if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket}
}
_, err := obj.GetBucketInfo(bucket)
if err != nil {
return BucketNotFound{Bucket: bucket}
}
return nil
}


@@ -0,0 +1,161 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"strings"
"github.com/skyrings/skyring-common/tools/uuid"
)
// Checks on GetObject arguments, bucket and object.
func checkGetObjArgs(bucket, object string) error {
return checkBucketAndObjectNames(bucket, object)
}
// Checks on DeleteObject arguments, bucket and object.
func checkDelObjArgs(bucket, object string) error {
return checkBucketAndObjectNames(bucket, object)
}
// Checks bucket and object name validity, returns nil if both are valid.
func checkBucketAndObjectNames(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
return nil
}
// Checks for all ListObjects arguments validity.
func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer) error {
// Verify if bucket exists before validating object name.
// This is done on purpose since the order of errors is
// important here: a bucket-does-not-exist error should
// happen before we return an error for an invalid object name.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err)
}
// Validate the object prefix after the bucket is known to exist.
if !IsValidObjectPrefix(prefix) {
return traceError(ObjectNameInvalid{
Bucket: bucket,
Object: prefix,
})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if marker != "" && !strings.HasPrefix(marker, prefix) {
return traceError(InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
}
return nil
}
// Checks for all ListMultipartUploads arguments validity.
func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
if err := checkListObjsArgs(bucket, prefix, keyMarker, delimiter, obj); err != nil {
return err
}
if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) {
return traceError(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return traceError(err)
}
if id.IsZero() {
return traceError(MalformedUploadID{
UploadID: uploadIDMarker,
})
}
}
return nil
}
// Checks for NewMultipartUpload arguments validity, also validates if bucket exists.
func checkNewMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkPutObjectArgs(bucket, object, obj)
}
// Checks for PutObjectPart arguments validity, also validates if bucket exists.
func checkPutObjectPartArgs(bucket, object string, obj ObjectLayer) error {
return checkPutObjectArgs(bucket, object, obj)
}
// Checks for ListParts arguments validity, also validates if bucket exists.
func checkListPartsArgs(bucket, object string, obj ObjectLayer) error {
return checkPutObjectArgs(bucket, object, obj)
}
// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
func checkCompleteMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkPutObjectArgs(bucket, object, obj)
}
// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
func checkAbortMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkPutObjectArgs(bucket, object, obj)
}
// Checks for PutObject arguments validity, also validates if bucket exists.
func checkPutObjectArgs(bucket, object string, obj ObjectLayer) error {
// Verify if bucket exists before validating object name.
// This is done on purpose since the order of errors is
// important here: a bucket-does-not-exist error should
// happen before we return an error for an invalid object name.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err)
}
// Validate the object name after confirming the bucket exists.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
}
return nil
}
// Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(bucket string, obj ObjectLayer) error {
if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket}
}
_, err := obj.GetBucketInfo(bucket)
if err != nil {
return BucketNotFound{Bucket: bucket}
}
return nil
}
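Note for callers: the comments above guarantee error ordering, so a missing bucket is reported before an invalid object name. A minimal standalone sketch of that ordering discipline, using toy stand-ins for the real checks (all names here are illustrative, not from the codebase):
package main
import (
	"errors"
	"fmt"
)
// Toy stand-ins for checkBucketExist and IsValidObjectName.
var errBucketNotFound = errors.New("bucket not found")
var errObjectNameInvalid = errors.New("object name invalid")
// checkArgs mirrors the ordering used by checkPutObjectArgs above:
// bucket existence is verified before the object name.
func checkArgs(bucketExists, objectValid bool) error {
	if !bucketExists {
		return errBucketNotFound
	}
	if !objectValid {
		return errObjectNameInvalid
	}
	return nil
}
func main() {
	fmt.Println(checkArgs(false, false)) // bucket not found wins over the name error
	fmt.Println(checkArgs(true, false))  // object name invalid
	fmt.Println(checkArgs(true, true))   // <nil>
}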

View File

@@ -23,7 +23,6 @@ import (
"io/ioutil"
"os"
"path"
"runtime"
"testing"
humanize "github.com/dustin/go-humanize"
@@ -329,9 +328,6 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
// Wrapper for calling Multipart PutObject tests for both XL multiple disks and single node setup.
func TestObjectAPIMultipartPutObjectStaleFiles(t *testing.T) {
if runtime.GOOS == "windows" {
return
}
ExecObjectLayerStaleFilesTest(t, testObjectAPIMultipartPutObjectStaleFiles)
}

View File

@@ -96,6 +96,11 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}
// Lock the object before reading.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "Unable to fetch object info.")
@@ -195,6 +200,11 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return
}
// Lock the object before reading.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "Unable to fetch object info.")
@@ -269,6 +279,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}
// Lock the object before reading.
objectRLock := globalNSMutex.NewNSLock(sourceBucket, sourceObject)
objectRLock.RLock()
defer objectRLock.RUnlock()
objInfo, err := objectAPI.GetObjectInfo(sourceBucket, sourceObject)
if err != nil {
errorIf(err, "Unable to fetch object info.")
@@ -311,6 +326,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
delete(metadata, "md5Sum")
sha256sum := ""
objectWLock := globalNSMutex.NewNSLock(bucket, object)
objectWLock.Lock()
defer objectWLock.Unlock()
// Create the object.
objInfo, err = objectAPI.PutObject(bucket, object, size, pipeReader, metadata, sha256sum)
if err != nil {
@@ -400,6 +420,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
sha256sum := ""
// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
var objInfo ObjectInfo
switch rAuthType {
default:
@@ -731,6 +756,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
completeParts = append(completeParts, part)
}
// Hold write lock on the object.
destLock := globalNSMutex.NewNSLock(bucket, object)
destLock.Lock()
defer destLock.Unlock()
md5Sum, err = objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil {
err = errorCause(err)
@@ -801,6 +831,10 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return
}
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
/// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
/// Ignore delete object errors, since we are supposed to reply
/// only 204.
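All of the handler changes above follow one locking discipline: read paths (GetObject, HeadObject, the copy source) take a shared RLock, while write paths (PutObject, the copy destination, CompleteMultipartUpload, DeleteObject) take an exclusive Lock, each released with defer. A minimal standalone sketch of the same discipline, with sync.RWMutex standing in for globalNSMutex.NewNSLock (names illustrative):
package main
import (
	"fmt"
	"sync"
)
var nsLock sync.RWMutex // stand-in for globalNSMutex.NewNSLock(bucket, object)
func getObject() {
	nsLock.RLock() // readers may proceed concurrently
	defer nsLock.RUnlock()
	fmt.Println("reading object")
}
func putObject() {
	nsLock.Lock() // writers need exclusive access
	defer nsLock.Unlock()
	fmt.Println("writing object")
}
func main() {
	putObject()
	getObject()
}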

View File

@@ -31,7 +31,7 @@ func hasPosixReservedPrefix(name string) (isReserved bool) {
isReserved = true
break
}
isReserved = false
}
return isReserved
}

View File

@@ -957,7 +957,7 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
}
// Remove parent dir of the source file if empty
if parentDir := slashpath.Dir(preparePath(srcFilePath)); isDirEmpty(parentDir) {
if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) {
deleteFile(srcVolumeDir, parentDir)
}

View File

@@ -42,7 +42,7 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s"]`, objectKey)
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
// Add content length condition, only accept content sizes of a given length.
contentLengthCondStr := `["content-length-range", 1024, 1048576]`
// Add the algorithm condition, only accept AWS SignV4 Sha256.
@@ -71,7 +71,7 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s"]`, objectKey)
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
// Add the algorithm condition, only accept AWS SignV4 Sha256.
algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
// Add the date condition, only accept the current date.
@@ -96,7 +96,7 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s"]`, objectKey)
keyConditionStr := fmt.Sprintf(`["starts-with", "$key", "%s/upload.txt"]`, objectKey)
// Combine all conditions into one string.
conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)
@@ -108,13 +108,13 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []
return []byte(retStr)
}
// Wrapper for calling TestPostPolicyHandlerHandler tests for both XL multiple disks and single node setup.
func TestPostPolicyHandler(t *testing.T) {
ExecObjectLayerTest(t, testPostPolicyHandler)
// Wrapper for calling TestPostPolicyBucketHandler tests for both XL multiple disks and single node setup.
func TestPostPolicyBucketHandler(t *testing.T) {
ExecObjectLayerTest(t, testPostPolicyBucketHandler)
}
// testPostPolicyHandler - Tests validate post policy handler uploading objects.
func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
// testPostPolicyBucketHandler - Tests validate post policy handler uploading objects.
func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
root, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Initializing config.json failed")
@@ -133,17 +133,11 @@ func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandle
// Register the API end points with XL/FS object layer.
apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
credentials := serverConfig.GetCredential()
curTime := time.Now().UTC()
curTimePlus5Min := curTime.Add(time.Minute * 5)
// bucketnames[0].
// objectNames[0].
// uploadIds [0].
@@ -237,6 +231,102 @@ func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandle
}
}
// Test cases for signature-V4.
testCasesV4BadData := []struct {
objectName string
data []byte
expectedRespStatus int
accessKey string
secretKey string
dates []interface{}
policy string
corruptedBase64 bool
corruptedMultipart bool
}{
// Success case.
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusNoContent,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`,
},
// Corrupted Base 64 result
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`,
corruptedBase64: true,
},
// Corrupted Multipart body
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`,
corruptedMultipart: true,
},
// Bad case invalid request.
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
accessKey: "",
secretKey: "",
dates: []interface{}{},
policy: ``,
},
// Expired document
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
dates: []interface{}{curTime.Add(-1 * time.Minute * 5).Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`,
},
// Corrupted policy document
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"3/aws4_request"]]}`,
},
}
for i, testCase := range testCasesV4BadData {
// initialize HTTP NewRecorder; this records any mutations to the response writer inside the handler.
rec := httptest.NewRecorder()
testCase.policy = fmt.Sprintf(testCase.policy, testCase.dates...)
req, perr := newPostRequestV4Generic("", bucketName, testCase.objectName, testCase.data, testCase.accessKey,
testCase.secretKey, curTime, []byte(testCase.policy), testCase.corruptedBase64, testCase.corruptedMultipart)
if perr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP method to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
}
testCases2 := []struct {
objectName string
data []byte
@@ -314,7 +404,7 @@ func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secret
formData := map[string]string{
"AWSAccessKeyId": accessKey,
"bucket": bucketName,
"key": objectName,
"key": objectName + "/${filename}",
"policy": encodedPolicy,
"signature": signature,
}
@@ -328,7 +418,7 @@ func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secret
w.WriteField(k, v)
}
// Set the File formData
writer, err := w.CreateFormFile("file", "s3verify/post/object")
writer, err := w.CreateFormFile("file", "upload.txt")
if err != nil {
return nil, err
@@ -350,27 +440,36 @@ func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secret
return req, nil
}
func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, contentLengthRange bool) (*http.Request, error) {
// Keep time.
t := time.Now().UTC()
func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, contentLengthRange bool) []byte {
// Expire the request five minutes from now.
expirationTime := t.Add(time.Minute * 5)
// Get the user credential.
credStr := getCredential(accessKey, serverConfig.GetRegion(), t)
// Create a new post policy.
policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime)
if contentLengthRange {
policy = newPostPolicyBytesV4WithContentRange(credStr, bucketName, objectName, expirationTime)
}
return policy
}
func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, t time.Time, policy []byte, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
// Get the user credential.
credStr := getCredential(accessKey, serverConfig.GetRegion(), t)
// Only need the encoding.
encodedPolicy := base64.StdEncoding.EncodeToString(policy)
if corruptedB64 {
encodedPolicy = "%!~&" + encodedPolicy
}
// Presign with V4 signature based on the policy.
signature := postPresignSignatureV4(encodedPolicy, t, secretKey, serverConfig.GetRegion())
formData := map[string]string{
"bucket": bucketName,
"key": objectName,
"key": objectName + "/${filename}",
"x-amz-credential": credStr,
"policy": encodedPolicy,
"x-amz-signature": signature,
@@ -386,15 +485,17 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []
for k, v := range formData {
w.WriteField(k, v)
}
// Set the File formData
writer, err := w.CreateFormFile("file", "s3verify/post/object")
if err != nil {
// return nil, err
return nil, err
// Set the File formData, but skip it if we want to send an incomplete multipart request
if !corruptedMultipart {
writer, err := w.CreateFormFile("file", "upload.txt")
if err != nil {
return nil, err
}
writer.Write(objData)
// Close before creating the new request.
w.Close()
}
writer.Write(objData)
// Close before creating the new request.
w.Close()
// Set the body equal to the created policy.
reader := bytes.NewReader(buf.Bytes())
@@ -410,9 +511,13 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []
}
func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, true)
t := time.Now().UTC()
policy := buildGenericPolicy(t, accessKey, bucketName, objectName, true)
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, false, false)
}
func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, false)
t := time.Now().UTC()
policy := buildGenericPolicy(t, accessKey, bucketName, objectName, false)
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, false, false)
}

View File

@@ -18,8 +18,11 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"
)
@@ -33,17 +36,26 @@ func toString(val interface{}) string {
return ""
}
// toLowerString - safely convert interface to lower string
func toLowerString(val interface{}) string {
return strings.ToLower(toString(val))
}
// toInteger - safely convert interface to integer without causing panic.
func toInteger(val interface{}) int64 {
func toInteger(val interface{}) (int64, error) {
switch v := val.(type) {
case float64:
return int64(v)
return int64(v), nil
case int64:
return v
return v, nil
case int:
return int64(v)
return int64(v), nil
case string:
i, err := strconv.Atoi(v)
return int64(i), err
}
return 0
return 0, errors.New("Invalid number format")
}
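The motivation for the new (int64, error) signature is that JSON decodes numbers as float64 while clients may also send sizes as strings; both now convert cleanly, and anything else fails loudly instead of silently becoming 0. A standalone sketch (toInteger copied verbatim from above):
package main
import (
	"errors"
	"fmt"
	"strconv"
)
// Verbatim copy of toInteger from the diff above, for illustration.
func toInteger(val interface{}) (int64, error) {
	switch v := val.(type) {
	case float64:
		return int64(v), nil
	case int64:
		return v, nil
	case int:
		return int64(v), nil
	case string:
		i, err := strconv.Atoi(v)
		return int64(i), err
	}
	return 0, errors.New("Invalid number format")
}
func main() {
	fmt.Println(toInteger(float64(1048576))) // 1048576 <nil> (JSON numbers decode as float64)
	fmt.Println(toInteger("1024"))           // 1024 <nil>
	fmt.Println(toInteger("10MB"))           // 0 and a strconv parse error
	fmt.Println(toInteger([]byte("1")))      // 0 Invalid number format
}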
// isString - Safely check if val is of type string without causing panic.
@@ -111,7 +123,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
}
// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
// In this case we will just collapse this into "eq" for all use cases.
parsedPolicy.Conditions.Policies["$"+k] = struct {
parsedPolicy.Conditions.Policies["$"+strings.ToLower(k)] = struct {
Operator string
Value string
}{
@@ -123,7 +135,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
if len(condt) != 3 { // Return error if we have insufficient elements.
return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String())
}
switch toString(condt[0]) {
switch toLowerString(condt[0]) {
case "eq", "starts-with":
for _, v := range condt { // Pre-check all values for type.
if !isString(v) {
@@ -131,7 +143,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt)
}
}
operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2])
operator, matchType, value := toLowerString(condt[0]), toLowerString(condt[1]), toString(condt[2])
parsedPolicy.Conditions.Policies[matchType] = struct {
Operator string
Value string
@@ -140,9 +152,19 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
Value: value,
}
case "content-length-range":
min, err := toInteger(condt[1])
if err != nil {
return parsedPolicy, err
}
max, err := toInteger(condt[2])
if err != nil {
return parsedPolicy, err
}
parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{
Min: toInteger(condt[1]),
Max: toInteger(condt[2]),
Min: min,
Max: max,
Valid: true,
}
default:
@@ -158,40 +180,74 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
return parsedPolicy, nil
}
// startsWithConds - map which indicates if a given condition supports the starts-with policy operator
var startsWithConds = map[string]bool{
"$acl": true,
"$bucket": false,
"$cache-control": true,
"$content-type": true,
"$content-disposition": true,
"$content-encoding": true,
"$expires": true,
"$key": true,
"$success_action_redirect": true,
"$redirect": true,
"$success_action_status": false,
"$x-amz-algorithm": false,
"$x-amz-credential": false,
"$x-amz-date": false,
}
// checkPolicyCond returns a boolean to indicate if a condition is satisfied according
// to the passed operator
func checkPolicyCond(op string, input1, input2 string) bool {
switch op {
case "eq":
return input1 == input2
case "starts-with":
return strings.HasPrefix(input1, input2)
}
return false
}
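A quick standalone illustration of the two supported operators (function body copied verbatim from above):
package main
import (
	"fmt"
	"strings"
)
func checkPolicyCond(op string, input1, input2 string) bool {
	switch op {
	case "eq":
		return input1 == input2
	case "starts-with":
		return strings.HasPrefix(input1, input2)
	}
	return false
}
func main() {
	fmt.Println(checkPolicyCond("eq", "testbucket", "testbucket"))           // true
	fmt.Println(checkPolicyCond("starts-with", "user/user1/a.txt", "user/")) // true
	fmt.Println(checkPolicyCond("starts-with", "a.txt", "user/"))            // false
	fmt.Println(checkPolicyCond("gt", "2", "1"))                             // false: unknown operator
}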
// checkPostPolicy - apply policy conditions and validate input values.
// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html)
func checkPostPolicy(formValues map[string]string, postPolicyForm PostPolicyForm) APIErrorCode {
// Check if policy document expiry date is still not reached
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return ErrPolicyAlreadyExpired
}
if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
return ErrAccessDenied
}
}
if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
return ErrAccessDenied
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
return ErrAccessDenied
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
return ErrAccessDenied
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Key"], postPolicyForm.Conditions.Policies["$key"].Value) {
return ErrAccessDenied
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
if formValues["Key"] != postPolicyForm.Conditions.Policies["$key"].Value {
// Flag to indicate if all policy conditions are satisfied
condPassed := true
// Iterate over policy conditions and check them against received form fields
for cond, v := range postPolicyForm.Conditions.Policies {
// Form field names are in canonical format; convert condition names
// to canonical form for simplicity, so `$key` becomes `Key`
formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(cond, "$"))
// Operator for the current policy condition
op := v.Operator
// If the current policy condition is known
if startsWithSupported, condFound := startsWithConds[cond]; condFound {
// Check if the current condition supports starts-with operator
if op == "starts-with" && !startsWithSupported {
return ErrAccessDenied
}
// Check if current policy condition is satisfied
condPassed = checkPolicyCond(op, formValues[formCanonicalName], v.Value)
} else {
// This covers all conditions X-Amz-Meta-* and X-Amz-*
if strings.HasPrefix(cond, "$x-amz-meta-") || strings.HasPrefix(cond, "$x-amz-") {
// Check if policy condition is satisfied
condPassed = checkPolicyCond(op, formValues[formCanonicalName], v.Value)
}
}
// Check if current policy condition is satisfied, quit immediately otherwise
if !condPassed {
return ErrAccessDenied
}
}
return ErrNone
}
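The canonicalization step is the subtle part of the new loop: policy condition names are lower-case with a `$` prefix, while the parsed form fields use Go's canonical MIME header keys. A standalone sketch of the mapping:
package main
import (
	"fmt"
	"net/http"
	"strings"
)
func main() {
	// How condition names map to form field names in checkPostPolicy above.
	for _, cond := range []string{"$key", "$x-amz-date", "$content-type", "$x-amz-meta-uuid"} {
		fmt.Println(cond, "->", http.CanonicalHeaderKey(strings.TrimPrefix(cond, "$")))
	}
	// Output:
	// $key -> Key
	// $x-amz-date -> X-Amz-Date
	// $content-type -> Content-Type
	// $x-amz-meta-uuid -> X-Amz-Meta-Uuid
}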

View File

@@ -25,39 +25,53 @@ import (
func TestPostPolicyForm(t *testing.T) {
type testCase struct {
Bucket string
Key string
XAmzDate string
XAmzAlgorithm string
ContentType string
Policy string
ErrCode APIErrorCode
Bucket string
Key string
ACL string
XAmzDate string
XAmzServerSideEncryption string
XAmzAlgorithm string
XAmzCredential string
XAmzMetaUUID string
ContentType string
SuccessActionRedirect string
Policy string
ErrCode APIErrorCode
}
testCases := []testCase{
// Different AMZ date
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDIwMDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Key which doesn't start with user/user1/filename
{Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDIwMDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Everything is fine with this test
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrNone},
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", SuccessActionRedirect: "http://127.0.0.1:9000/", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrNone},
// Expired policy document
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", SuccessActionRedirect: "http://127.0.0.1:9000/", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQo=", ErrCode: ErrPolicyAlreadyExpired},
// Passing AMZ date with starts-with operator which is forbidden
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", SuccessActionRedirect: "http://127.0.0.1:9000/", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMTE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LWRhdGUiLCAiMjAxNjA3MjdUMDAwMDAwWiJdIF0gfQo=", ErrCode: ErrAccessDenied},
// Different AMZ date
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDIwMDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Key which doesn't start with user/user1/filename
{Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDIwMDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Incorrect bucket name.
{Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
{Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Incorrect key name
{Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
{Bucket: "testbucket", Key: "incorrect", ACL: "public-read", XAmzDate: "20160727T000000Z", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Incorrect date
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
// Incorrect ContentType
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", ACL: "public-read", XAmzServerSideEncryption: "AES256", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", Policy: "eyAiZXhwaXJhdGlvbiI6ICIyMDE3LTEyLTMwVDEyOjAwOjAwLjAwMFoiLCAiY29uZGl0aW9ucyI6IFsgeyJidWNrZXQiOiAidGVzdGJ1Y2tldCJ9LCBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS9maWxlbmFtZSJdLCB7ImFjbCI6ICJwdWJsaWMtcmVhZCJ9LCB7InN1Y2Nlc3NfYWN0aW9uX3JlZGlyZWN0IjogImh0dHA6Ly8xMjcuMC4wLjE6OTAwMC8ifSwgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLCBbImNvbnRlbnQtbGVuZ3RoLXJhbmdlIiwgMTA0ODU3OSwgMTA0ODU3NjBdLCB7IngtYW16LW1ldGEtdXVpZCI6ICIxNDM2NTEyMzY1MTI3NCJ9LCB7IngtYW16LXNlcnZlci1zaWRlLWVuY3J5cHRpb24iOiAiQUVTMjU2In0sIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLCB7IngtYW16LWNyZWRlbnRpYWwiOiAiS1ZHS01EVVEyM1RDWlhUTFRITFAvMjAxNjA3MjcvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sIHsieC1hbXotZGF0ZSI6ICIyMDE2MDcyN1QwMDAwMDBaIiB9IF0gfQ==", ErrCode: ErrAccessDenied},
}
// Validate all the test cases.
for i, tt := range testCases {
formValues := make(map[string]string)
formValues["Bucket"] = tt.Bucket
formValues["Acl"] = tt.ACL
formValues["Key"] = tt.Key
formValues["X-Amz-Date"] = tt.XAmzDate
formValues["X-Amz-Meta-Uuid"] = tt.XAmzMetaUUID
formValues["X-Amz-Server-Side-Encryption"] = tt.XAmzServerSideEncryption
formValues["X-Amz-Algorithm"] = tt.XAmzAlgorithm
formValues["X-Amz-Credential"] = tt.XAmzCredential
formValues["Content-Type"] = tt.ContentType
formValues["Policy"] = tt.Policy
formValues["Success_action_redirect"] = tt.SuccessActionRedirect
policyBytes, err := base64.StdEncoding.DecodeString(tt.Policy)
if err != nil {
t.Fatal(err)

View File

@@ -27,7 +27,7 @@ import (
)
// Helper to format integer sequences into a friendlier, user-consumable format.
func int2Str(i int, t int) string {
func formatInts(i int, t int) string {
if i < 10 {
if t < 10 {
return fmt.Sprintf("0%d/0%d", i, t)
@@ -111,7 +111,7 @@ func getHealMsg(endpoints []*url.URL, storageDisks []StorageAPI) string {
}
msg += fmt.Sprintf(
"\n[%s] %s - %s %s",
int2Str(i+1, len(storageDisks)),
formatInts(i+1, len(storageDisks)),
endpoints[i],
humanize.IBytes(uint64(info.Total)),
func() string {
@@ -141,7 +141,7 @@ func getStorageInitMsg(titleMsg string, endpoints []*url.URL, storageDisks []Sto
}
msg += fmt.Sprintf(
"\n[%s] %s - %s %s",
int2Str(i+1, len(storageDisks)),
formatInts(i+1, len(storageDisks)),
endpoints[i],
humanize.IBytes(uint64(info.Total)),
func() string {
@@ -178,7 +178,7 @@ func getConfigErrMsg(storageDisks []StorageAPI, sErrs []error) string {
}
msg += fmt.Sprintf(
"\n[%s] %s : %s",
int2Str(i+1, len(storageDisks)),
formatInts(i+1, len(storageDisks)),
storageDisks[i],
sErrs[i],
)
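Only the i < 10, t < 10 branch of formatInts is visible in this hunk; a hedged reconstruction of the whole helper (the remaining branches are assumptions following the same zero-padding idea) shows the intended "position/total" output:
package main
import "fmt"
// Hedged reconstruction of formatInts: only the first branch appears in
// the diff above, the other branches are assumed symmetrical.
func formatInts(i int, t int) string {
	if i < 10 {
		if t < 10 {
			return fmt.Sprintf("0%d/0%d", i, t)
		}
		return fmt.Sprintf("0%d/%d", i, t)
	}
	return fmt.Sprintf("%d/%d", i, t)
}
func main() {
	fmt.Println(formatInts(1, 4))   // 01/04
	fmt.Println(formatInts(3, 16))  // 03/16
	fmt.Println(formatInts(12, 16)) // 12/16
}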

View File

@@ -201,6 +201,13 @@ func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []S
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// Prepare getElapsedTime() to calculate the elapsed time since we started trying to format the disks.
// All times are rounded to avoid showing milli-, micro- and nanoseconds.
formatStartTime := time.Now().Round(time.Second)
getElapsedTime := func() string {
return time.Now().Round(time.Second).Sub(formatStartTime).String()
}
// Wait on the jitter retry loop.
retryTimerCh := newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh)
for {
@@ -213,51 +220,59 @@ func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []S
// for disks not being available.
printRetryMsg(sErrs, storageDisks)
}
// Check if this is an XL or distributed XL setup; anything > 1 is considered an XL backend.
if len(formatConfigs) > 1 {
switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
case Abort:
return errCorruptedFormat
case FormatDisks:
console.Eraseline()
printFormatMsg(endpoints, storageDisks, printOnceFn())
return initFormatXL(storageDisks)
case InitObjectLayer:
console.Eraseline()
// Validate formats load before proceeding forward.
err := genericFormatCheck(formatConfigs, sErrs)
if err == nil {
printRegularMsg(endpoints, storageDisks, printOnceFn())
if len(formatConfigs) == 1 {
err := genericFormatCheckFS(formatConfigs[0], sErrs[0])
if err != nil {
if err == errUnformattedDisk {
return initFormatFS(storageDisks[0])
}
return err
case WaitForHeal:
// Validate formats load before proceeding forward.
err := genericFormatCheck(formatConfigs, sErrs)
if err == nil {
printHealMsg(endpoints, storageDisks, printOnceFn())
}
return err
case WaitForQuorum:
console.Printf(
"Initializing data volume. Waiting for minimum %d servers to come online.\n",
len(storageDisks)/2+1,
)
case WaitForConfig:
// Print configuration errors.
printConfigErrMsg(storageDisks, sErrs, printOnceFn())
case WaitForAll:
console.Println("Initializing data volume for first time. Waiting for other servers to come online")
case WaitForFormatting:
console.Println("Initializing data volume for first time. Waiting for first server to come online")
}
continue
} // else we have an FS backend now. Check the FS format as well.
if isFormatFound(formatConfigs) {
return nil
} // Check if this is an XL or distributed XL setup; anything > 1 is considered an XL backend.
// Pre-emptively check if one of the formatted disks
// is invalid. This function returns success for the
// most part unless one of the formats is not consistent
// with the expected XL format, for example if a user is trying
// to pool an FS backend.
if err := checkFormatXLValues(formatConfigs); err != nil {
return err
}
switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
case Abort:
return errCorruptedFormat
case FormatDisks:
console.Eraseline()
// Validate formats load before proceeding forward.
return genericFormatCheck(formatConfigs, sErrs)
} // else initialize the format for FS.
return initFormatFS(storageDisks[0])
printFormatMsg(endpoints, storageDisks, printOnceFn())
return initFormatXL(storageDisks)
case InitObjectLayer:
console.Eraseline()
// Validate formats loaded before proceeding forward.
err := genericFormatCheckXL(formatConfigs, sErrs)
if err == nil {
printRegularMsg(endpoints, storageDisks, printOnceFn())
}
return err
case WaitForHeal:
// Validate formats loaded before proceeding forward.
err := genericFormatCheckXL(formatConfigs, sErrs)
if err == nil {
printHealMsg(endpoints, storageDisks, printOnceFn())
}
return err
case WaitForQuorum:
console.Printf(
"Initializing data volume. Waiting for minimum %d servers to come online. (elapsed %s)\n",
len(storageDisks)/2+1, getElapsedTime(),
)
case WaitForConfig:
// Print configuration errors.
printConfigErrMsg(storageDisks, sErrs, printOnceFn())
case WaitForAll:
console.Printf("Initializing data volume for first time. Waiting for other servers to come online (elapsed %s)\n", getElapsedTime())
case WaitForFormatting:
console.Printf("Initializing data volume for first time. Waiting for first server to come online (elapsed %s)\n", getElapsedTime())
}
case <-globalServiceDoneCh:
return errors.New("Initializing data volumes gracefully stopped")
}

View File

@@ -75,6 +75,30 @@ func newObjectLayer(storageDisks []StorageAPI) (ObjectLayer, error) {
return objAPI, nil
}
// Composed function registering routers only for a distributed XL setup.
func registerDistXLRouters(mux *router.Router, srvCmdConfig serverCmdConfig) error {
// Register storage rpc router only if it's a distributed setup.
err := registerStorageRPCRouters(mux, srvCmdConfig)
if err != nil {
return err
}
// Register distributed namespace lock.
err = registerDistNSLockRouter(mux, srvCmdConfig)
if err != nil {
return err
}
// Register S3 peer communication router.
err = registerS3PeerRPCRouter(mux)
if err != nil {
return err
}
// Register RPC router for web related calls.
return registerBrowserPeerRPCRouter(mux)
}
// configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) (http.Handler, error) {
// Initialize router. `SkipClean(true)` stops gorilla/mux from
@@ -83,32 +107,14 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) (http.Handler, error)
// Initialize distributed NS lock.
if globalIsDistXL {
// Register storage rpc router only if it's a distributed setup.
err := registerStorageRPCRouters(mux, srvCmdConfig)
if err != nil {
registerDistXLRouters(mux, srvCmdConfig)
}
// Register the web router when it's enabled.
if globalIsBrowserEnabled {
if err := registerWebRouter(mux); err != nil {
return nil, err
}
// Register distributed namespace lock.
err = registerDistNSLockRouter(mux, srvCmdConfig)
if err != nil {
return nil, err
}
}
// Register S3 peer communication router.
err := registerS3PeerRPCRouter(mux)
if err != nil {
return nil, err
}
// Register RPC router for web related calls.
if err = registerBrowserPeerRPCRouter(mux); err != nil {
return nil, err
}
if err = registerWebRouter(mux); err != nil {
return nil, err
}
// Add API router.

View File

@@ -71,8 +71,8 @@ func makeS3Peers(eps []*url.URL) s3Peers {
}
ret = append(ret, s3Peer{
ep.Host,
&remoteBucketMetaState{newAuthClient(&cfg)},
addr: ep.Host,
bmsClient: &remoteBucketMetaState{newAuthClient(&cfg)},
})
seenAddr[ep.Host] = true
}
@@ -108,7 +108,7 @@ func (s3p s3Peers) GetPeerClient(peer string) BucketMetaState {
// The updates are sent via a type implementing the BucketMetaState
// interface. This makes sure that the local node is directly updated,
// and remote nodes are updated via RPC calls.
func (s3p s3Peers) SendUpdate(peerIndex []int, args interface{}) []error {
func (s3p s3Peers) SendUpdate(peerIndex []int, args BucketUpdater) []error {
// peer error array
errs := make([]error, len(s3p))
@@ -119,27 +119,7 @@ func (s3p s3Peers) SendUpdate(peerIndex []int, args interface{}) []error {
// Function that sends update to peer at `index`
sendUpdateToPeer := func(index int) {
defer wg.Done()
var err error
// Get BMS client for peer at `index`. The index is
// already checked for being within array bounds.
client := s3p[index].bmsClient
// Make the appropriate bucket metadata update
// according to the argument type
switch v := args.(type) {
case *SetBucketNotificationPeerArgs:
err = client.UpdateBucketNotification(v)
case *SetBucketListenerPeerArgs:
err = client.UpdateBucketListener(v)
case *SetBucketPolicyPeerArgs:
err = client.UpdateBucketPolicy(v)
default:
err = fmt.Errorf("Unknown arg in BucketMetaState updater - %v", args)
}
errs[index] = err
errs[index] = args.BucketUpdate(s3p[index].bmsClient)
}
// Special (but common) case of peerIndex == nil, implies send

View File

@@ -27,11 +27,13 @@ const (
)
type s3PeerAPIHandlers struct {
loginServer
bms BucketMetaState
}
func registerS3PeerRPCRouter(mux *router.Router) error {
s3PeerHandlers := &s3PeerAPIHandlers{
loginServer{},
&localBucketMetaState{
ObjectAPI: newObjectLayerFn,
},

View File

@@ -16,26 +16,6 @@
package cmd
import "time"
func (s3 *s3PeerAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential())
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.ServerVersion = Version
reply.Timestamp = time.Now().UTC()
return nil
}
// SetBucketNotificationPeerArgs - Arguments collection to SetBucketNotificationPeer RPC
// call
type SetBucketNotificationPeerArgs struct {
@@ -48,6 +28,13 @@ type SetBucketNotificationPeerArgs struct {
NCfg *notificationConfig
}
// BucketUpdate - implements bucket notification updates;
// the underlying operation is a network call that updates all
// the peers participating in bucket notification.
func (s *SetBucketNotificationPeerArgs) BucketUpdate(client BucketMetaState) error {
return client.UpdateBucketNotification(s)
}
func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *GenericReply) error {
// check auth
if !isRPCTokenValid(args.Token) {
@@ -68,6 +55,13 @@ type SetBucketListenerPeerArgs struct {
LCfg []listenerConfig
}
// BucketUpdate - implements bucket listener updates;
// the underlying operation is a network call that updates all
// the peers participating in listen bucket notification.
func (s *SetBucketListenerPeerArgs) BucketUpdate(client BucketMetaState) error {
return client.UpdateBucketListener(s)
}
func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *GenericReply) error {
// check auth
if !isRPCTokenValid(args.Token) {
@@ -110,6 +104,13 @@ type SetBucketPolicyPeerArgs struct {
PChBytes []byte
}
// BucketUpdate - implements bucket policy updates;
// the underlying operation is a network call that updates all
// the peers with newly set or unset policies.
func (s *SetBucketPolicyPeerArgs) BucketUpdate(client BucketMetaState) error {
return client.UpdateBucketPolicy(s)
}
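The BucketUpdater interface itself is not shown in this hunk, but from its call site in s3Peers.SendUpdate (errs[index] = args.BucketUpdate(s3p[index].bmsClient)) and the three BucketUpdate methods above, it is presumably declared along these lines (an inference, not verbatim from the diff):
// Presumed shape of the interface satisfied by the three *PeerArgs types.
type BucketUpdater interface {
	BucketUpdate(client BucketMetaState) error
}
This turns the old type switch into ordinary interface dispatch, so adding a new peer-update argument type no longer requires touching SendUpdate.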
// tell receiving server to update a bucket policy
func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *GenericReply) error {
// check auth

View File

@@ -55,8 +55,11 @@ FLAGS:
{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of 5 to 20 characters in length.
MINIO_SECRET_KEY: Password or secret key of 8 to 40 characters in length.
MINIO_ACCESS_KEY: Custom username or access key of 5 to 20 characters in length.
MINIO_SECRET_KEY: Custom password or secret key of 8 to 40 characters in length.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
EXAMPLES:
1. Start minio server on "/home/shared" directory.
@@ -363,8 +366,13 @@ func serverMain(c *cli.Context) {
cli.ShowCommandHelpAndExit(c, "server", 1)
}
// Set global quiet flag.
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")
// Set global variables after parsing passed arguments
setGlobalsFromContext(c)
// Initialization routines, such as config loading and enabling logging.
minioInit()
checkUpdate()
// Server address.
serverAddr := c.String("address")

View File

@@ -20,6 +20,7 @@ import (
"bufio"
"crypto/tls"
"errors"
"io"
"net"
"net/http"
"net/url"
@@ -75,7 +76,9 @@ func NewConnMux(c net.Conn) *ConnMux {
func (c *ConnMux) PeekProtocol() string {
buf, err := c.bufrw.Peek(maxHTTPVerbLen)
if err != nil {
errorIf(err, "Unable to peek into the protocol")
if err != io.EOF {
errorIf(err, "Unable to peek into the protocol")
}
return "http"
}
for _, m := range defaultHTTP1Methods {

View File

@@ -66,18 +66,18 @@ func setMaxMemory() error {
// If the rlimit memory is set lower than the max cache
// size, then derive the cache size from that limit instead.
if uint64(rLimit.Cur) < globalMaxCacheSize {
globalMaxCacheSize = (80 / 100) * uint64(rLimit.Cur)
globalMaxCacheSize = uint64(float64(50*rLimit.Cur) / 100)
}
// Make sure globalMaxCacheSize is less than RAM size.
stats, err := sys.GetStats()
if err != nil && err != sys.ErrNotImplemented {
// sys.GetStats() is implemented only on linux. Ignore errors
// from other OSes.
return err
}
if err == nil && stats.TotalRAM < globalMaxCacheSize {
globalMaxCacheSize = (80 / 100) * stats.TotalRAM
// If TotalRAM is >= minRAMSize we proceed to enable the cache.
// The cache is always 50% of the total RAM.
if err == nil && stats.TotalRAM >= minRAMSize {
globalMaxCacheSize = uint64(float64(50*stats.TotalRAM) / 100)
}
return nil
}
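As a worked example of the 50% rule: on a host with 8 GiB of RAM (assumed to be at or above minRAMSize), the cache ends up at 4 GiB. A standalone sketch of the arithmetic used above:
package main
import "fmt"
func main() {
	totalRAM := uint64(8 << 30) // 8 GiB, assumed >= minRAMSize
	cacheSize := uint64(float64(50*totalRAM) / 100)
	fmt.Println(cacheSize)          // 4294967296
	fmt.Println(cacheSize == 4<<30) // true: the cache is half of total RAM
}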

View File

@@ -18,6 +18,8 @@
package cmd
import "github.com/minio/minio/pkg/sys"
func setMaxOpenFiles() error {
// Golang uses the Win32 file API (CreateFile, WriteFile, ReadFile,
// CloseHandle, etc.), so there is no limit on open files
@@ -26,6 +28,15 @@ func setMaxOpenFiles() error {
}
func setMaxMemory() error {
// TODO: explore if Win32 API's provide anything special here.
// Make sure globalMaxCacheSize is less than RAM size.
stats, err := sys.GetStats()
if err != nil && err != sys.ErrNotImplemented {
return err
}
// If TotalRAM is >= minRAMSize we proceed to enable cache.
// cache is always 50% of the totalRAM.
if err == nil && stats.TotalRAM >= minRAMSize {
globalMaxCacheSize = uint64(float64(50*stats.TotalRAM) / 100)
}
return nil
}

View File

@@ -37,6 +37,7 @@ const (
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
// Whitelist resource list that will be used in query string for signature-V2 calculation.
// The list should be alphabetically sorted
var resourceList = []string{
"acl",
"delete",
@@ -47,6 +48,12 @@ var resourceList = []string{
"partNumber",
"policy",
"requestPayment",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
"response-content-language",
"response-content-type",
"response-expires",
"torrent",
"uploadId",
"uploads",

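For reference, a hedged sketch of how such a whitelist is typically used in Signature V2: only whitelisted query parameters, taken in the (already sorted) order of resourceList above, enter the CanonicalizedResource string. This is a simplification, not the exact Minio implementation.

package cmd

import (
	"net/url"
	"strings"
)

// canonicalizedResourceV2 appends whitelisted sub-resources to the
// path in sorted order, as the V2 string-to-sign requires.
func canonicalizedResourceV2(path string, query url.Values) string {
	var keep []string
	for _, name := range resourceList {
		vals, ok := query[name]
		if !ok {
			continue
		}
		if len(vals) > 0 && vals[0] != "" {
			keep = append(keep, name+"="+vals[0])
		} else {
			keep = append(keep, name) // value-less sub-resource, e.g. "?acl"
		}
	}
	if len(keep) == 0 {
		return path
	}
	return path + "?" + strings.Join(keep, "&")
}
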
View File

@@ -17,6 +17,7 @@
package cmd
import (
"bytes"
"io"
"net"
"net/rpc"
@@ -366,6 +367,13 @@ func (n *networkStorage) ReadFile(volume string, path string, offset int64, buff
}
}()
defer func() {
if r := recover(); r != nil {
// Recover any panic from allocation, and return error.
err = bytes.ErrTooLarge
}
}() // Do not crash the server.
// Take the remote disk offline if the total network errors
// are more than the maximum allowable IO error limit.
if n.networkIOErrCount > maxAllowedNetworkIOError {
@@ -377,10 +385,12 @@ func (n *networkStorage) ReadFile(volume string, path string, offset int64, buff
Vol: volume,
Path: path,
Offset: offset,
Size: len(buffer),
Buffer: buffer,
}, &result)
// Copy results to buffer.
copy(buffer, result)
// Return length of result, err if any.
return int64(len(result)), toStorageErr(err)
}

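The effect of this change, sketched end to end: the client now ships its own buffer inside the args, so the server never allocates from a wire-controlled Size field, and the recover above stays as a last-resort safety net. The rpcCaller interface and method-name plumbing below are simplified stand-ins.

package cmd

// readFileArgs mirrors the new ReadFileArgs shape in this diff.
type readFileArgs struct {
	Vol    string
	Path   string
	Offset int64
	Buffer []byte // caller-provided buffer, replacing the old Size field
}

// rpcCaller is the minimal client surface needed for the sketch.
type rpcCaller interface {
	Call(serviceMethod string, args interface{}, reply interface{}) error
}

// readFileOverRPC performs the round-trip: the server fills the
// buffer, replies with the bytes actually read, and the client
// copies them back into its own buffer.
func readFileOverRPC(c rpcCaller, vol, path string, offset int64, buffer []byte) (int64, error) {
	var result []byte
	err := c.Call("Storage.ReadFileHandler", &readFileArgs{
		Vol: vol, Path: path, Offset: offset, Buffer: buffer,
	}, &result)
	n := copy(buffer, result)
	return int64(n), err
}
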
View File

@@ -57,8 +57,8 @@ type ReadFileArgs struct {
// Starting offset to start reading into Buffer.
Offset int64
// Data size read from the path at offset.
Size int
// Data buffer read from the path at offset.
Buffer []byte
}
// PrepareFileArgs represents append file RPC arguments.

View File

@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"io"
"net/rpc"
"path"
@@ -30,32 +29,12 @@ import (
// Storage server implements rpc primitives to facilitate exporting a
// disk over a network.
type storageServer struct {
loginServer
storage StorageAPI
path string
timestamp time.Time
}
/// Auth operations
// Login - login handler.
func (s *storageServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential())
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = time.Now().UTC()
reply.ServerVersion = Version
return nil
}
/// Storage operations handlers.
// DiskInfoHandler - disk info handler is rpc wrapper for DiskInfo operation.
@@ -156,19 +135,12 @@ func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error
// ReadFileHandler - read file handler is rpc wrapper to read file.
func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err error) {
defer func() {
if r := recover(); r != nil {
// Recover any panic and return ErrCacheFull.
err = bytes.ErrTooLarge
}
}() // Do not crash the server.
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
// Allocate the requested buffer from the client.
*reply = make([]byte, args.Size)
var n int64
n, err = s.storage.ReadFile(args.Vol, args.Path, args.Offset, *reply)
n, err = s.storage.ReadFile(args.Vol, args.Path, args.Offset, args.Buffer)
// Sending an error over the RPC layer would cause unmarshalling to fail. In situations
// where we have a short read, i.e. `io.ErrUnexpectedEOF`, treat it as a good condition and copy
// the buffer properly.
@@ -176,7 +148,7 @@ func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err
// Reset to nil as good condition.
err = nil
}
*reply = (*reply)[0:n]
*reply = args.Buffer[0:n]
return err
}

View File

@@ -62,6 +62,9 @@ func init() {
// Disable printing console messages during tests.
color.Output = ioutil.Discard
// Enable caching.
setMaxMemory()
}
func prepareFS() (ObjectLayer, string, error) {
@@ -1921,6 +1924,12 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str
// ExecObjectLayerStaleFilesTest - executes object layer tests that leave stale
// files/directories under .minio/tmp. Creates an XL ObjectLayer instance and runs the test for the XL layer.
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
configPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatal("Failed to create config directory", err)
}
defer removeAll(configPath)
nDisks := 16
erasureDisks, err := getRandomDisks(nDisks)
if err != nil {

View File

@@ -265,8 +265,14 @@ func getReleaseUpdate(updateURL string, duration time.Duration) (updateMsg updat
// main entry point for update command.
func mainUpdate(ctx *cli.Context) {
// Set global quiet flag.
if ctx.Bool("quiet") || ctx.GlobalBool("quiet") {
// Set global variables after parsing passed arguments
setGlobalsFromContext(ctx)
// Initialization routine, such as config loading, enabling logging, ..
minioInit()
if globalQuiet {
return
}

View File

@@ -43,8 +43,14 @@ func mainVersion(ctx *cli.Context) {
if len(ctx.Args()) != 0 {
cli.ShowCommandHelpAndExit(ctx, "version", 1)
}
// Set global quiet flag.
if ctx.Bool("quiet") || ctx.GlobalBool("quiet") {
// Set global variables after parsing passed arguments
setGlobalsFromContext(ctx)
// Initialization routine, such as config loading, enabling logging, ..
minioInit()
if globalQuiet {
return
}

View File

@@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -148,6 +147,9 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
if !isJWTReqAuthenticated(r) {
return toJSONError(errAuthentication)
}
bucketLock := globalNSMutex.NewNSLock(args.BucketName, "")
bucketLock.Lock()
defer bucketLock.Unlock()
if err := objectAPI.MakeBucket(args.BucketName); err != nil {
return toJSONError(err, args.BucketName)
}
@@ -272,6 +274,11 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
if !isJWTReqAuthenticated(r) {
return toJSONError(errAuthentication)
}
objectLock := globalNSMutex.NewNSLock(args.BucketName, args.ObjectName)
objectLock.Lock()
defer objectLock.Unlock()
if err := objectAPI.DeleteObject(args.BucketName, args.ObjectName); err != nil {
if isErrObjectNotFound(err) {
// Ignore object not found error.
@@ -478,16 +485,15 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
// Extract incoming metadata if any.
metadata := extractMetadataFromHeader(r.Header)
sha256sum := ""
if _, err := objectAPI.PutObject(bucket, object, -1, r.Body, metadata, sha256sum); err != nil {
writeWebErrorResponse(w, err)
return
}
// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
// Fetch object info for notifications.
objInfo, err := objectAPI.GetObjectInfo(bucket, object)
sha256sum := ""
objInfo, err := objectAPI.PutObject(bucket, object, -1, r.Body, metadata, sha256sum)
if err != nil {
errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object))
writeWebErrorResponse(w, err)
return
}
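
All of the handler changes in this file follow one pattern: take the namespace lock before touching the object layer, exclusive for writes and shared for reads. A stand-in sketch with a process-local map of RW-mutexes; the real globalNSMutex is a distributed lock, so this only illustrates the calling convention.

package cmd

import "sync"

// nsMutexMap hands out one RW-mutex per (bucket, object) pair.
type nsMutexMap struct {
	mu    sync.Mutex
	locks map[string]*sync.RWMutex
}

// NewNSLock returns the lock guarding a single namespace entry.
func (n *nsMutexMap) NewNSLock(bucket, object string) *sync.RWMutex {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.locks == nil {
		n.locks = make(map[string]*sync.RWMutex)
	}
	key := bucket + "/" + object
	if n.locks[key] == nil {
		n.locks[key] = &sync.RWMutex{}
	}
	return n.locks[key]
}

// Writers (Upload, RemoveObject, MakeBucket) wrap the mutation in
// Lock/Unlock; readers (Download, HeadBucket) use RLock/RUnlock.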
@@ -534,6 +540,11 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
// Add content disposition.
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
// Lock the object before reading.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil {
writeWebErrorResponse(w, err)
@@ -691,16 +702,8 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
return toJSONError(err)
}
// Parse bucket policy.
var policy = &bucketPolicy{}
err = parseBucketPolicy(bytes.NewReader(data), policy)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
return toJSONError(err, args.BucketName)
}
// Parse check bucket policy.
if s3Error := checkBucketPolicyResources(args.BucketName, policy); s3Error != ErrNone {
// Parse validate and save bucket policy.
if s3Error := parseAndPersistBucketPolicy(args.BucketName, data, objectAPI); s3Error != ErrNone {
apiErr := getAPIError(s3Error)
var err error
if apiErr.Code == "XMinioPolicyNesting" {
@@ -710,12 +713,6 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
}
return toJSONError(err, args.BucketName)
}
// TODO: update policy statements according to bucket name,
// prefix and policy arguments.
if err := persistAndNotifyBucketPolicyChange(args.BucketName, policyChange{false, policy}, objectAPI); err != nil {
return toJSONError(err, args.BucketName)
}
reply.UIVersion = miniobrowser.UIVersion
return nil
}
@@ -808,8 +805,7 @@ func toJSONError(err error, params ...string) (jerr *json2.Error) {
case "InvalidBucketName":
if len(params) > 0 {
jerr = &json2.Error{
Message: fmt.Sprintf("Bucket Name %s is invalid. Lowercase letters, period and numerals are the only allowed characters.",
params[0]),
Message: fmt.Sprintf("Bucket Name %s is invalid. Lowercase letters, period and numerals are the only allowed characters.", params[0]),
}
}
// Bucket not found custom error message.

View File

@@ -36,10 +36,6 @@ func (xl xlObjects) MakeBucket(bucket string) error {
return traceError(BucketNameInvalid{Bucket: bucket})
}
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Initialize sync waitgroup.
var wg = &sync.WaitGroup{}
@@ -146,19 +142,6 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
return BucketInfo{}, err
}
// Checks whether bucket exists.
func (xl xlObjects) isBucketExist(bucket string) bool {
// Check whether bucket exists.
_, err := xl.getBucketInfo(bucket)
if err != nil {
if err == errVolumeNotFound {
return false
}
return false
}
return true
}
// GetBucketInfo - returns BucketInfo for a bucket.
func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// Verify if bucket is valid.
@@ -166,10 +149,6 @@ func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
}
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock.RLock()
defer bucketLock.RUnlock()
bucketInfo, err := xl.getBucketInfo(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
@@ -240,10 +219,6 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
return BucketNameInvalid{Bucket: bucket}
}
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Collect if all disks report volume not found.
var wg = &sync.WaitGroup{}
var dErrs = make([]error, len(xl.storageDisks))

View File

@@ -28,10 +28,10 @@ func healFormatXL(storageDisks []StorageAPI) (err error) {
// Attempt to load all `format.json`.
formatConfigs, sErrs := loadAllFormats(storageDisks)
// Generic format check validates
// if (no quorum) return error
// if (disks not recognized) // Always error.
if err = genericFormatCheck(formatConfigs, sErrs); err != nil {
// Generic format check.
// - if (no quorum) return error
// - if (disks not recognized) // Always error.
if err = genericFormatCheckXL(formatConfigs, sErrs); err != nil {
return err
}
@@ -58,14 +58,8 @@ func healFormatXL(storageDisks []StorageAPI) (err error) {
// also heals the missing entries for bucket metadata files
// `policy.json, notification.xml, listeners.json`.
func (xl xlObjects) HealBucket(bucket string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket exists.
if !xl.isBucketExist(bucket) {
return traceError(BucketNotFound{Bucket: bucket})
if err := checkBucketExist(bucket, xl); err != nil {
return err
}
// Heal bucket.
@@ -79,7 +73,7 @@ func (xl xlObjects) HealBucket(bucket string) error {
// Heal bucket - create buckets on disks where it does not exist.
func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error {
bucketLock := nsMutex.NewNSLock(bucket, "")
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
@@ -132,7 +126,7 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
// heals `policy.json`, `notification.xml` and `listeners.json`.
func healBucketMetadata(storageDisks []StorageAPI, bucket string, readQuorum int) error {
healBucketMetaFn := func(metaPath string) error {
metaLock := nsMutex.NewNSLock(minioMetaBucket, metaPath)
metaLock := globalNSMutex.NewNSLock(minioMetaBucket, metaPath)
metaLock.RLock()
defer metaLock.RUnlock()
// Heals the given file at metaPath.
@@ -347,18 +341,12 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum
// and later the disk comes back up again, heal on the object
// should delete it.
func (xl xlObjects) HealObject(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkGetObjArgs(bucket, object); err != nil {
return err
}
// Lock the object before healing.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()

View File

@@ -144,7 +144,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
}
// Check if the current object needs healing
objectLock := nsMutex.NewNSLock(bucket, objInfo.Name)
objectLock := globalNSMutex.NewNSLock(bucket, objInfo.Name)
objectLock.RLock()
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, bucket, objInfo.Name)
if xlShouldHeal(partsMetadata, errs) {
@@ -162,31 +162,8 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
// ListObjects - list all objects at prefix, delimited by '/'.
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket exists.
if !xl.isBucketExist(bucket) {
return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if marker != "" {
if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
}
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
return ListObjectsInfo{}, err
}
// With max keys of zero we have reached eof, return right here.

View File

@@ -0,0 +1,139 @@
/*
* Minio Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"strconv"
"testing"
)
// TestListObjectsHeal - Tests ListObjectsHeal API for XL
func TestListObjectsHeal(t *testing.T) {
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
// Create an instance of xl backend
xl, fsDirs, err := prepareXL()
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories
defer removeRoots(fsDirs)
bucketName := "bucket"
objName := "obj"
// Create test bucket
err = xl.MakeBucket(bucketName)
if err != nil {
t.Fatal(err)
}
// Put 500 objects under sane dir
for i := 0; i < 500; i++ {
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
}
// Put 500 objects under unsane/subdir dir
for i := 0; i < 500; i++ {
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
}
// Structure for testing
type testData struct {
bucket string
object string
marker string
delimiter string
maxKeys int
expectedErr error
foundObjs int
}
// Generic function for testing ListObjectsHeal, needs testData as a parameter
testFunc := func(testCase testData, testRank int) {
objectsNeedHeal, foundErr := xl.ListObjectsHeal(testCase.bucket, testCase.object, testCase.marker, testCase.delimiter, testCase.maxKeys)
if testCase.expectedErr == nil && foundErr != nil {
t.Fatalf("Test %d: Expected nil error, found: %v", testRank, foundErr)
}
if testCase.expectedErr != nil && (foundErr == nil || foundErr.Error() != testCase.expectedErr.Error()) {
t.Fatalf("Test %d: Found unexpected error: %v, expected: %v", testRank, foundErr, testCase.expectedErr)
}
if len(objectsNeedHeal.Objects) != testCase.foundObjs {
t.Fatalf("Test %d: Found unexpected number of objects: %d, expected: %v", testRank, len(objectsNeedHeal.Objects), testCase.foundObjs)
}
}
// Start tests
testCases := []testData{
// Wrong bucket name
{"foobucket", "", "", "", 1000, BucketNotFound{Bucket: "foobucket"}, 0},
// Nonexistent object
{bucketName, "inexistentObj", "", "", 1000, nil, 0},
// Test ListObjectsHeal when all objects are sane
{bucketName, "", "", "", 1000, nil, 0},
}
for i, testCase := range testCases {
testFunc(testCase, i+1)
}
// Test ListObjectsHeal when all objects under unsane need healing
xlObj := xl.(*xlObjects)
for i := 0; i < 500; i++ {
if err = xlObj.storageDisks[0].DeleteFile(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i)+"/xl.json"); err != nil {
t.Fatal(err)
}
}
// Start tests again with some objects that need healing
testCases = []testData{
// Test ListObjectsHeal when all objects under unsane/ need to be healed
{bucketName, "", "", "", 1000, nil, 500},
// List objects heal under unsane/, should return all elements
{bucketName, "unsane/", "", "", 1000, nil, 500},
// List healing objects under sane/, should return 0
{bucketName, "sane/", "", "", 1000, nil, 0},
// Max Keys == 200
{bucketName, "unsane/", "", "", 200, nil, 200},
// Max key > 1000
{bucketName, "unsane/", "", "", 5000, nil, 500},
// Prefix == Delimiter == "/"
{bucketName, "/", "", "/", 5000, nil, 0},
// Max Keys == 0
{bucketName, "", "", "", 0, nil, 0},
// Testing with marker parameter
{bucketName, "", "unsane/subdir/" + objName + "0", "", 1000, nil, 499},
}
for i, testCase := range testCases {
testFunc(testCase, i+1)
}
}

View File

@@ -100,31 +100,8 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
// ListObjects - list all objects at prefix, delimited by '/'.
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket exists.
if !xl.isBucketExist(bucket) {
return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if marker != "" {
if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
}
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
return ListObjectsInfo{}, err
}
// With max keys of zero we have reached eof, return right here.

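Each entry point in this diff trades roughly twenty inline lines for one call to a shared validator. A sketch of what such a helper centralizes, with plain error strings standing in for the typed errors (BucketNameInvalid, BucketNotFound, and friends) returned by the real checkListObjsArgs:

package cmd

import (
	"errors"
	"strings"
)

// bucketExister is the minimal surface the validator needs.
type bucketExister interface {
	isBucketExist(bucket string) bool
}

// checkListArgsSketch folds the repeated bucket, prefix, marker and
// delimiter checks into a single place.
func checkListArgsSketch(bucket, prefix, marker, delimiter string, layer bucketExister) error {
	if bucket == "" {
		return errors.New("bucket name invalid")
	}
	if !layer.isBucketExist(bucket) {
		return errors.New("bucket not found: " + bucket)
	}
	if delimiter != "" && delimiter != "/" {
		return errors.New("unsupported delimiter: " + delimiter)
	}
	if marker != "" && !strings.HasPrefix(marker, prefix) {
		return errors.New("marker does not begin with prefix")
	}
	return nil
}
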
View File

@@ -29,7 +29,6 @@ import (
"time"
"github.com/minio/minio/pkg/mimedb"
"github.com/skyrings/skyring-common/tools/uuid"
)
// listMultipartUploads - lists all multipart uploads.
@@ -65,7 +64,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// uploadIDMarker first.
if uploadIDMarker != "" {
// hold lock on keyMarker path
keyMarkerLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
keyMarkerLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, keyMarker))
keyMarkerLock.RLock()
for _, disk := range xl.getLoadBalancedDisks() {
@@ -135,7 +134,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// For the new object entry we get all its
// pending uploadIDs.
entryLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
entryLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, entry))
entryLock.RLock()
var disk StorageAPI
@@ -210,48 +209,10 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// ListMultipartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
result := ListMultipartsInfo{}
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
return ListMultipartsInfo{}, err
}
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListMultipartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
if !xl.isBucketExist(bucket) {
return ListMultipartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListMultipartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListMultipartsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
return ListMultipartsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: keyMarker,
Prefix: prefix,
})
}
if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) {
return result, traceError(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return result, traceError(err)
}
if id.IsZero() {
return result, traceError(MalformedUploadID{
UploadID: uploadIDMarker,
})
}
}
return xl.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}
@@ -281,7 +242,7 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
// This lock needs to be held for any changes to the directory
// contents of ".minio.sys/multipart/object/"
objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object))
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
@@ -319,17 +280,8 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
//
// Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
// Verify if bucket name is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
// Verify if object name is valid.
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkNewMultipartArgs(bucket, object, xl); err != nil {
return "", err
}
// No metadata is set, allocate a new one.
if meta == nil {
@@ -344,16 +296,8 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
return "", err
}
var partsMetadata []xlMetaV1
@@ -361,7 +305,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
uploadIDPath := pathJoin(bucket, object, uploadID)
// pre-check upload id lock.
preUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
preUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
preUploadIDLock.RLock()
// Validates if upload ID exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
@@ -470,7 +414,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
// post-upload check (write) lock
postUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
postUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
postUploadIDLock.Lock()
defer postUploadIDLock.Unlock()
@@ -607,21 +551,13 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListPartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return ListPartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return ListPartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkListPartsArgs(bucket, object, xl); err != nil {
return ListPartsInfo{}, err
}
// Hold lock so that there is no competing
// abort-multipart-upload or complete-multipart-upload.
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
uploadIDLock.Lock()
defer uploadIDLock.Unlock()
@@ -640,19 +576,8 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
//
// Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return "", traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", traceError(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil {
return "", err
}
// Hold lock so that
@@ -661,7 +586,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
//
// 2) no one does a parallel complete-multipart-upload on this
// multipart upload
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
uploadIDLock.Lock()
defer uploadIDLock.Unlock()
@@ -779,21 +704,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
// Hold write lock on the destination before rename.
destLock := nsMutex.NewNSLock(bucket, object)
destLock.Lock()
defer func() {
// A new complete multipart upload invalidates any
// previously cached object in memory.
xl.objCache.Delete(path.Join(bucket, object))
if xl.objCacheEnabled {
// A new complete multipart upload invalidates any
// previously cached object in memory.
xl.objCache.Delete(path.Join(bucket, object))
// This lock also protects the cache namespace.
destLock.Unlock()
// Prefetch the object from disk by triggering a fake GetObject call
// Unlike a regular single PutObject, multipart PutObject comes in
// stages and is harder to cache.
go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard)
// Prefetch the object from disk by triggering a fake GetObject call
// Unlike a regular single PutObject, multipart PutObject comes in
// stages and is harder to cache.
go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard)
}
}()
// Rename if an object already exists to temporary location.
@@ -832,7 +753,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Hold the lock so that two parallel
// complete-multipart-uploads do not leave a stale
// uploads.json behind.
objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object))
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
@@ -858,7 +779,7 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
// hold lock so we don't compete with a complete, or abort
// multipart request.
objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object))
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
@@ -884,20 +805,13 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
if !xl.isBucketExist(bucket) {
return traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkAbortMultipartArgs(bucket, object, xl); err != nil {
return err
}
// Hold lock so that there is no competing
// complete-multipart-upload or put-object-part.
uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
uploadIDLock.Lock()
defer uploadIDLock.Unlock()

View File

@@ -50,13 +50,8 @@ var objectOpIgnoredErrs = []error{
// object to be read at. length indicates the total length of the
// object requested by client.
func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
if err := checkGetObjArgs(bucket, object); err != nil {
return err
}
// Start offset and length cannot be negative.
if startOffset < 0 || length < 0 {
@@ -67,11 +62,6 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
return traceError(errUnexpected)
}
// Lock the object before reading.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
// Read metadata associated with the object from all disks.
metaArr, errs := readAllXLMetadata(xl.storageDisks, bucket, object)
// Do we have read quorum?
@@ -223,18 +213,9 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ObjectInfo{}, BucketNameInvalid{Bucket: bucket}
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return ObjectInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
}
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.RLock()
defer objectLock.RUnlock()
info, err := xl.getObjectInfo(bucket, object)
if err != nil {
@@ -365,19 +346,8 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
// writes `xl.json` which carries the necessary metadata for future
// object operations.
func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ObjectInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify bucket exists.
if !xl.isBucketExist(bucket) {
return ObjectInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectName(object) {
return ObjectInfo{}, traceError(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
if err = checkPutObjectArgs(bucket, object, xl); err != nil {
return ObjectInfo{}, err
}
// No metadata is set, allocate a new one.
if metadata == nil {
@@ -506,11 +476,6 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
}
// Lock the object.
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
@@ -623,17 +588,9 @@ func (xl xlObjects) deleteObject(bucket, object string) error {
// any error as it is not necessary for the handler to reply back a
// response to the client request.
func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
if err = checkDelObjArgs(bucket, object); err != nil {
return err
}
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
objectLock := nsMutex.NewNSLock(bucket, object)
objectLock.Lock()
defer objectLock.Unlock()
// Validate object exists.
if !xl.isObject(bucket, object) {
@@ -646,8 +603,10 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
return toObjectErr(err, bucket, object)
}
// Delete from the cache.
xl.objCache.Delete(pathJoin(bucket, object))
if xl.objCacheEnabled {
// Delete from the cache.
xl.objCache.Delete(pathJoin(bucket, object))
}
// Success.
return nil

View File

@@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"
"sync"
@@ -42,8 +43,9 @@ const (
// Uploads metadata file carries per multipart object metadata.
uploadsJSONFile = "uploads.json"
// 8GiB cache by default.
maxCacheSize = 8 * humanize.GiByte
// Represents the minimum required RAM size before
// we enable caching.
minRAMSize = 8 * humanize.GiByte
// Maximum erasure blocks.
maxErasureBlocks = 16
@@ -92,9 +94,6 @@ func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) {
// Calculate data and parity blocks.
dataBlocks, parityBlocks := len(newStorageDisks)/2, len(newStorageDisks)/2
// Initialize object cache.
objCache := objcache.New(globalMaxCacheSize, globalCacheExpiry)
// Initialize list pool.
listPool := newTreeWalkPool(globalLookupTimeout)
@@ -103,13 +102,25 @@ func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) {
// Initialize xl objects.
xl := &xlObjects{
mutex: &sync.Mutex{},
storageDisks: newStorageDisks,
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
listPool: listPool,
objCache: objCache,
objCacheEnabled: !objCacheDisabled,
mutex: &sync.Mutex{},
storageDisks: newStorageDisks,
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
listPool: listPool,
}
// Object cache is enabled when _MINIO_CACHE env is missing
// and cache size is > 0.
xl.objCacheEnabled = !objCacheDisabled && globalMaxCacheSize > 0
// Check if object cache is enabled.
if xl.objCacheEnabled {
// Initialize object cache.
objCache := objcache.New(globalMaxCacheSize, globalCacheExpiry)
objCache.OnEviction = func(key string) {
debug.FreeOSMemory()
}
xl.objCache = objCache
}
// Initialize meta volume; if the volume already exists it is ignored.

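The eviction hook above pairs every cache drop with debug.FreeOSMemory, which forces a collection and returns freed pages to the OS instead of keeping them resident in the Go heap. The same wiring in isolation, using the objcache API from later in this diff:

package cmd

import (
	"runtime/debug"
	"time"

	"github.com/minio/minio/pkg/objcache"
)

// newObjectCache builds a cache that hands memory back to the OS as
// soon as entries are evicted.
func newObjectCache(maxSize uint64, expiry time.Duration) *objcache.Cache {
	c := objcache.New(maxSize, expiry)
	c.OnEviction = func(key string) {
		debug.FreeOSMemory()
	}
	return c
}
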
View File

@@ -16,7 +16,7 @@ A stand-alone Minio server would go down if the server hosting the disks goes of
For example, an 8-node distributed Minio setup with 1 disk per node would keep serving data even if up to 4 nodes are offline. But you'll need at least 5 nodes online to create new objects.
## Limitations
### Limits
As with Minio in stand-alone mode, distributed Minio has a per-tenant limit of minimum 4 and maximum 16 drives (imposed by erasure code). This helps maintain simplicity and yet remain scalable. If you need a multi-tenant setup, you can easily spin up multiple Minio instances managed by orchestration tools like Kubernetes.

View File

@@ -39,9 +39,7 @@ docker run -p 9000:9000 --name minio1 \
## 4. Test Distributed Minio on Docker
Currently Minio distributed version is under testing. We do not recommend using it in production.
This example shows how to run 4 node Minio cluster inside different docker containers using [docker-compose](https://docs.docker.com/compose/). Please download [docker-compose.yml](https://raw.githubusercontent.com/minio/minio/master/docs/docker/docker-compose.yml) to your current working directory, docker-compose pulls the Minio Docker image with label ``edge``.
This example shows how to run a 4-node Minio cluster inside different Docker containers using [docker-compose](https://docs.docker.com/compose/). Please download [docker-compose.yml](https://raw.githubusercontent.com/minio/minio/master/docs/docker/docker-compose.yml) to your current working directory; docker-compose pulls the Minio Docker image.
### Run `docker-compose`

View File

@@ -5,7 +5,7 @@ version: '2'
# 9001 through 9004.
services:
minio1:
image: minio/minio:edge
image: minio/minio
ports:
- "9001:9000"
environment:
@@ -13,7 +13,7 @@ services:
MINIO_SECRET_KEY: minio123
command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport
minio2:
image: minio/minio:edge
image: minio/minio
ports:
- "9002:9000"
environment:
@@ -21,7 +21,7 @@ services:
MINIO_SECRET_KEY: minio123
command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport
minio3:
image: minio/minio:edge
image: minio/minio
ports:
- "9003:9000"
environment:
@@ -29,7 +29,7 @@ services:
MINIO_SECRET_KEY: minio123
command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport
minio4:
image: minio/minio:edge
image: minio/minio
ports:
- "9004:9000"
environment:

View File

@@ -0,0 +1,50 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package objcache implements in memory caching methods.
package objcache
// Used for adding entry to the object cache.
// Implements io.WriteCloser
type cappedWriter struct {
offset int64
cap int64
buffer []byte
onClose func() error
}
// Write implements a limited writer; returns an error
// if the writes go beyond the allocated size.
func (c *cappedWriter) Write(b []byte) (n int, err error) {
if c.offset+int64(len(b)) > c.cap {
return 0, ErrExcessData
}
n = copy(c.buffer[int(c.offset):int(c.offset)+len(b)], b)
c.offset = c.offset + int64(n)
return n, nil
}
// Reset relinquishes the allocated underlying buffer.
func (c *cappedWriter) Reset() {
c.buffer = nil
}
// On close, onClose() is called which checks if all object contents
// have been written so that it can save the buffer to the cache.
func (c cappedWriter) Close() (err error) {
return c.onClose()
}

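A quick illustration of the contract, assuming a no-op onClose: the first Write exactly fills the buffer, the second trips the cap. (exampleCappedWriter is illustrative, not part of the package.)

// exampleCappedWriter exercises cappedWriter's limits.
func exampleCappedWriter() error {
	w := &cappedWriter{
		cap:     5,
		buffer:  make([]byte, 5),
		onClose: func() error { return nil },
	}
	if _, err := w.Write([]byte("Hello")); err != nil {
		return err // exactly fills the buffer, so no error is expected
	}
	if _, err := w.Write([]byte("!")); err != ErrExcessData {
		return err // anything past cap must fail with ErrExcessData
	}
	return w.Close()
}
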
View File

@@ -22,6 +22,7 @@ import (
"bytes"
"errors"
"io"
"runtime/debug"
"sync"
"time"
)
@@ -32,6 +33,13 @@ var NoExpiry = time.Duration(0)
// DefaultExpiry represents default time duration value when individual entries will be expired.
var DefaultExpiry = time.Duration(72 * time.Hour) // 72hrs.
// DefaultBufferRatio represents default ratio used to calculate the
// individual cache entry buffer size.
var DefaultBufferRatio = uint64(10)
// DefaultGCPercent represents default garbage collection target percentage.
var DefaultGCPercent = 20
// buffer represents the in memory cache of a single entry.
// buffer carries value of the data and last accessed time.
type buffer struct {
@@ -46,9 +54,16 @@ type Cache struct {
// read/write requests for cache
mutex sync.Mutex
// Once is used for resetting GC once after
// peak cache usage.
onceGC sync.Once
// maxSize is a total size for overall cache
maxSize uint64
// maxCacheEntrySize is the maximum buffer size allowed per key.
maxCacheEntrySize uint64
// currentSize is a current size in memory
currentSize uint64
@@ -68,24 +83,58 @@ type Cache struct {
stopGC chan struct{}
}
// New - Return a new cache with a given default expiry duration.
// If the expiry duration is less than one (or NoExpiry),
// the items in the cache never expire (by default), and must be deleted
// manually.
// New - Return a new cache with a given default expiry
// duration. If the expiry duration is less than one
// (or NoExpiry), the items in the cache never expire
// (by default), and must be deleted manually.
func New(maxSize uint64, expiry time.Duration) *Cache {
C := &Cache{
maxSize: maxSize,
entries: make(map[string]*buffer),
expiry: expiry,
if maxSize == 0 {
panic("objcache: setting maximum cache size to zero is forbidden.")
}
// A garbage collection is triggered when the ratio
// of freshly allocated data to live data remaining
// after the previous collection reaches this percentage.
//
// - https://golang.org/pkg/runtime/debug/#SetGCPercent
//
// This means that by default GC is triggered after
// we've allocated an extra amount of memory proportional
// to the amount already in use.
//
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M.
//
// Set this value to 20% if caching is enabled.
debug.SetGCPercent(DefaultGCPercent)
// Max cache entry size - indicates the
// maximum buffer per key that can be held in
// memory. Currently this value is 1/10th
// of the requested cache size.
maxCacheEntrySize := func() uint64 {
i := maxSize / DefaultBufferRatio
if i == 0 {
i = maxSize
}
return i
}()
c := &Cache{
onceGC: sync.Once{},
maxSize: maxSize,
maxCacheEntrySize: maxCacheEntrySize,
entries: make(map[string]*buffer),
expiry: expiry,
}
// We have expiry start the janitor routine.
if expiry > 0 {
C.stopGC = make(chan struct{})
// Initialize a new stop GC channel.
c.stopGC = make(chan struct{})
// Start garbage collection routine to expire objects.
C.startGC()
c.StartGC()
}
return C
return c
}
// ErrKeyNotFoundInCache - key not found in cache.
@@ -97,18 +146,6 @@ var ErrCacheFull = errors.New("Not enough space in cache")
// ErrExcessData - excess data was attempted to be written on cache.
var ErrExcessData = errors.New("Attempted excess write on cache")
// Used for adding entry to the object cache. Implements io.WriteCloser
type cacheBuffer struct {
*bytes.Buffer // Implements io.Writer
onClose func() error
}
// On close, onClose() is called which checks if all object contents
// have been written so that it can save the buffer to the cache.
func (c cacheBuffer) Close() (err error) {
return c.onClose()
}
// Create - validates if object size fits with in cache size limit and returns a io.WriteCloser
// to which object contents can be written and finally Close()'d. During Close() we
// checks if the amount of data written is equal to the size of the object, in which
@@ -123,29 +160,46 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
}() // Do not crash the server.
valueLen := uint64(size)
// Check if the size of the object is not bigger than the capacity of the cache.
if c.maxSize > 0 && valueLen > c.maxSize {
// Check if the size of the object is > 1/10th the size
// of the cache; if so, skip caching it.
if valueLen > c.maxCacheEntrySize {
return nil, ErrCacheFull
}
// Will hold the object contents.
buf := bytes.NewBuffer(make([]byte, 0, size))
// Check if the incoming size is going to exceed
// the effective cache size; if so, return an error
// instead.
c.mutex.Lock()
if c.currentSize+valueLen > c.maxSize {
c.mutex.Unlock()
return nil, ErrCacheFull
}
// Change GC percent if the current cache usage
// is already 75% of the maximum allowed usage.
if c.currentSize > (75 * c.maxSize / 100) {
c.onceGC.Do(func() { debug.SetGCPercent(DefaultGCPercent - 10) })
}
c.mutex.Unlock()
cbuf := &cappedWriter{
offset: 0,
cap: size,
buffer: make([]byte, size),
}
// Function called on close which saves the object contents
// to the object cache.
onClose := func() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if size != int64(buf.Len()) {
if size != cbuf.offset {
cbuf.Reset() // Reset resets the buffer to be empty.
// Full object not available hence do not save buf to object cache.
return io.ErrShortBuffer
}
if c.maxSize > 0 && c.currentSize+valueLen > c.maxSize {
return ErrExcessData
}
// Full object available in buf, save it to cache.
c.entries[key] = &buffer{
value: buf.Bytes(),
value: cbuf.buffer,
lastAccessed: time.Now().UTC(), // Save last accessed time.
}
// Account for the memory allocated above.
@@ -153,12 +207,10 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
return nil
}
// Object contents that are written - cacheBuffer.Write(data)
// Object contents that are written - cappedWriter.Write(data)
// will be accumulated in the buffer, which implements io.Writer.
return cacheBuffer{
buf,
onClose,
}, nil
cbuf.onClose = onClose
return cbuf, nil
}
// Open - open the in-memory file, returns an in memory read seeker.
@@ -212,17 +264,17 @@ func (c *Cache) gc() {
// StopGC sends a message to the expiry routine to stop
// expiring cached entries. NOTE: once this is called, cached
// entries will not be expired if the consumer has called this.
// entries will not be expired; be careful if you are using this.
func (c *Cache) StopGC() {
if c.stopGC != nil {
c.stopGC <- struct{}{}
}
}
// startGC starts running a routine ticking at expiry interval, on each interval
// this routine does a sweep across the cache entries and garbage collects all the
// expired entries.
func (c *Cache) startGC() {
// StartGC starts running a routine ticking at expiry interval,
// on each interval this routine does a sweep across the cache
// entries and garbage collects all the expired entries.
func (c *Cache) StartGC() {
go func() {
for {
select {
@@ -239,9 +291,10 @@ func (c *Cache) startGC() {
// Deletes a requested entry from the cache.
func (c *Cache) delete(key string) {
if buf, ok := c.entries[key]; ok {
if _, ok := c.entries[key]; ok {
deletedSize := uint64(len(c.entries[key].value))
delete(c.entries, key)
c.currentSize -= uint64(len(buf.value))
c.currentSize -= deletedSize
c.totalEvicted++
}
}

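Putting the reworked pieces together, a hedged usage sketch of the package; the Open signature is assumed from its doc comment ("returns an in memory read seeker") and error handling is trimmed to the interesting cases.

package main

import (
	"io/ioutil"
	"log"

	"github.com/minio/minio/pkg/objcache"
)

func main() {
	// 100 MiB cache; entries above 1/10th of that (10 MiB) are refused.
	c := objcache.New(100*1024*1024, objcache.DefaultExpiry)

	w, err := c.Create("bucket/object", 5)
	if err != nil {
		log.Fatal(err) // e.g. ErrCacheFull when the entry cannot fit
	}
	w.Write([]byte("Hello")) // must write exactly the declared size
	if err = w.Close(); err != nil {
		log.Fatal(err) // a short write surfaces as io.ErrShortBuffer here
	}

	r, err := c.Open("bucket/object")
	if err != nil {
		log.Fatal(err)
	}
	b, _ := ioutil.ReadAll(r)
	log.Printf("cached: %s", b)
}
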
View File

@@ -117,6 +117,12 @@ func TestObjCache(t *testing.T) {
cacheSize: 5,
closeErr: ErrExcessData,
},
// Validate error excess data during write.
{
expiry: NoExpiry,
cacheSize: 2048,
err: ErrExcessData,
},
}
// Test 1 validating Open failure.
@@ -232,14 +238,30 @@ func TestObjCache(t *testing.T) {
if err = w.Close(); err != nil {
t.Errorf("Test case 7 expected to pass, failed instead %s", err)
}
w, err = cache.Create("test2", 1)
if err != nil {
_, err = cache.Create("test2", 1)
if err != ErrCacheFull {
t.Errorf("Test case 7 expected to pass, failed instead %s", err)
}
// Write '1' byte.
w.Write([]byte("H"))
if err = w.Close(); err != testCase.closeErr {
t.Errorf("Test case 7 expected to fail, passed instead")
// Test 8 validates rejecting Writes which write excess data.
testCase = testCases[7]
cache = New(testCase.cacheSize, testCase.expiry)
w, err = cache.Create("test1", 5)
if err != nil {
t.Errorf("Test case 8 expected to pass, failed instead %s", err)
}
// Write '5' bytes.
n, err := w.Write([]byte("Hello"))
if err != nil {
t.Errorf("Test case 8 expected to pass, failed instead %s", err)
}
if n != 5 {
t.Errorf("Test case 8 expected 5 bytes written, instead found %d", n)
}
// Write '1' more byte, should return error.
n, err = w.Write([]byte("W"))
if n != 0 || err != testCase.err {
t.Errorf("Test case 8 expected to fail with ErrExcessData, but failed with %s instead", err)
}
}

View File

@@ -16,7 +16,7 @@ This package was developed for the distributed server version of [Minio Object S
For [minio](https://minio.io/) the distributed version is started as follows (for a 6-server system):
```
$ minio server server1/disk server2/disk server3/disk server4/disk server5/disk server6/disk
$ minio server server1:/disk server2:/disk server3:/disk server4:/disk server5:/disk server6:/disk
```
_(note that the identical command should be run on servers `server1` through `server6`)_

View File

@@ -154,7 +154,7 @@ func (dm *DRWMutex) lockBlocking(isReadLock bool) {
//
func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool {
// Create buffered channel of quorum size
// Create buffered channel of size equal to total number of nodes.
ch := make(chan Granted, dnodeCount)
for index, c := range clnts {
@@ -216,6 +216,8 @@ func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool {
// We know that we are not going to get the lock anymore, so exit out
// and release any locks that did get acquired
done = true
// Increment the number of grants received from the buffered channel.
i++
releaseAll(clnts, locks, lockName, isReadLock)
}
}

View File

@@ -34,6 +34,7 @@ var ownNode int
// Simple majority based quorum, set to dNodeCount/2+1
var dquorum int
// Simple quorum for read operations, set to dNodeCount/2
var dquorumReads int
@@ -59,7 +60,7 @@ func SetNodesWithClients(rpcClnts []RPC, rpcOwnNode int) (err error) {
dnodeCount = len(rpcClnts)
dquorum = dnodeCount/2 + 1
dquorumReads = dnodeCount/2
dquorumReads = dnodeCount / 2
// Initialize node name and rpc path for each RPCClient object.
clnts = make([]RPC, dnodeCount)
copy(clnts, rpcClnts)

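A worked example of the two formulas above:

package dsync

// quorums returns the write and read quorums for n nodes:
// n/2+1 grants to write, n/2 grants to read.
func quorums(dnodeCount int) (dquorum, dquorumReads int) {
	return dnodeCount/2 + 1, dnodeCount / 2
}

// quorums(4) == (3, 2); quorums(6) == (4, 3); quorums(8) == (5, 4)
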
vendor/vendor.json
View File

@@ -111,10 +111,10 @@
"revisionTime": "2015-11-18T20:00:48-08:00"
},
{
"checksumSHA1": "UWpLeW+oLfe/MiphMckp1HqKrW0=",
"checksumSHA1": "ddMyebkzU3xB7K8dAhM1S+Mflmo=",
"path": "github.com/minio/dsync",
"revision": "fcea3bf5533c1b8a5af3cb377d30363782d2532d",
"revisionTime": "2016-10-15T15:40:54Z"
"revision": "dd0da3743e6668b03559c2905cc661bc0fceeae3",
"revisionTime": "2016-11-28T22:07:34Z"
},
{
"path": "github.com/minio/go-homedir",