Merge branch 'master' into release

This commit is contained in:
Minio Trusted
2017-07-24 11:22:55 -07:00
518 changed files with 129081 additions and 4131 deletions

View File

@@ -1,23 +1,26 @@
FROM alpine:3.5
MAINTAINER Minio Inc <dev@minio.io>
COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps curl && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && apk del .build-deps
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
EXPOSE 9000
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
CMD ["minio"]

View File

@@ -2,21 +2,24 @@ FROM resin/aarch64-alpine:3.5
MAINTAINER Minio Inc <dev@minio.io>
COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps curl && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.minio.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && apk del .build-deps
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
EXPOSE 9000
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
CMD ["minio"]

View File

@@ -2,21 +2,24 @@ FROM resin/armhf-alpine:3.5
MAINTAINER Minio Inc <dev@minio.io>
COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps curl && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.minio.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && apk del .build-deps
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
EXPOSE 9000
COPY buildscripts/docker-entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/export"]
HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
CMD ["minio"]

View File

@@ -1,59 +1,10 @@
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
BUILD_LDFLAGS := '$(LDFLAGS)'
TAG := latest
HOST ?= $(shell uname)
CPU ?= $(shell uname -m)
# if no host is identified (no uname tool)
# we assume a Linux-64bit build
ifeq ($(HOST),)
HOST = Linux
endif
# identify CPU
ifeq ($(CPU), x86_64)
HOST := $(HOST)64
else
ifeq ($(CPU), amd64)
HOST := $(HOST)64
else
ifeq ($(CPU), i686)
HOST := $(HOST)32
endif
endif
endif
#############################################
# now we find out the target OS for
# which we are going to compile in case
# the caller didn't yet define OS himself
ifndef (OS)
ifeq ($(HOST), Linux64)
arch = gcc
else
ifeq ($(HOST), Linux32)
arch = 32
else
ifeq ($(HOST), Darwin64)
arch = clang
else
ifeq ($(HOST), Darwin32)
arch = clang
else
ifeq ($(HOST), FreeBSD64)
arch = gcc
endif
endif
endif
endif
endif
endif
all: install
all: build
checks:
@echo "Check deps"
@@ -68,7 +19,7 @@ getdeps: checks
@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign
verifiers: vet fmt lint cyclo spelling
verifiers: getdeps vet fmt lint cyclo spelling
vet:
@echo "Running $@"
@@ -94,8 +45,6 @@ cyclo:
@${GOPATH}/bin/gocyclo -over 100 cmd
@${GOPATH}/bin/gocyclo -over 100 pkg
build: getdeps verifiers $(UI_ASSETS)
deadcode:
@${GOPATH}/bin/deadcode
@@ -104,7 +53,9 @@ spelling:
@${GOPATH}/bin/misspell -error `find pkg/`
@${GOPATH}/bin/misspell -error `find docs/`
test: build
# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
@echo "Running all minio testing"
@go test $(GOFLAGS) .
@go test $(GOFLAGS) github.com/minio/minio/cmd...
@@ -114,9 +65,10 @@ coverage: build
@echo "Running all coverage for minio"
@./buildscripts/go-coverage.sh
gomake-all: build
@echo "Installing minio at $(GOPATH)/bin/minio"
@go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
# Builds minio locally.
build:
@echo "Building minio to $(PWD)/minio ..."
@CGO_ENABLED=0 go build --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
pkg-add:
@echo "Adding new package $(PKG)"
@@ -133,7 +85,11 @@ pkg-remove:
pkg-list:
@$(GOPATH)/bin/govendor list
install: gomake-all
# Builds minio and installs it to $GOPATH/bin.
install: build
@echo "Installing minio at $(GOPATH)/bin/minio ..."
@cp $(PWD)/minio $(GOPATH)/bin/minio
@echo "Check 'minio -h' for help."
release: verifiers
@MINIO_RELEASE=RELEASE ./buildscripts/build.sh

View File

@@ -27,20 +27,13 @@ brew install minio/stable/minio
minio server ~/Photos
```
#### Note
If you previously installed minio using `brew install minio` then uninstall minio as shown below
If you previously installed minio using `brew install minio` then reinstall minio from the official `minio/stable/minio` repo. Homebrew builds are unstable due to golang 1.8 bugs.
```
brew uninstall minio
```
Then re-install the latest minio using:
```
brew install minio/stable/minio
```
>`brew install minio` and `brew upgrade minio` will no longer install/upgrade the latest minio binaries on macOS. Upstream bugs in golang 1.8 broke the Minio brew installer. Use the updated `minio/stable/minio` in your brew paths.
### Binary Download
| Platform| Architecture | URL|
| ----------| -------- | ------|

View File

@@ -24,6 +24,8 @@ install:
# To run your custom scripts instead of automatic MSBuild
build_script:
# Compile
# We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- appveyor AddCompilationMessage "Starting Compile"
- cd c:\gopath\src\github.com\minio\minio
- go run buildscripts/gen-ldflags.go > temp.txt

View File

@@ -370,8 +370,24 @@ export default class Browse extends React.Component {
}
handleExpireValue(targetInput, inc, object) {
inc === -1 ? this.refs[targetInput].stepDown(1) : this.refs[targetInput].stepUp(1)
let value = this.refs[targetInput].value
let maxValue = (targetInput == 'expireHours') ? 23 : (targetInput == 'expireMins') ? 59 : (targetInput == 'expireDays') ? 7 : 0
value = isNaN(value) ? 0 : value
// Use a custom step count to support the Edge browser
if (inc === -1) {
if(value != 0) {
value--
}
}
else {
if(value != maxValue) {
value++
}
}
this.refs[targetInput].value = value
// Reset hours and mins when days reaches its max value
if (this.refs.expireDays.value == 7) {
this.refs.expireHours.value = 0
this.refs.expireMins.value = 0
@@ -379,6 +395,7 @@ export default class Browse extends React.Component {
if (this.refs.expireDays.value + this.refs.expireHours.value + this.refs.expireMins.value == 0) {
this.refs.expireDays.value = 7
}
const {dispatch} = this.props
dispatch(actions.shareObject(object, this.refs.expireDays.value, this.refs.expireHours.value, this.refs.expireMins.value))
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@@ -90,9 +90,18 @@ main() {
go_build ${each_osarch}
done
else
for each_osarch in $(echo $chosen_osarch | sed 's/,/ /g'); do
go_build ${each_osarch}
local found=0
for each_osarch in ${SUPPORTED_OSARCH}; do
if [ "$chosen_osarch" = "$each_osarch" ]; then
found=1
fi
done
if [ ${found} -eq 1 ]; then
go_build ${chosen_osarch}
else
echo "Unknown architecture \"${chosen_osarch}\""
exit 1
fi
fi
}

View File

@@ -0,0 +1,32 @@
#!/bin/sh
#
# Minio Cloud Storage, (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_init () {
address="http://127.0.0.1:9000"
resource="/minio/index.html"
}
HealthCheckMain () {
# Get the http response code
http_response=$(curl -s -o /dev/null -I -w "%{http_code}" ${address}${resource})
# If http_response is 200, the server is up.
# When MINIO_BROWSER is set to off, curl responds with 404; we still assume the server is up.
[ "$http_response" = "200" ] || [ "$http_response" = "404" ]
}
_init && HealthCheckMain
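
The same probe logic, restated as a self-contained Go sketch for readers who prefer it over shell — illustrative only, not part of this change; the address and path mirror the `_init` values above:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// Same probe as healthcheck.sh: HEAD /minio/index.html and treat
// 200 (browser on) or 404 (MINIO_BROWSER=off) as healthy.
func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Head("http://127.0.0.1:9000/minio/index.html")
	if err != nil {
		fmt.Fprintln(os.Stderr, "server unreachable:", err)
		os.Exit(1)
	}
	resp.Body.Close()
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
		os.Exit(0) // healthy
	}
	fmt.Fprintln(os.Stderr, "unexpected status:", resp.StatusCode)
	os.Exit(1)
}
```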

View File

@@ -113,15 +113,15 @@ func (rc remoteAdminClient) ReInitDisks() error {
}
// ServerInfoData - Returns the server info of this server.
func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) {
func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
if globalBootTime.IsZero() {
return ServerInfoData{}, errServerNotInitialized
return sid, errServerNotInitialized
}
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
return ServerInfoData{}, errServerNotInitialized
return sid, errServerNotInitialized
}
storage := objLayer.StorageInfo()
@@ -145,12 +145,12 @@ func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) {
}
// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rc remoteAdminClient) ServerInfoData() (ServerInfoData, error) {
func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
args := AuthRPCArgs{}
reply := ServerInfoDataReply{}
err := rc.Call(serverInfoDataRPC, &args, &reply)
if err != nil {
return ServerInfoData{}, err
return sid, err
}
return reply.ServerInfoData, nil
@@ -493,7 +493,7 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (serverConfigV13, error) {
func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (scv serverConfigV13, e error) {
// majority-based quorum
quorum := len(serverConfigs)/2 + 1
@@ -566,7 +566,7 @@ func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (server
// If quorum nodes don't agree.
if maxOccurrence < quorum {
return serverConfigV13{}, errXLWriteQuorum
return scv, errXLWriteQuorum
}
return configJSON, nil
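
A recurring change in this commit converts plain return types to named results — `(sid ServerInfoData, e error)`, `(scv serverConfigV13, e error)` and so on — so error paths can return the zero value without restating the composite literal. A minimal, self-contained sketch of the idiom (the `result` type here is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// result stands in for a larger struct such as ServerInfoData.
type result struct{ Value int }

var errNotReady = errors.New("not ready")

// fetch uses named results: each early return hands back the zero
// value of `res` without restating the composite literal, which is
// the pattern this diff applies to ServerInfoData, serverConfigV13,
// Endpoint, PartInfo and friends.
func fetch(ready bool) (res result, err error) {
	if !ready {
		return res, errNotReady // res is still the zero value
	}
	res.Value = 42
	return res, nil
}

func main() {
	fmt.Println(fetch(true))
	fmt.Println(fetch(false))
}
```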

View File

@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io/ioutil"
"net/rpc"
"os"
"path/filepath"
"time"
@@ -236,7 +235,7 @@ func (s *adminCmd) CommitConfig(cArgs *CommitConfigArgs, cReply *CommitConfigRep
// stop and restart commands.
func registerAdminRPCRouter(mux *router.Router) error {
adminRPCHandler := &adminCmd{}
adminRPCServer := rpc.NewServer()
adminRPCServer := newRPCServer()
err := adminRPCServer.RegisterName("Admin", adminRPCHandler)
if err != nil {
return traceError(err)

View File

@@ -115,6 +115,7 @@ const (
ErrBucketAlreadyOwnedByYou
ErrInvalidDuration
ErrNotSupported
ErrBucketAlreadyExists
// Add new error codes here.
// Bucket notification related errors.
@@ -317,7 +318,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
},
ErrInvalidPart: {
Code: "InvalidPart",
Description: "One or more of the specified parts could not be found.",
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartOrder: {
@@ -355,6 +356,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "The bucket you tried to delete is not empty",
HTTPStatusCode: http.StatusConflict,
},
ErrBucketAlreadyExists: {
Code: "BucketAlreadyExists",
Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",
HTTPStatusCode: http.StatusConflict,
},
ErrAllAccessDisabled: {
Code: "AllAccessDisabled",
Description: "All access to this bucket has been disabled.",
@@ -663,6 +669,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrStorageFull
case BadDigest:
apiErr = ErrBadDigest
case AllAccessDisabled:
apiErr = ErrAllAccessDisabled
case IncompleteBody:
apiErr = ErrIncompleteBody
case ObjectExistsAsDirectory:
@@ -677,6 +685,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrBucketAlreadyOwnedByYou
case BucketNotEmpty:
apiErr = ErrBucketNotEmpty
case BucketAlreadyExists:
apiErr = ErrBucketAlreadyExists
case BucketExists:
apiErr = ErrBucketAlreadyOwnedByYou
case ObjectNotFound:
@@ -701,6 +711,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrNoSuchUpload
case PartTooSmall:
apiErr = ErrEntityTooSmall
case SignatureDoesNotMatch:
apiErr = ErrSignatureDoesNotMatch
case SHA256Mismatch:
apiErr = ErrContentSHA256Mismatch
case ObjectTooLarge:
@@ -713,6 +725,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrNotImplemented
case PolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
case PartTooBig:
apiErr = ErrEntityTooLarge
default:
apiErr = ErrInternalError
}

View File

@@ -36,7 +36,11 @@ func setCommonHeaders(w http.ResponseWriter) {
// Set unique request ID for each reply.
w.Header().Set(responseRequestIDKey, mustGetRequestID(UTCNow()))
w.Header().Set("Server", globalServerUserAgent)
w.Header().Set("X-Amz-Bucket-Region", serverConfig.GetRegion())
// Set `x-amz-bucket-region` only if region is set on the server;
// by default minio uses an empty region.
if region := serverConfig.GetRegion(); region != "" {
w.Header().Set("X-Amz-Bucket-Region", region)
}
w.Header().Set("Accept-Ranges", "bytes")
}

View File

@@ -288,8 +288,6 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var owner = Owner{}
owner.ID = globalMinioDefaultOwnerID
owner.DisplayName = globalMinioDefaultOwnerID
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
@@ -312,8 +310,6 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
var data = ListObjectsResponse{}
owner.ID = globalMinioDefaultOwnerID
owner.DisplayName = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = Object{}
if object.Name == "" {
@@ -352,18 +348,17 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
}
// generates a ListObjectsV2 response for the given bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter string, fetchOwner bool, maxKeys int, resp ListObjectsInfo) ListObjectsV2Response {
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
var contents []Object
var prefixes []CommonPrefix
var commonPrefixes []CommonPrefix
var owner = Owner{}
var data = ListObjectsV2Response{}
if fetchOwner {
owner.ID = globalMinioDefaultOwnerID
owner.DisplayName = globalMinioDefaultOwnerID
}
for _, object := range resp.Objects {
for _, object := range objects {
var content = Object{}
if object.Name == "" {
continue
@@ -387,14 +382,14 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter
data.Prefix = prefix
data.MaxKeys = maxKeys
data.ContinuationToken = token
data.NextContinuationToken = resp.NextMarker
data.IsTruncated = resp.IsTruncated
for _, prefix := range resp.Prefixes {
data.NextContinuationToken = nextToken
data.IsTruncated = isTruncated
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = prefix
prefixes = append(prefixes, prefixItem)
commonPrefixes = append(commonPrefixes, prefixItem)
}
data.CommonPrefixes = prefixes
data.CommonPrefixes = commonPrefixes
data.KeyCount = len(data.Contents) + len(data.CommonPrefixes)
return data
}
@@ -443,9 +438,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Initiator.DisplayName = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.DisplayName = globalMinioDefaultOwnerID
listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker

View File

@@ -77,7 +77,7 @@ func TestLogin(t *testing.T) {
// Invalid password length
{
args: LoginRPCArgs{
Username: globalMinioDefaultOwnerID,
Username: "minio",
Password: "aaa",
Version: Version,
},

View File

@@ -17,8 +17,6 @@
package cmd
import (
"net/rpc"
router "github.com/gorilla/mux"
)
@@ -39,7 +37,7 @@ type browserPeerAPIHandlers struct {
func registerBrowserPeerRPCRouter(mux *router.Router) error {
bpHandlers := &browserPeerAPIHandlers{}
bpRPCServer := rpc.NewServer()
bpRPCServer := newRPCServer()
err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers)
if err != nil {
return traceError(err)

View File

@@ -100,7 +100,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}
response := generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter, fetchOwner, maxKeys, listObjectsInfo)
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsInfo.NextMarker, startAfter, delimiter, fetchOwner, listObjectsInfo.IsTruncated, maxKeys, listObjectsInfo.Objects, listObjectsInfo.Prefixes)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))

View File

@@ -525,7 +525,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
// Extract metadata to be saved from received Form.
metadata := extractMetadataFromForm(formValues)
metadata, err := extractMetadataFromHeader(formValues)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
sha256sum := ""
objectLock := globalNSMutex.NewNSLock(bucket, object)
@@ -648,12 +653,21 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
// Delete bucket access policy, if present - ignore any errors.
_ = removeBucketPolicy(bucket, objectAPI)
// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketPolicy(bucket, policyChange{true, nil})
// Delete notification config, if present - ignore any errors.
_ = removeNotificationConfig(bucket, objectAPI)
// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketNotification(bucket, nil)
// Delete listener config, if present - ignore any errors.
_ = removeListenerConfig(bucket, objectAPI)
// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketListener(bucket, []listenerConfig{})
// Write success response.
writeSuccessNoContent(w)
}

View File

@@ -204,6 +204,15 @@ func writeNotification(w http.ResponseWriter, notification map[string][]Notifica
if err != nil {
return err
}
// https://github.com/containous/traefik/issues/560
// https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events
//
// Proxies might buffer the connection; to avoid this we
// need to set the proper MIME type before writing to the client.
// This MIME header tells proxies not to buffer the response.
w.Header().Set("Content-Type", "text/event-stream")
// Add additional CRLF characters for client to
// differentiate the individual events properly.
_, err = w.Write(append(notificationBytes, crlf...))
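
A minimal sketch of a server-sent-events endpoint built around the same header, assuming a standard `net/http` handler; the explicit `Flusher` step is an assumption, since this hunk only shows the header and the trailing CRLFs:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// events streams three ticks; the Content-Type is the part this diff
// adds, the Flusher is an assumption about the surrounding handler.
func events(w http.ResponseWriter, r *http.Request) {
	// text/event-stream tells intermediaries (e.g. traefik) not to
	// buffer the response body.
	w.Header().Set("Content-Type", "text/event-stream")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		// The blank line (the extra CRLFs in the hunk above) is what
		// lets the client split individual events.
		fmt.Fprintf(w, "data: tick %d\n\n", i)
		flusher.Flush()
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/events", events)
	http.ListenAndServe(":8080", nil)
}
```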

View File

@@ -185,7 +185,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName
filterRules := []filterRule{
{
Name: "prefix",
Value: globalMinioDefaultOwnerID,
Value: "minio",
},
{
Name: "suffix",

View File

@@ -144,6 +144,9 @@ func isValidQueueID(queueARN string) bool {
if isAMQPQueue(sqsARN) { // AMQP queue.
amqpN := serverConfig.Notify.GetAMQPByID(sqsARN.AccountID)
return amqpN.Enable && amqpN.URL != ""
} else if isMQTTQueue(sqsARN) {
mqttN := serverConfig.Notify.GetMQTTByID(sqsARN.AccountID)
return mqttN.Enable && mqttN.Broker != ""
} else if isNATSQueue(sqsARN) {
natsN := serverConfig.Notify.GetNATSByID(sqsARN.AccountID)
return natsN.Enable && natsN.Address != ""
@@ -251,6 +254,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
// Unmarshals input value of AWS ARN format into minioSqs object.
// Returned value represents minio sqs types, currently supported are
// - amqp
// - mqtt
// - nats
// - elasticsearch
// - redis
@@ -273,6 +277,8 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
switch sqsType {
case queueTypeAMQP:
mSqs.Type = queueTypeAMQP
case queueTypeMQTT:
mSqs.Type = queueTypeMQTT
case queueTypeNATS:
mSqs.Type = queueTypeNATS
case queueTypeElastic:

View File

@@ -358,6 +358,11 @@ func TestUnmarshalSQSARN(t *testing.T) {
queueARN: "arn:minio:sqs:us-east-1:1:amqp",
Type: "amqp",
},
// Valid mqtt queue arn.
{
queueARN: "arn:minio:sqs:us-east-1:1:mqtt",
Type: "mqtt",
},
// Invalid empty queue arn.
{
queueARN: "",

View File

@@ -17,6 +17,7 @@
package cmd
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
@@ -24,35 +25,34 @@ import (
"path/filepath"
)
func parsePublicCertFile(certFile string) (certs []*x509.Certificate, err error) {
var bytes []byte
if bytes, err = ioutil.ReadFile(certFile); err != nil {
return certs, err
func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err error) {
// Read certificate file.
var data []byte
if data, err = ioutil.ReadFile(certFile); err != nil {
return nil, err
}
// Parse all certs in the chain.
var block *pem.Block
var cert *x509.Certificate
current := bytes
current := data
for len(current) > 0 {
if block, current = pem.Decode(current); block == nil {
err = fmt.Errorf("Could not read PEM block from file %s", certFile)
return certs, err
var pemBlock *pem.Block
if pemBlock, current = pem.Decode(current); pemBlock == nil {
return nil, fmt.Errorf("Could not read PEM block from file %s", certFile)
}
if cert, err = x509.ParseCertificate(block.Bytes); err != nil {
return certs, err
var x509Cert *x509.Certificate
if x509Cert, err = x509.ParseCertificate(pemBlock.Bytes); err != nil {
return nil, err
}
certs = append(certs, cert)
x509Certs = append(x509Certs, x509Cert)
}
if len(certs) == 0 {
err = fmt.Errorf("Empty public certificate file %s", certFile)
if len(x509Certs) == 0 {
return nil, fmt.Errorf("Empty public certificate file %s", certFile)
}
return certs, err
return x509Certs, nil
}
func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
@@ -81,7 +81,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
for _, caFile := range caFiles {
caCert, err := ioutil.ReadFile(caFile)
if err != nil {
return rootCAs, err
return nil, err
}
rootCAs.AppendCertsFromPEM(caCert)
@@ -90,19 +90,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
return rootCAs, nil
}
func getSSLConfig() (publicCerts []*x509.Certificate, rootCAs *x509.CertPool, secureConn bool, err error) {
func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, tlsCert *tls.Certificate, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return publicCerts, rootCAs, secureConn, err
return nil, nil, nil, false, nil
}
if publicCerts, err = parsePublicCertFile(getPublicCertFile()); err != nil {
return publicCerts, rootCAs, secureConn, err
if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
return nil, nil, nil, false, err
}
var cert tls.Certificate
if cert, err = tls.LoadX509KeyPair(getPublicCertFile(), getPrivateKeyFile()); err != nil {
return nil, nil, nil, false, err
}
tlsCert = &cert
if rootCAs, err = getRootCAs(getCADir()); err != nil {
return publicCerts, rootCAs, secureConn, err
return nil, nil, nil, false, err
}
secureConn = true
return publicCerts, rootCAs, secureConn, err
return x509Certs, rootCAs, tlsCert, secureConn, nil
}
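
How the newly returned `*tls.Certificate` might be consumed is outside this hunk; a hedged sketch of the standard wiring into an `http.Server` could look like this (file names and the `serveTLS` helper are hypothetical):

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

// Hypothetical consumer of getSSLConfig's new *tls.Certificate result.
// This is simply the standard way a preloaded key pair becomes a TLS
// listener, not necessarily how minio wires it up.
func serveTLS(tlsCert *tls.Certificate, handler http.Handler) error {
	srv := &http.Server{
		Addr:    ":9000",
		Handler: handler,
		TLSConfig: &tls.Config{
			Certificates: []tls.Certificate{*tlsCert},
		},
	}
	// Cert and key are already in TLSConfig, so the file
	// arguments can be left empty.
	return srv.ListenAndServeTLS("", "")
}

func main() {
	cert, err := tls.LoadX509KeyPair("public.crt", "private.key")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(serveTLS(&cert, http.NotFoundHandler()))
}
```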

cmd/common-main.go (new file, 117 lines)
View File

@@ -0,0 +1,117 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"os"
"path/filepath"
"strings"
"time"
"github.com/minio/cli"
)
// Check for updates and print a notification message
func checkUpdate(mode string) {
// It's OK to ignore any errors during getUpdateInfo() here.
if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil {
if updateMsg := computeUpdateMessage(downloadURL, older); updateMsg != "" {
log.Println(updateMsg)
}
}
}
func enableLoggers() {
fileLogTarget := serverConfig.Logger.GetFile()
if fileLogTarget.Enable {
err := InitFileLogger(&fileLogTarget)
fatalIf(err, "Unable to initialize file logger")
log.AddTarget(fileLogTarget)
}
consoleLogTarget := serverConfig.Logger.GetConsole()
if consoleLogTarget.Enable {
InitConsoleLogger(&consoleLogTarget)
}
log.SetConsoleTarget(consoleLogTarget)
}
func initConfig() {
// Config file does not exist, we create it fresh and return upon success.
if isFile(getConfigFile()) {
fatalIf(migrateConfig(), "Config migration failed.")
fatalIf(loadConfig(), "Unable to load config version: '%s'.", v19)
} else {
fatalIf(newConfig(), "Unable to initialize minio config for the first time.")
log.Println("Created minio configuration file successfully at " + getConfigDir())
}
}
func handleCommonCmdArgs(ctx *cli.Context) {
// Set configuration directory.
{
// Get configuration directory from command line argument.
configDir := ctx.String("config-dir")
if !ctx.IsSet("config-dir") && ctx.GlobalIsSet("config-dir") {
configDir = ctx.GlobalString("config-dir")
}
if configDir == "" {
fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
}
// Disallow relative paths, figure out absolute paths.
configDirAbs, err := filepath.Abs(configDir)
fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
setConfigDir(configDirAbs)
}
}
func handleCommonEnvVars() {
// Start profiler if env is set.
if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
globalProfiler = startProfiler(profiler)
}
// Check if object cache is disabled.
globalXLObjCacheDisabled = strings.EqualFold(os.Getenv("_MINIO_CACHE"), "off")
accessKey := os.Getenv("MINIO_ACCESS_KEY")
secretKey := os.Getenv("MINIO_SECRET_KEY")
if accessKey != "" && secretKey != "" {
cred, err := createCredential(accessKey, secretKey)
fatalIf(err, "Invalid access/secret Key set in environment.")
// credential Envs are set globally.
globalIsEnvCreds = true
globalActiveCred = cred
}
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBrowserFlag(browser)
if err != nil {
fatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
}
// browser Envs are set globally, this does not represent
// if browser is turned off or on.
globalIsEnvBrowser = true
globalIsBrowserEnabled = bool(browserFlag)
}
}

View File

@@ -141,7 +141,13 @@ func migrateConfig() error {
return err
}
fallthrough
case v18:
case "18":
// Migrate version '18' to '19'.
if err = migrateV18ToV19(); err != nil {
return err
}
fallthrough
case v19:
// No migration needed; this always points to the current version.
err = nil
}
@@ -1366,3 +1372,109 @@ func migrateV17ToV18() error {
log.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version)
return nil
}
func migrateV18ToV19() error {
configFile := getConfigFile()
cv18 := &serverConfigV18{}
_, err := quick.Load(configFile, cv18)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version 18. %v", err)
}
if cv18.Version != "18" {
return nil
}
// Copy over fields from V18 into V19 config struct
srvConfig := &serverConfigV18{
Logger: &loggers{},
Notify: &notifier{},
}
srvConfig.Version = "19"
srvConfig.Credential = cv18.Credential
srvConfig.Region = cv18.Region
if srvConfig.Region == "" {
// Region needs to be set for AWS Signature Version 4.
srvConfig.Region = globalMinioDefaultRegion
}
srvConfig.Logger.Console = cv18.Logger.Console
srvConfig.Logger.File = cv18.Logger.File
// check and set notifiers config
if len(cv18.Notify.AMQP) == 0 {
srvConfig.Notify.AMQP = make(map[string]amqpNotify)
srvConfig.Notify.AMQP["1"] = amqpNotify{}
} else {
// New deliveryMode parameter is added for AMQP,
// default value is already 0, so nothing to
// explicitly migrate here.
srvConfig.Notify.AMQP = cv18.Notify.AMQP
}
if len(cv18.Notify.ElasticSearch) == 0 {
srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{
Format: formatNamespace,
}
} else {
srvConfig.Notify.ElasticSearch = cv18.Notify.ElasticSearch
}
if len(cv18.Notify.Redis) == 0 {
srvConfig.Notify.Redis = make(map[string]redisNotify)
srvConfig.Notify.Redis["1"] = redisNotify{
Format: formatNamespace,
}
} else {
srvConfig.Notify.Redis = cv18.Notify.Redis
}
if len(cv18.Notify.PostgreSQL) == 0 {
srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{
Format: formatNamespace,
}
} else {
srvConfig.Notify.PostgreSQL = cv18.Notify.PostgreSQL
}
if len(cv18.Notify.Kafka) == 0 {
srvConfig.Notify.Kafka = make(map[string]kafkaNotify)
srvConfig.Notify.Kafka["1"] = kafkaNotify{}
} else {
srvConfig.Notify.Kafka = cv18.Notify.Kafka
}
if len(cv18.Notify.NATS) == 0 {
srvConfig.Notify.NATS = make(map[string]natsNotify)
srvConfig.Notify.NATS["1"] = natsNotify{}
} else {
srvConfig.Notify.NATS = cv18.Notify.NATS
}
if len(cv18.Notify.Webhook) == 0 {
srvConfig.Notify.Webhook = make(map[string]webhookNotify)
srvConfig.Notify.Webhook["1"] = webhookNotify{}
} else {
srvConfig.Notify.Webhook = cv18.Notify.Webhook
}
if len(cv18.Notify.MySQL) == 0 {
srvConfig.Notify.MySQL = make(map[string]mySQLNotify)
srvConfig.Notify.MySQL["1"] = mySQLNotify{
Format: formatNamespace,
}
} else {
srvConfig.Notify.MySQL = cv18.Notify.MySQL
}
// V18 does not have MQTT support, so we add it here.
srvConfig.Notify.MQTT = make(map[string]mqttNotify)
srvConfig.Notify.MQTT["1"] = mqttNotify{}
// Load browser config from existing config in the file.
srvConfig.Browser = cv18.Browser
if err = quick.Save(configFile, srvConfig); err != nil {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv18.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version)
return nil
}

View File

@@ -122,11 +122,14 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
if err := migrateV17ToV18(); err != nil {
t.Fatal("migrate v17 to v18 should succeed when no config file is found")
}
if err := migrateV18ToV19(); err != nil {
t.Fatal("migrate v18 to v19 should succeed when no config file is found")
}
}
// Test if a config migration from v2 to v18 is successfully done
func TestServerConfigMigrateV2toV18(t *testing.T) {
// Test if a config migration from v2 to v19 is successfully done
func TestServerConfigMigrateV2toV19(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
@@ -166,7 +169,7 @@ func TestServerConfigMigrateV2toV18(t *testing.T) {
}
// Check the version number in the upgraded config file
expectedVersion := v18
expectedVersion := v19
if serverConfig.Version != expectedVersion {
t.Fatalf("Expect version "+expectedVersion+", found: %v", serverConfig.Version)
}
@@ -246,6 +249,9 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV17ToV18(); err == nil {
t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json")
}
if err := migrateV18ToV19(); err == nil {
t.Fatal("migrateConfigV18ToV19() should fail with a corrupted json")
}
}
// Test if all migrate code returns error with corrupted config files

View File

@@ -449,3 +449,22 @@ type serverConfigV17 struct {
// Notification queue configuration.
Notify *notifier `json:"notify"`
}
// serverConfigV18 server configuration version '18' which is like
// version '17' except it adds support for "deliveryMode" parameter in
// the AMQP notification target.
type serverConfigV18 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential credential `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifier `json:"notify"`
}

View File

@@ -27,18 +27,17 @@ import (
)
// Config version
const v18 = "18"
const v19 = "19"
var (
// serverConfig server config.
serverConfig *serverConfigV18
serverConfig *serverConfigV19
serverConfigMu sync.RWMutex
)
// serverConfigV18 server configuration version '18' which is like
// version '17' except it adds support for "deliveryMode" parameter in
// the AMQP notification target.
type serverConfigV18 struct {
// serverConfigV19 server configuration version '19' which is like
// version '18' except it adds support for MQTT notifications.
type serverConfigV19 struct {
sync.RWMutex
Version string `json:"version"`
@@ -55,7 +54,7 @@ type serverConfigV18 struct {
}
// GetVersion get current config version.
func (s *serverConfigV18) GetVersion() string {
func (s *serverConfigV19) GetVersion() string {
s.RLock()
defer s.RUnlock()
@@ -63,7 +62,7 @@ func (s *serverConfigV18) GetVersion() string {
}
// SetRegion set new region.
func (s *serverConfigV18) SetRegion(region string) {
func (s *serverConfigV19) SetRegion(region string) {
s.Lock()
defer s.Unlock()
@@ -71,7 +70,7 @@ func (s *serverConfigV18) SetRegion(region string) {
}
// GetRegion get current region.
func (s *serverConfigV18) GetRegion() string {
func (s *serverConfigV19) GetRegion() string {
s.RLock()
defer s.RUnlock()
@@ -79,7 +78,7 @@ func (s *serverConfigV18) GetRegion() string {
}
// SetCredentials set new credentials.
func (s *serverConfigV18) SetCredential(creds credential) {
func (s *serverConfigV19) SetCredential(creds credential) {
s.Lock()
defer s.Unlock()
@@ -88,7 +87,7 @@ func (s *serverConfigV18) SetCredential(creds credential) {
}
// GetCredentials get current credentials.
func (s *serverConfigV18) GetCredential() credential {
func (s *serverConfigV19) GetCredential() credential {
s.RLock()
defer s.RUnlock()
@@ -96,7 +95,7 @@ func (s *serverConfigV18) GetCredential() credential {
}
// SetBrowser set if browser is enabled.
func (s *serverConfigV18) SetBrowser(b bool) {
func (s *serverConfigV19) SetBrowser(b bool) {
s.Lock()
defer s.Unlock()
@@ -105,7 +104,7 @@ func (s *serverConfigV18) SetBrowser(b bool) {
}
// GetBrowser get current browser status.
func (s *serverConfigV18) GetBrowser() bool {
func (s *serverConfigV19) GetBrowser() bool {
s.RLock()
defer s.RUnlock()
@@ -113,7 +112,7 @@ func (s *serverConfigV18) GetBrowser() bool {
}
// Save config.
func (s *serverConfigV18) Save() error {
func (s *serverConfigV19) Save() error {
s.RLock()
defer s.RUnlock()
@@ -121,9 +120,9 @@ func (s *serverConfigV18) Save() error {
return quick.Save(getConfigFile(), s)
}
func newServerConfigV18() *serverConfigV18 {
srvCfg := &serverConfigV18{
Version: v18,
func newServerConfigV19() *serverConfigV19 {
srvCfg := &serverConfigV19{
Version: v19,
Credential: mustGetNewCredential(),
Region: globalMinioDefaultRegion,
Browser: true,
@@ -137,6 +136,8 @@ func newServerConfigV18() *serverConfigV18 {
// Make sure to initialize notification configs.
srvCfg.Notify.AMQP = make(map[string]amqpNotify)
srvCfg.Notify.AMQP["1"] = amqpNotify{}
srvCfg.Notify.MQTT = make(map[string]mqttNotify)
srvCfg.Notify.MQTT["1"] = mqttNotify{}
srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
srvCfg.Notify.Redis = make(map[string]redisNotify)
@@ -159,7 +160,7 @@ func newServerConfigV18() *serverConfigV18 {
// found, otherwise use default parameters
func newConfig() error {
// Initialize server config.
srvCfg := newServerConfigV18()
srvCfg := newServerConfigV19()
// If env is set override the credentials from config file.
if globalIsEnvCreds {
@@ -237,8 +238,8 @@ func checkDupJSONKeys(json string) error {
}
// getValidConfig - returns valid server configuration
func getValidConfig() (*serverConfigV18, error) {
srvCfg := &serverConfigV18{
func getValidConfig() (*serverConfigV19, error) {
srvCfg := &serverConfigV19{
Region: globalMinioDefaultRegion,
Browser: true,
}
@@ -248,8 +249,8 @@ func getValidConfig() (*serverConfigV18, error) {
return nil, err
}
if srvCfg.Version != v18 {
return nil, fmt.Errorf("configuration version mismatch. Expected: %s, Got: %s", v18, srvCfg.Version)
if srvCfg.Version != v19 {
return nil, fmt.Errorf("configuration version mismatch. Expected: %s, Got: %s", v19, srvCfg.Version)
}
// Load config file json and check for duplicated json keys

View File

@@ -80,13 +80,21 @@ func TestServerConfig(t *testing.T) {
}
// Set new console logger.
// Set new Webhook notification id.
// Set new MySQL notification id.
serverConfig.Notify.SetMySQLByID("2", mySQLNotify{})
savedNotifyCfg6 := serverConfig.Notify.GetMySQLByID("2")
if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6)
}
// Set new console logger.
// Set new MQTT notification id.
serverConfig.Notify.SetMQTTByID("2", mqttNotify{})
savedNotifyCfg7 := serverConfig.Notify.GetMQTTByID("2")
if !reflect.DeepEqual(savedNotifyCfg7, mqttNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", mqttNotify{}, savedNotifyCfg7)
}
consoleLogger := NewConsoleLogger()
serverConfig.Logger.SetConsole(consoleLogger)
consoleCfg := serverConfig.Logger.GetConsole()
@@ -109,8 +117,8 @@ func TestServerConfig(t *testing.T) {
serverConfig.Logger.SetFile(fileLogger)
// Match version.
if serverConfig.GetVersion() != v18 {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v18)
if serverConfig.GetVersion() != v19 {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v19)
}
// Attempt to save.
@@ -223,7 +231,7 @@ func TestValidateConfig(t *testing.T) {
configPath := filepath.Join(rootPath, minioConfigFile)
v := v18
v := v19
testCases := []struct {
configData string
@@ -309,6 +317,9 @@ func TestValidateConfig(t *testing.T) {
// Test 28 - Test valid Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
// Test 29 - Test MQTT
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false},
}
for i, testCase := range testCases {

View File

@@ -77,14 +77,14 @@ func (endpoint Endpoint) SetHTTP() {
}
// NewEndpoint - returns new endpoint based on given arguments.
func NewEndpoint(arg string) (Endpoint, error) {
func NewEndpoint(arg string) (ep Endpoint, e error) {
// isEmptyPath - check whether given path is empty.
isEmptyPath := func(path string) bool {
return path == "" || path == "/" || path == `\`
}
if isEmptyPath(arg) {
return Endpoint{}, fmt.Errorf("empty or root endpoint is not supported")
return ep, fmt.Errorf("empty or root endpoint is not supported")
}
var isLocal bool
@@ -96,13 +96,13 @@ func NewEndpoint(arg string) (Endpoint, error) {
// - All field should be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format")
return ep, fmt.Errorf("invalid URL endpoint format")
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: %s", err)
return ep, fmt.Errorf("invalid URL endpoint format: %s", err)
}
host = u.Host
@@ -110,26 +110,26 @@ func NewEndpoint(arg string) (Endpoint, error) {
var p int
p, err = strconv.Atoi(port)
if err != nil {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: invalid port number")
return ep, fmt.Errorf("invalid URL endpoint format: invalid port number")
} else if p < 1 || p > 65535 {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
return ep, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
}
}
if host == "" {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: empty host name")
return ep, fmt.Errorf("invalid URL endpoint format: empty host name")
}
// As this is path in the URL, we should use path package, not filepath package.
// On MS Windows, filepath.Clean() converts into Windows path style ie `/foo` becomes `\foo`
u.Path = path.Clean(u.Path)
if isEmptyPath(u.Path) {
return Endpoint{}, fmt.Errorf("empty or root path is not supported in URL endpoint")
return ep, fmt.Errorf("empty or root path is not supported in URL endpoint")
}
isLocal, err = isLocalHost(host)
if err != nil {
return Endpoint{}, err
return ep, err
}
} else {
u = &url.URL{Path: path.Clean(arg)}

View File

@@ -29,7 +29,7 @@ import (
// all the disks, writes also calculate individual block's checksum
// for future bit-rot protection.
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, allowEmpty bool, blockSize int64,
dataBlocks, parityBlocks int, algo HashAlgo, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {
dataBlocks, parityBlocks int, algo HashAlgo, writeQuorum int) (newDisks []StorageAPI, bytesWritten int64, checkSums []string, err error) {
// Allocated blockSized buffer for reading from incoming stream.
buf := make([]byte, blockSize)
@@ -43,7 +43,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
// FIXME: this is a bug in Golang, n == 0 and err ==
// io.ErrUnexpectedEOF for io.ReadFull function.
if n == 0 && rErr == io.ErrUnexpectedEOF {
return 0, nil, traceError(rErr)
return nil, 0, nil, traceError(rErr)
}
if rErr == io.EOF {
// We have reached EOF on the first byte read, io.Reader
@@ -51,28 +51,28 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
// data. Will create a 0byte file instead.
if bytesWritten == 0 && allowEmpty {
blocks = make([][]byte, len(disks))
rErr = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum)
newDisks, rErr = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum)
if rErr != nil {
return 0, nil, rErr
return nil, 0, nil, rErr
}
} // else we have reached EOF after few reads, no need to
// add an additional 0bytes at the end.
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, nil, traceError(rErr)
return nil, 0, nil, traceError(rErr)
}
if n > 0 {
// Returns encoded blocks.
var enErr error
blocks, enErr = encodeData(buf[0:n], dataBlocks, parityBlocks)
if enErr != nil {
return 0, nil, enErr
return nil, 0, nil, enErr
}
// Write to all disks.
if err = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum); err != nil {
return 0, nil, err
if newDisks, err = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum); err != nil {
return nil, 0, nil, err
}
bytesWritten += int64(n)
}
@@ -82,7 +82,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
for i := range checkSums {
checkSums[i] = hex.EncodeToString(hashWriters[i].Sum(nil))
}
return bytesWritten, checkSums, nil
return newDisks, bytesWritten, checkSums, nil
}
// encodeData - encodes incoming data buffer into
@@ -110,7 +110,7 @@ func encodeData(dataBuffer []byte, dataBlocks, parityBlocks int) ([][]byte, erro
}
// appendFile - append data buffer at path.
func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hashWriters []hash.Hash, writeQuorum int) (err error) {
func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hashWriters []hash.Hash, writeQuorum int) ([]StorageAPI, error) {
var wg = &sync.WaitGroup{}
var wErrs = make([]error, len(disks))
// Write encoded data to quorum disks in parallel.
@@ -126,8 +126,6 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
wErr := disk.AppendFile(volume, path, enBlocks[index])
if wErr != nil {
wErrs[index] = traceError(wErr)
// Ignore disk which returned an error.
disks[index] = nil
return
}
@@ -142,5 +140,5 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
// Wait for all the appends to finish.
wg.Wait()
return reduceWriteQuorumErrs(wErrs, objectOpIgnoredErrs, writeQuorum)
return evalDisks(disks, wErrs), reduceWriteQuorumErrs(wErrs, objectOpIgnoredErrs, writeQuorum)
}
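
`evalDisks` itself is not defined in this diff; below is a plausible sketch of its contract, assuming it replaces the removed in-place `disks[index] = nil` with a copy so the caller's slice is never mutated (the `StorageAPI` stand-in and `disk` type are illustrative):

```go
package main

import "fmt"

// StorageAPI stands in for minio's storage interface (elided).
type StorageAPI interface{ String() string }

type disk string

func (d disk) String() string { return string(d) }

// evalDisks, as sketched here, builds a fresh slice in which every
// disk whose write failed is nil, instead of nil-ing entries in the
// caller's slice the way the removed "disks[index] = nil" line did.
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
	newDisks := make([]StorageAPI, len(disks))
	for i := range disks {
		if errs[i] == nil {
			newDisks[i] = disks[i] // disk survived this append
		}
		// on error the slot stays nil, so later phases skip it
	}
	return newDisks
}

func main() {
	out := evalDisks(
		[]StorageAPI{disk("d0"), disk("d1")},
		[]error{nil, fmt.Errorf("write failed")},
	)
	fmt.Println(out) // [d0 <nil>]
}
```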

View File

@@ -56,7 +56,7 @@ func TestErasureCreateFile(t *testing.T) {
t.Fatal(err)
}
// Test when all disks are up.
size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@@ -69,7 +69,7 @@ func TestErasureCreateFile(t *testing.T) {
disks[5] = AppendDiskDown{disks[5].(*posix)}
// Test when two disks are down.
size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@@ -83,7 +83,7 @@ func TestErasureCreateFile(t *testing.T) {
disks[8] = AppendDiskDown{disks[8].(*posix)}
disks[9] = AppendDiskDown{disks[9].(*posix)}
size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@@ -93,7 +93,7 @@ func TestErasureCreateFile(t *testing.T) {
// 1 more disk down. 7 disks down in total. Should return quorum error.
disks[10] = AppendDiskDown{disks[10].(*posix)}
_, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, _, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if errorCause(err) != errXLWriteQuorum {
t.Errorf("erasureCreateFile return value: expected errXLWriteQuorum, got %s", err)
}

View File

@@ -48,7 +48,7 @@ func TestErasureHealFile(t *testing.T) {
t.Fatal(err)
}
// Create a test file.
size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}

View File

@@ -242,7 +242,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
}
// Create a test file to read from.
size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@@ -325,7 +325,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
}
// Create a test file to read from.
size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@@ -404,7 +404,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
iterations := 10000
// Create a test file to read from.
size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
_, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != nil {
t.Fatal(err)
}

View File

@@ -503,9 +503,8 @@ func removeNotificationConfig(bucket string, objAPI ObjectLayer) error {
// Acquire a write lock on notification config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath)
objLock.Lock()
err := objAPI.DeleteObject(minioMetaBucket, ncPath)
objLock.Unlock()
return err
defer objLock.Unlock()
return objAPI.DeleteObject(minioMetaBucket, ncPath)
}
// Remove listener configuration from storage layer. Used when a bucket is deleted.
@@ -516,9 +515,8 @@ func removeListenerConfig(bucket string, objAPI ObjectLayer) error {
// Acquire a write lock on listener config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, lcPath)
objLock.Lock()
err := objAPI.DeleteObject(minioMetaBucket, lcPath)
objLock.Unlock()
return err
defer objLock.Unlock()
return objAPI.DeleteObject(minioMetaBucket, lcPath)
}
// Loads both notification and listener config.
@@ -609,6 +607,25 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
}
}
// Load all mqtt targets, initialize their respective loggers.
for accountID, mqttN := range serverConfig.Notify.GetMQTT() {
if !mqttN.Enable {
continue
}
if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMQTT, newMQTTNotify); err != nil {
if _, ok := err.(net.Error); ok {
err = &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err
}
}
// Load all nats targets, initialize their respective loggers.
for accountID, natsN := range serverConfig.Notify.GetNATS() {
if !natsN.Enable {

View File

@@ -312,7 +312,7 @@ func TestInitEventNotifier(t *testing.T) {
filterRules := []filterRule{
{
Name: "prefix",
Value: globalMinioDefaultOwnerID,
Value: "minio",
},
{
Name: "suffix",
@@ -535,7 +535,7 @@ func TestAddRemoveBucketListenerConfig(t *testing.T) {
filterRules := []filterRule{
{
Name: "prefix",
Value: globalMinioDefaultOwnerID,
Value: "minio",
},
{
Name: "suffix",

View File

@@ -159,7 +159,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
}
// listMultipartUploads - lists all multipart uploads.
func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result := ListMultipartsInfo{}
recursive := true
if delimiter == slashSeparator {
@@ -191,7 +191,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if uploadIDMarker != "" {
uploads, _, err = fs.listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
maxUploads = maxUploads - len(uploads)
}
@@ -232,7 +232,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
eof = true
break
}
return ListMultipartsInfo{}, walkResult.err
return lmi, walkResult.err
}
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
@@ -256,7 +256,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
tmpUploads, end, err = fs.listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
uploads = append(uploads, tmpUploads...)
@@ -311,13 +311,13 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// Implements S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListMultipartsInfo{}, toObjectErr(err, bucket)
return lmi, toObjectErr(err, bucket)
}
return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
@@ -412,9 +412,9 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo,
// CopyObjectPart - similar to PutObjectPart but reads data from an existing
// object. Internally incoming data is written to '.minio.sys/tmp' location
// and safely renamed to '.minio.sys/multipart' for each part.
func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) {
func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) {
if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
return PartInfo{}, err
return pi, err
}
// Initialize pipe.
@@ -431,7 +431,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
if err != nil {
return PartInfo{}, toObjectErr(err, dstBucket, dstObject)
return pi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@@ -444,13 +444,13 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return PartInfo{}, err
return pi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return PartInfo{}, toObjectErr(err, bucket)
return pi, toObjectErr(err, bucket)
}
// Hold the lock so that two parallel complete-multipart-uploads
@@ -463,9 +463,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)
if _, err := fs.rwPool.Open(uploadsPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
return PartInfo{}, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(traceError(err), bucket, object)
}
defer fs.rwPool.Close(uploadsPath)
@@ -476,16 +476,16 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
rwlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
return PartInfo{}, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(traceError(err), bucket, object)
}
defer rwlk.Close()
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(rwlk)
if err != nil {
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
}
partSuffix := fmt.Sprintf("object%d", partID)
@@ -523,14 +523,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size)
if cErr != nil {
fsRemoveFile(fsPartPath)
return PartInfo{}, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
return pi, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
fsRemoveFile(fsPartPath)
return PartInfo{}, traceError(IncompleteBody{})
return pi, traceError(IncompleteBody{})
}
// Delete temporary part in case of failure. If
@@ -541,14 +541,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
return pi, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, traceError(SHA256Mismatch{})
return pi, traceError(SHA256Mismatch{})
}
}
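
For context, PutObjectPart computes both digests by streaming the request body through hash writers while it is persisted, which is why the checks above compare against md5Writer and sha256Writer. A self-contained sketch of that tee pattern (ioutil.Discard stands in for the fsCreateFile destination):

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	data := bytes.NewReader([]byte("part payload"))
	md5Writer := md5.New()
	sha256Writer := sha256.New()
	// Every byte read from teeReader is also written to both hashes.
	teeReader := io.TeeReader(data, io.MultiWriter(md5Writer, sha256Writer))
	if _, err := io.Copy(ioutil.Discard, teeReader); err != nil {
		panic(err)
	}
	fmt.Println("md5:", hex.EncodeToString(md5Writer.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(sha256Writer.Sum(nil)))
}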
@@ -561,20 +561,20 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
fsNSPartPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, partPath)
if err = fsRenameFile(fsPartPath, fsNSPartPath); err != nil {
partLock.Unlock()
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
// Save the object part info in `fs.json`.
fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
if _, err = fsMeta.WriteTo(rwlk); err != nil {
partLock.Unlock()
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
fi, err := fsStatFile(partNamePath)
if err != nil {
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
}
// Append the part in background.
@@ -599,7 +599,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// listObjectParts - wrapper scanning through
// '.minio.sys/multipart/bucket/object/UPLOADID'. Lists all the parts
// saved inside '.minio.sys/multipart/bucket/object/UPLOADID'.
func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
result := ListPartsInfo{}
uploadIDPath := pathJoin(bucket, object, uploadID)
@@ -608,16 +608,16 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
// On Windows, oddly, this is returned.
return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return lpi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ListPartsInfo{}, toObjectErr(traceError(err), bucket, object)
return lpi, toObjectErr(traceError(err), bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(metaFile.LockedFile)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath)
return lpi, toObjectErr(err, minioMetaBucket, fsMetaPath)
}
// Only parts with higher part numbers will be listed.
@@ -633,7 +633,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, part.Name)
fi, err = fsStatFile(partNamePath)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, partNamePath)
return lpi, toObjectErr(err, minioMetaMultipartBucket, partNamePath)
}
result.Parts = append(result.Parts, PartInfo{
PartNumber: part.Number,
@@ -671,13 +671,13 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, fs); err != nil {
return ListPartsInfo{}, err
return lpi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListPartsInfo{}, toObjectErr(err, bucket)
return lpi, toObjectErr(err, bucket)
}
// Hold the lock so that two parallel complete-multipart-uploads
@@ -688,7 +688,7 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
listPartsInfo, err := fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, bucket, object)
return lpi, toObjectErr(err, bucket, object)
}
// Success.
@@ -701,24 +701,24 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) {
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil {
return ObjectInfo{}, err
return oi, err
}
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
return oi, toObjectErr(err, bucket)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts)
if err != nil {
return ObjectInfo{}, err
return oi, err
}
uploadIDPath := pathJoin(bucket, object, uploadID)
@@ -733,9 +733,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
rlk, err := fs.rwPool.Open(fsMetaPathMultipart)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return oi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
// Disallow any parallel abort or complete multipart operations.
@@ -743,9 +743,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound || err == errFileAccessDenied {
return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return oi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
defer rwlk.Close()
@@ -754,7 +754,31 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
_, err = fsMeta.ReadFrom(rlk.LockedFile)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)
return oi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)
}
// Validate all parts and then commit to disk.
for i, part := range parts {
partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{})
}
if fsMeta.Parts[partIdx].ETag != part.ETag {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{})
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag,
})
}
}
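
S3 requires every part except the last to be at least 5 MiB, which is what isMinAllowedPartSize guards above. Its body is not part of this diff; assuming the standard S3 limit, it plausibly reduces to a single comparison:

// Sketch only - the actual helper is defined elsewhere in cmd/.
const minAllowedPartSize = 5 * 1024 * 1024 // 5 MiB

func isMinAllowedPartSize(size int64) bool {
	return size >= minAllowedPartSize
}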
// Wait for any competing PutObject() operation on bucket/object, since same namespace
@@ -763,7 +787,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
defer metaFile.Close()
@@ -780,7 +804,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID)
return oi, toObjectErr(err, minioMetaTmpBucket, uploadID)
}
}
}
@@ -798,29 +822,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Allocate staging buffer.
var buf = make([]byte, readSizeV1)
// Validate all parts and then commit to disk.
for i, part := range parts {
partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(InvalidPart{})
}
if fsMeta.Parts[partIdx].ETag != part.ETag {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(BadDigest{})
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag,
})
}
for _, part := range parts {
// Construct part suffix.
partSuffix := fmt.Sprintf("object%d", part.PartNumber)
multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
@@ -831,9 +833,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound {
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
return ObjectInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix)
return oi, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix)
}
// No need to hold a lock, this is a unique file and will be only written
@@ -843,7 +845,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
_, err = io.CopyBuffer(wfile, reader, buf)
@@ -851,7 +853,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
wfile.Close()
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
wfile.Close()
@@ -860,7 +862,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID)
return oi, toObjectErr(err, minioMetaTmpBucket, uploadID)
}
}
@@ -876,7 +878,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Write all the set metadata.
if _, err = fsMeta.WriteTo(metaFile); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Close lock held on bucket/object/uploadid/fs.json,
@@ -888,17 +890,17 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object)
multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID)
if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Remove entry from `uploads.json`.
if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil {
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object))
return oi, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object))
}
fi, err := fsStatFile(fsNSObjPath)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Return object info.

View File

@@ -103,6 +103,16 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
}
}
di, err := getDiskInfo(preparePath(fsPath))
if err != nil {
return nil, err
}
// Check if disk has minimum required total space.
if err = checkDiskMinTotal(di); err != nil {
return nil, err
}
// Assign a new UUID for FS minio mode. Each server instance
// gets its own UUID for temporary file transaction.
fsUUID := mustGetUUID()
@@ -138,14 +148,12 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
fs.fsFormatRlk = rlk
// Initialize and load bucket policies.
err = initBucketPolicies(fs)
if err != nil {
if err = initBucketPolicies(fs); err != nil {
return nil, fmt.Errorf("Unable to load all bucket policies. %s", err)
}
// Initialize a new event notifier.
err = initEventNotifier(fs)
if err != nil {
if err = initEventNotifier(fs); err != nil {
return nil, fmt.Errorf("Unable to initialize event notification. %s", err)
}
@@ -214,10 +222,10 @@ func (fs fsObjects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo - fetch bucket metadata info.
func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (fs fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
st, err := fs.statBucketDir(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
return bi, toObjectErr(err, bucket)
}
// As osStat() doesn't carry other than ModTime(), use ModTime() as CreatedTime.
@@ -304,15 +312,15 @@ func (fs fsObjects) DeleteBucket(bucket string) error {
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) {
func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (oi ObjectInfo, e error) {
if _, err := fs.statBucketDir(srcBucket); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket)
return oi, toObjectErr(err, srcBucket)
}
// Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject))
if err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Check if this request is only metadata update.
@@ -322,7 +330,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
var wlk *lock.LockedFile
wlk, err = fs.rwPool.Write(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject)
return oi, toObjectErr(traceError(err), srcBucket, srcObject)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@@ -331,7 +339,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
fsMeta := newFSMetaV1()
fsMeta.Meta = metadata
if _, err = fsMeta.WriteTo(wlk); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Return the new object info.
@@ -356,7 +364,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
if err != nil {
return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject)
return oi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@@ -431,7 +439,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{}
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
@@ -446,33 +454,33 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
// PutObject() transaction, if we arrive at such
// a situation we just ignore and continue.
if errorCause(rerr) != io.EOF {
return ObjectInfo{}, toObjectErr(rerr, bucket, object)
return oi, toObjectErr(rerr, bucket, object)
}
}
}
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
// Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
func (fs fsObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
return oi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
return oi, toObjectErr(err, bucket)
}
return fs.getObjectInfo(bucket, object)
@@ -759,18 +767,18 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
// With max keys of zero we have reached eof, return right here.
if maxKeys == 0 {
return ListObjectsInfo{}, nil
return loi, nil
}
// For delimiter and prefix as '/' we do not list anything at all
@@ -779,7 +787,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// as '/' we don't have any entries, since all the keys are
// of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return ListObjectsInfo{}, nil
return loi, nil
}
// Overflowing count - reset to maxObjectList.
@@ -860,13 +868,13 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
if walkResult.err != nil {
// File not found is a valid case.
if errorCause(walkResult.err) == errFileNotFound {
return ListObjectsInfo{}, nil
return loi, nil
}
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
return loi, toObjectErr(walkResult.err, bucket, prefix)
}
objInfo, err := entryToObjectInfo(walkResult.entry)
if err != nil {
return ListObjectsInfo{}, nil
return loi, nil
}
nextMarker = objInfo.Name
objInfos = append(objInfos, objInfo)
@@ -908,8 +916,8 @@ func (fs fsObjects) HealBucket(bucket string) error {
}
// ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListBucketsHeal - list all buckets to be healed. Valid only for XL
@@ -918,6 +926,6 @@ func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
}
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

cmd/gateway-anonymous.go Normal file
View File

@@ -0,0 +1,49 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "net/http"
func anonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
switch statusCode {
case http.StatusNotFound:
if object != "" {
return ObjectNotFound{bucket, object}
}
return BucketNotFound{Bucket: bucket}
case http.StatusBadRequest:
if object != "" {
return ObjectNameInvalid{bucket, object}
}
return BucketNameInvalid{Bucket: bucket}
case http.StatusForbidden:
fallthrough
case http.StatusUnauthorized:
return AllAccessDisabled{bucket, object}
}
return errUnexpected
}
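
The gateway backends below call this helper to translate raw HTTP status codes from anonymous requests into typed object-layer errors. An illustrative caller (anonStat and its url parameter are hypothetical; uses net/http):

// anonStat HEADs a public URL and maps failures to object-layer errors.
func anonStat(url, bucket, object string) error {
	resp, err := http.Head(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// e.g. a 404 with both params set becomes ObjectNotFound{bucket, object}.
		return anonErrToObjectErr(resp.StatusCode, bucket, object)
	}
	return nil
}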

View File

@@ -29,6 +29,48 @@ import (
"github.com/Azure/azure-sdk-for-go/storage"
)
// Make anonymous HTTP request to azure endpoint.
func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) {
req, err := http.NewRequest(verb, urlStr, nil)
if err != nil {
return nil, err
}
if header != nil {
req.Header = header
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
// 4XX and 5XX are error HTTP codes.
if resp.StatusCode >= 400 && resp.StatusCode <= 511 {
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
return nil, storage.AzureStorageServiceError{
StatusCode: resp.StatusCode,
Code: resp.Status,
Message: "no response body was available for error status code",
}
}
// Response contains Azure storage service error object.
var storageErr storage.AzureStorageServiceError
if err := xml.Unmarshal(respBody, &storageErr); err != nil {
return nil, err
}
storageErr.StatusCode = resp.StatusCode
return nil, storageErr
}
return resp, nil
}
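
All anonymous Azure calls below are routed through this wrapper so that non-2xx responses surface as storage.AzureStorageServiceError values. An illustrative ranged GET (anonRangeGet is a hypothetical caller; httpGET is the package constant used below, and fmt/net/http are assumed imported):

// anonRangeGet fetches length bytes starting at start from a public blob URL.
func anonRangeGet(blobURL string, start, length int64) (*http.Response, error) {
	h := make(http.Header)
	h.Add("Range", fmt.Sprintf("bytes=%d-%d", start, start+length-1))
	return azureAnonRequest(httpGET, blobURL, h)
}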
// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
@@ -36,11 +78,11 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo,
return bucketInfo, azureToObjectError(traceError(err))
}
url.RawQuery = "restype=container"
resp, err := http.Head(url.String())
resp, err := azureAnonRequest(httpHEAD, url.String(), nil)
if err != nil {
return bucketInfo, azureToObjectError(traceError(err), bucket)
}
resp.Body.Close()
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
@@ -67,19 +109,14 @@ func (a *azureObjects) AnonPutObject(bucket, object string, size int64, data io.
// AnonGetObject - Send GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
u := a.client.GetBlobURL(bucket, object)
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
h := make(http.Header)
if length > 0 && startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
h.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
} else if startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
h.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
}
resp, err := http.DefaultClient.Do(req)
resp, err := azureAnonRequest(httpGET, a.client.GetBlobURL(bucket, object), h)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
}
@@ -96,11 +133,11 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l
// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(a.client.GetBlobURL(bucket, object))
resp, err := azureAnonRequest(httpHEAD, a.client.GetBlobURL(bucket, object), nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
resp.Body.Close()
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
@@ -153,7 +190,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
}
url.RawQuery = q.Encode()
resp, err := http.Get(url.String())
resp, err := azureAnonRequest(httpGET, url.String(), nil)
if err != nil {
return result, azureToObjectError(traceError(err))
}
@@ -190,3 +227,63 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
result.Prefixes = listResp.BlobPrefixes
return result, nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) {
params := storage.ListBlobsParameters{
Prefix: prefix,
Marker: continuationToken,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
}
q := azureListBlobsGetParameters(params)
q.Set("restype", "container")
q.Set("comp", "list")
url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
if err != nil {
return result, azureToObjectError(traceError(err))
}
url.RawQuery = q.Encode()
resp, err := http.Get(url.String())
if err != nil {
return result, azureToObjectError(traceError(err))
}
defer resp.Body.Close()
var listResp storage.BlobListResponse
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(traceError(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(traceError(err))
}
// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
if listResp.NextMarker != "" {
result.IsTruncated = true
result.NextContinuationToken = listResp.NextMarker
}
for _, object := range listResp.Blobs {
t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
if e != nil {
continue
}
result.Objects = append(result.Objects, ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
ETag: canonicalizeETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = listResp.BlobPrefixes
return result, nil
}

View File

@@ -32,12 +32,12 @@ func (a *azureObjects) HealObject(bucket, object string) (int, int, error) {
}
// ListObjectsHeal - Not relevant.
func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (a *azureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

View File

@@ -67,6 +67,11 @@ func azureToS3Metadata(meta map[string]string) (newMeta map[string]string) {
return newMeta
}
// Append "-1" to etag so that clients do not interpret it as MD5.
func azureToS3ETag(etag string) string {
return canonicalizeETag(etag) + "-1"
}
// To store metadata during NewMultipartUpload which will be used after
// CompleteMultipartUpload to call SetBlobMetadata.
type azureMultipartMetaInfo struct {
@@ -137,6 +142,8 @@ func azureToObjectError(err error, params ...string) error {
err = BucketExists{Bucket: bucket}
case "InvalidResourceName":
err = BucketNameInvalid{Bucket: bucket}
case "RequestBodyTooLarge":
err = PartTooBig{}
default:
switch azureErr.StatusCode {
case http.StatusNotFound:
@@ -153,15 +160,26 @@ func azureToObjectError(err error, params ...string) error {
return e
}
// Inits azure blob storage client and returns azureObjects.
func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) {
if endPoint == "" {
endPoint = storage.DefaultBaseURL
// Inits azure blob storage client and returns azureObjects.
func newAzureLayer(host string) (GatewayLayer, error) {
var err error
var endpoint = storage.DefaultBaseURL
var secure = true
// If user provided some parameters
if host != "" {
endpoint, secure, err = parseGatewayEndpoint(host)
if err != nil {
return nil, err
}
}
c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure)
creds := serverConfig.GetCredential()
c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure)
if err != nil {
return &azureObjects{}, err
}
return &azureObjects{
client: c.GetBlobService(),
metaInfo: azureMultipartMetaInfo{
@@ -174,13 +192,12 @@ func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLa
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a *azureObjects) Shutdown() error {
// TODO
return nil
}
// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() StorageInfo {
return StorageInfo{}
func (a *azureObjects) StorageInfo() (si StorageInfo) {
return si
}
// MakeBucketWithLocation - Create a new container on azure backend.
@@ -190,13 +207,13 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo - Get bucket metadata.
func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// Azure does not have an equivalent call, hence use ListContainers.
resp, err := a.client.ListContainers(storage.ListContainersParameters{
Prefix: bucket,
})
if err != nil {
return BucketInfo{}, azureToObjectError(traceError(err), bucket)
return bi, azureToObjectError(traceError(err), bucket)
}
for _, container := range resp.Containers {
if container.Name == bucket {
@@ -209,7 +226,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
} // else continue
}
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
@@ -260,7 +277,42 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
ETag: canonicalizeETag(object.Properties.Etag),
ETag: azureToS3ETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = resp.BlobPrefixes
return result, nil
}
// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) {
resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
Prefix: prefix,
Marker: continuationToken,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
})
if err != nil {
return result, azureToObjectError(traceError(err), bucket, prefix)
}
// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
if resp.NextMarker != "" {
result.IsTruncated = true
result.NextContinuationToken = resp.NextMarker
}
for _, object := range resp.Blobs {
t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
if e != nil {
continue
}
result.Objects = append(result.Objects, ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
ETag: azureToS3ETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
@@ -323,7 +375,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
objInfo = ObjectInfo{
Bucket: bucket,
UserDefined: meta,
ETag: canonicalizeETag(prop.Etag),
ETag: azureToS3ETag(prop.Etag),
ModTime: t,
Name: object,
Size: prop.ContentLength,
@@ -618,31 +670,6 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
return a.GetObjectInfo(bucket, object)
}
func anonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
switch statusCode {
case http.StatusNotFound:
if object != "" {
return ObjectNotFound{bucket, object}
}
return BucketNotFound{Bucket: bucket}
case http.StatusBadRequest:
if object != "" {
return ObjectNameInvalid{bucket, object}
}
return BucketNameInvalid{Bucket: bucket}
}
return errUnexpected
}
// Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
out := url.Values{}

View File

@@ -18,21 +18,41 @@ package cmd
import (
"net/http"
"net/url"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
)
// Test azureToS3ETag.
func TestAzureToS3ETag(t *testing.T) {
tests := []struct {
etag string
expected string
}{
{`"etag"`, `etag-1`},
{"etag", "etag-1"},
}
for i, test := range tests {
got := azureToS3ETag(test.etag)
if got != test.expected {
t.Errorf("test %d: got:%s expected:%s", i+1, got, test.expected)
}
}
}
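
canonicalizeETag itself is not part of this diff. Consistent with the vectors above, where quoted and unquoted input both yield a bare tag before the "-1" suffix is appended, a minimal sketch is (uses the strings package):

// Sketch - the in-tree helper may differ in detail.
func canonicalizeETag(etag string) string {
	return strings.Trim(etag, `"`)
}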
// Test canonical metadata.
func TestS3ToAzureHeaders(t *testing.T) {
headers := map[string]string{
"accept-encoding": "gzip",
"content-encoding": "gzip",
"X-Amz-Meta-Hdr": "value",
}
expectedHeaders := map[string]string{
"Accept-Encoding": "gzip",
"Content-Encoding": "gzip",
"X-Ms-Meta-Hdr": "value",
}
actualHeaders := s3ToAzureHeaders(headers)
if !reflect.DeepEqual(actualHeaders, expectedHeaders) {
@@ -155,3 +175,81 @@ func TestAzureParseBlockID(t *testing.T) {
t.Fatal("Expected azureParseBlockID() to return error")
}
}
// Test azureListBlobsGetParameters()
func TestAzureListBlobsGetParameters(t *testing.T) {
// Test values set 1
expectedURLValues := url.Values{}
expectedURLValues.Set("prefix", "test")
expectedURLValues.Set("delimiter", "_")
expectedURLValues.Set("marker", "marker")
expectedURLValues.Set("include", "hello")
expectedURLValues.Set("maxresults", "20")
expectedURLValues.Set("timeout", "10")
setBlobParameters := storage.ListBlobsParameters{"test", "_", "marker", "hello", 20, 10}
// Test values set 2
expectedURLValues1 := url.Values{}
setBlobParameters1 := storage.ListBlobsParameters{"", "", "", "", 0, 0}
testCases := []struct {
name string
args storage.ListBlobsParameters
want url.Values
}{
{"TestIfValuesSet", setBlobParameters, expectedURLValues},
{"TestIfValuesNotSet", setBlobParameters1, expectedURLValues1},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
if got := azureListBlobsGetParameters(test.args); !reflect.DeepEqual(got, test.want) {
t.Errorf("azureListBlobsGetParameters() = %v, want %v", got, test.want)
}
})
}
}
func TestAnonErrToObjectErr(t *testing.T) {
testCases := []struct {
name string
statusCode int
params []string
wantErr error
}{
{"ObjectNotFound",
http.StatusNotFound,
[]string{"testBucket", "testObject"},
ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNotFound",
http.StatusNotFound,
[]string{"testBucket", ""},
BucketNotFound{Bucket: "testBucket"},
},
{"ObjectNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", "testObject"},
ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", ""},
BucketNameInvalid{Bucket: "testBucket"},
},
{"UnexpectedError",
http.StatusBadGateway,
[]string{"testBucket", "testObject"},
errUnexpected,
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
if err := anonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
}
})
}
}

View File

@@ -0,0 +1,142 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"net/http"
"strconv"
"time"
)
func toGCSPublicURL(bucket, object string) string {
return fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
}
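
For example, an anonymous read of hello.txt in testbucket resolves to the public storage.googleapis.com endpoint:

func ExampleToGCSPublicURL() {
	fmt.Println(toGCSPublicURL("testbucket", "hello.txt"))
	// Output: https://storage.googleapis.com/testbucket/hello.txt
}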
// AnonPutObject creates a new object anonymously with the incoming data.
func (l *gcsGateway) AnonPutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
return ObjectInfo{}, NotImplemented{}
}
// AnonGetObject - Get object anonymously
func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil)
if err != nil {
return gcsToObjectError(traceError(err), bucket, object)
}
if length > 0 && startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
} else if startOffset > 0 {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return gcsToObjectError(traceError(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return gcsToObjectError(traceError(err), bucket, object)
}
// AnonGetObjectInfo - Get object info anonymously
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, object))
if err != nil {
return objInfo, gcsToObjectError(traceError(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
var contentLength int64
contentLengthStr := resp.Header.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return objInfo, gcsToObjectError(traceError(errUnexpected), bucket, object)
}
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return objInfo, traceError(err)
}
objInfo.ModTime = t
objInfo.Bucket = bucket
objInfo.UserDefined = make(map[string]string)
if resp.Header.Get("Content-Encoding") != "" {
objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
}
objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
objInfo.ETag = resp.Header.Get("Etag")
objInfo.ModTime = t
objInfo.Name = object
objInfo.Size = contentLength
return
}
// AnonListObjects - List objects anonymously
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
// Request V1 List Object to the backend
result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
}
// translate V1 Result to V2Info
return fromMinioClientListBucketResultToV2Info(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, ""))
if err != nil {
return bucketInfo, gcsToObjectError(traceError(err))
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
}
// Last-Modified date being returned by GCS
return BucketInfo{
Name: bucket,
}, nil
}

cmd/gateway-gcs-errors.go Normal file
View File

@@ -0,0 +1,27 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "errors"
var (
// Project ID format is not valid.
errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid")
// Project ID not found
errGCSProjectIDNotFound = errors.New("unknown project id")
)

View File

@@ -0,0 +1,42 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// HealBucket - Not relevant.
func (l *gcsGateway) HealBucket(bucket string) error {
return traceError(NotImplemented{})
}
// ListBucketsHeal - Not relevant.
func (l *gcsGateway) ListBucketsHeal() (buckets []BucketInfo, err error) {
return []BucketInfo{}, traceError(NotImplemented{})
}
// HealObject - Not relevant.
func (l *gcsGateway) HealObject(bucket string, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}
// ListObjectsHeal - Not relevant.
func (l *gcsGateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (l *gcsGateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

cmd/gateway-gcs.go Normal file

File diff suppressed because it is too large

cmd/gateway-gcs_test.go Normal file
View File

@@ -0,0 +1,181 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"reflect"
"testing"
minio "github.com/minio/minio-go"
)
func TestToGCSPageToken(t *testing.T) {
testCases := []struct {
Name string
Token string
}{
{
Name: "A",
Token: "CgFB",
},
{
Name: "AAAAAAAAAA",
Token: "CgpBQUFBQUFBQUFB",
},
{
Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
Token: "CmRBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB",
},
{
Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
Token: "CpEDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=",
},
{
Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
Token: "CpIDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB",
},
{
Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
Token: "CpMDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQ==",
},
{
Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
Token: "CvQDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=",
},
}
for i, testCase := range testCases {
if toGCSPageToken(testCase.Name) != testCase.Token {
t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token)
}
}
}
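
toGCSPageToken is not shown in this diff. The vectors above are consistent with encoding the last object name as protobuf field 1 (tag byte 0x0a, then a varint length, then the raw name) and base64-encoding the result; the following sketch reproduces every token in the table, though it is a reconstruction, not the verified in-tree code (uses encoding/base64):

func toGCSPageToken(name string) string {
	length := uint16(len(name))
	b := []byte{
		0x0a, // protobuf field 1, wire type 2 (length-delimited)
		byte(length & 0x7f),
	}
	if length > 127 {
		b[1] |= 0x80 // varint continuation bit
		b = append(b, byte(length>>7))
	}
	b = append(b, []byte(name)...)
	return base64.StdEncoding.EncodeToString(b)
}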
// TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat
func TestValidGCSProjectIDFormat(t *testing.T) {
testCases := []struct {
ProjectID string
Valid bool
}{
{"", false},
{"a", false},
{"Abc", false},
{"1bcd", false},
// 5 chars
{"abcdb", false},
// 6 chars
{"abcdbz", true},
// 30 chars
{"project-id-1-project-id-more-1", true},
// 31 chars
{"project-id-1-project-id-more-11", false},
{"storage.googleapis.com", false},
{"http://storage.googleapis.com", false},
{"http://localhost:9000", false},
{"project-id-1", true},
{"project-id-1988832", true},
{"projectid1414", true},
}
for i, testCase := range testCases {
valid := isValidGCSProjectIDFormat(testCase.ProjectID)
if valid != testCase.Valid {
t.Errorf("Test %d: Expected %v, got %v", i+1, valid, testCase.Valid)
}
}
}
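
The validator exercised here is not in this diff either. GCP project IDs are 6-30 characters long, start with a lowercase letter, end with a letter or digit, and contain only lowercase letters, digits, and hyphens; a regexp sketch consistent with every case above is (uses the regexp package):

// Sketch - matches the documented GCP project ID format.
var gcsProjectIDRegex = regexp.MustCompile(`^[a-z][a-z0-9-]{4,28}[a-z0-9]$`)

func isValidGCSProjectIDFormat(projectID string) bool {
	return gcsProjectIDRegex.MatchString(projectID)
}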
// Test for isGCSMarker.
func TestIsGCSMarker(t *testing.T) {
testCases := []struct {
marker string
expected bool
}{
{
marker: "{minio}gcs123",
expected: true,
},
{
marker: "{mini_no}tgcs123",
expected: false,
},
{
marker: "{minioagainnotgcs123",
expected: false,
},
{
marker: "obj1",
expected: false,
},
}
for i, tc := range testCases {
if actual := isGCSMarker(tc.marker); actual != tc.expected {
t.Errorf("Test %d: marker is %s, expected %v but got %v",
i+1, tc.marker, tc.expected, actual)
}
}
}
// Test for gcsMultipartMetaName.
func TestGCSMultipartMetaName(t *testing.T) {
uploadID := "a"
expected := pathJoin(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
got := gcsMultipartMetaName(uploadID)
if expected != got {
t.Errorf("expected: %s, got: %s", expected, got)
}
}
// Test for gcsMultipartDataName.
func TestGCSMultipartDataName(t *testing.T) {
var (
uploadID = "a"
etag = "b"
partNumber = 1
)
expected := pathJoin(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag))
got := gcsMultipartDataName(uploadID, partNumber, etag)
if expected != got {
t.Errorf("expected: %s, got: %s", expected, got)
}
}
func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {
listBucketResult := minio.ListBucketResult{
IsTruncated: false,
Marker: "testMarker",
NextMarker: "testMarker2",
CommonPrefixes: []minio.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}},
Contents: []minio.ObjectInfo{{Key: "testobj", ContentType: ""}},
}
listBucketV2Info := ListObjectsV2Info{
Prefixes: []string{"one", "two"},
Objects: []ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}},
IsTruncated: false,
ContinuationToken: "testMarker",
NextContinuationToken: "testMarker2",
}
if got := fromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) {
t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info)
}
}

View File

@@ -19,12 +19,12 @@ package cmd
import (
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"encoding/hex"
"encoding/json"
"encoding/xml"
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
@@ -140,7 +140,7 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re
}
// Reads the object at startOffset and writes to mw.
if err := getObject(bucket, object, startOffset, length, writer); err != nil {
if err = getObject(bucket, object, startOffset, length, writer); err != nil {
errorIf(err, "Unable to write to client.")
if !dataWritten {
// Error response only if no data has been written to client yet. i.e if
@@ -157,6 +157,23 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re
// call writer.Write(nil) to set appropriate headers.
writer.Write(nil)
}
// Get host and port from Request.RemoteAddr.
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
host, port = "", ""
}
// Notify object accessed via a GET request.
eventNotify(eventData{
Type: ObjectAccessedGet,
Bucket: bucket,
ObjInfo: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
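
The host/port fallback above works because net.SplitHostPort returns an error when RemoteAddr carries no port, in which case the event is emitted with empty fields. An illustrative helper capturing that behavior (splitRemoteAddr is hypothetical; uses the net package):

// splitRemoteAddr mirrors the fallback used by the handlers above.
func splitRemoteAddr(remoteAddr string) (host, port string) {
	host, port, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		// e.g. an address without a port: fall back to empty values.
		return "", ""
	}
	return host, port
}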
// PutObjectHandler - PUT Object
@@ -180,6 +197,8 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
bucket = vars["bucket"]
object = vars["object"]
// TODO: we should validate the object name here
// Get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil {
@@ -212,7 +231,12 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
}
// Extract metadata to be saved from incoming HTTP header.
metadata := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
if reqAuthType == authTypeStreamingSigned {
if contentEncoding, ok := metadata["content-encoding"]; ok {
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
@@ -236,8 +260,6 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
// Make sure we hex encode md5sum here.
metadata["etag"] = hex.EncodeToString(md5Bytes)
sha256sum := ""
// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
objectLock.Lock()
@@ -247,7 +269,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
switch reqAuthType {
case authTypeAnonymous:
// Create anonymous object.
objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, "")
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
@@ -256,7 +278,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, "")
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@@ -264,16 +286,19 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, "")
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
sha256sum := ""
if !skipContentSha256Cksum(r) {
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
sha256sum = getContentSha256Cksum(r)
}
// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
default:
@@ -283,12 +308,30 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
}
if err != nil {
errorIf(err, "Unable to save an object %s", r.URL.Path)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
writeSuccessResponseHeadersOnly(w)
// Get host and port from Request.RemoteAddr.
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
host, port = "", ""
}
// Notify object created event.
eventNotify(eventData{
Type: ObjectCreatedPut,
Bucket: bucket,
ObjInfo: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
// HeadObjectHandler - HEAD Object
@@ -357,97 +400,23 @@ func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.R
// Successful response.
w.WriteHeader(http.StatusOK)
}
// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api gatewayAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
// Get host and port from Request.RemoteAddr.
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
host, port = "", ""
}
if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
return
}
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
return
}
// Allocate incoming content length bytes.
deleteXMLBytes := make([]byte, r.ContentLength)
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
errorIf(err, "Unable to read HTTP body.")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "Unable to unmarshal delete objects request XML.")
writeErrorResponse(w, ErrMalformedXML, r.URL)
return
}
var dErrs = make([]error, len(deleteObjects.Objects))
// Delete all requested objects in parallel.
for index, object := range deleteObjects.Objects {
dErr := objectAPI.DeleteObject(bucket, object.ObjectName)
if dErr != nil {
dErrs[index] = dErr
}
}
// Collect deleted objects and errors if any.
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
for index, err := range dErrs {
object := deleteObjects.Objects[index]
// Success deleted objects are collected separately.
if err == nil {
deletedObjects = append(deletedObjects, object)
continue
}
if _, ok := errorCause(err).(ObjectNotFound); ok {
// If the object is not found it should be
// accounted as deleted as per S3 spec.
deletedObjects = append(deletedObjects, object)
continue
}
errorIf(err, "Unable to delete object. %s", object.ObjectName)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
Key: object.ObjectName,
})
}
// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Notify object accessed via a HEAD request.
eventNotify(eventData{
Type: ObjectAccessedHead,
Bucket: bucket,
ObjInfo: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
// PutBucketPolicyHandler - PUT Bucket policy
@@ -649,15 +618,6 @@ func (api gatewayAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Re
return
}
// validating region here, because isValidLocationConstraint
// reads body which has been read already. So only validating
// region here.
serverRegion := serverConfig.GetRegion()
if serverRegion != location {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
@@ -746,15 +706,11 @@ func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *htt
return
}
// Extract all the litsObjectsV1 query params to their native values.
// Extract all the listObjectsV1 query params to their native
// values. N.B. we delegate validation of params to the respective
// gateway backends.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
listObjects := objectAPI.ListObjects
if reqAuthType == authTypeAnonymous {
listObjects = objectAPI.AnonListObjects
@@ -769,6 +725,87 @@ func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *htt
return
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// Minio continues to support ListObjectsV1 for legacy tools.
func (api gatewayAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeAnonymous:
// No verification needed for anonymous requests.
default:
// For all unknown auth types return error.
writeErrorResponse(w, ErrAccessDenied, r.URL)
return
}
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _ := getListObjectsV2Args(r.URL.Query())
// In ListObjectsV2 'continuation-token' is the marker.
marker := token
// Check if 'continuation-token' is empty.
if token == "" {
// Then we need to use 'start-after' as marker instead.
marker = startAfter
}
listObjectsV2 := objectAPI.ListObjectsV2
if reqAuthType == authTypeAnonymous {
listObjectsV2 = objectAPI.AnonListObjectsV2
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsV2Info object to be
// serialized as XML and sent as S3 compatible response body.
listObjectsV2Info, err := listObjectsV2(bucket, prefix, token, fetchOwner, delimiter, maxKeys)
if err != nil {
errorIf(err, "Unable to list objects. Args to listObjectsV2 are bucket=%s, prefix=%s, token=%s, delimiter=%s", bucket, prefix, token, delimiter)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.ContinuationToken, startAfter, delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
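A client-side sketch of the paging contract this handler implements: the first page sends at most start-after, and later pages echo back NextContinuationToken. The builder below is hypothetical; the parameter names are the standard S3 V2 ones consumed by getListObjectsV2Args:
package main
import (
	"fmt"
	"net/url"
	"strconv"
)
// buildListV2Query mirrors the marker selection above: continuation-token
// wins when present, otherwise start-after seeds the marker.
func buildListV2Query(prefix, token, startAfter string, maxKeys int) url.Values {
	q := url.Values{}
	q.Set("list-type", "2")
	q.Set("prefix", prefix)
	q.Set("max-keys", strconv.Itoa(maxKeys))
	if token != "" {
		q.Set("continuation-token", token)
	} else if startAfter != "" {
		q.Set("start-after", startAfter)
	}
	return q
}
func main() {
	fmt.Println(buildListV2Query("photos/", "", "photos/0099.jpg", 100).Encode())
}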

View File

@@ -21,11 +21,14 @@ import (
"fmt"
"net/url"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"github.com/gorilla/mux"
"github.com/minio/cli"
miniohttp "github.com/minio/minio/pkg/http"
)
const azureGatewayTemplate = `NAME:
@@ -53,26 +56,13 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=azureaccountname
$ export MINIO_SECRET_KEY=azureaccountkey
$ {{.HelpName}}
2. Start minio gateway server for Azure Blob Storage backend on custom endpoint.
$ export MINIO_ACCESS_KEY=azureaccountname
$ export MINIO_SECRET_KEY=azureaccountkey
$ {{.HelpName}} https://azure.example.com
`
var azureBackendCmd = cli.Command{
Name: "azure",
Usage: "Microsoft Azure Blob Storage.",
Action: azureGatewayMain,
CustomHelpTemplate: azureGatewayTemplate,
Flags: append(serverFlags,
cli.BoolFlag{
Name: "quiet",
Usage: "Disable startup banner.",
},
),
HideHelpCommand: true,
}
const s3GatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
@@ -105,26 +95,71 @@ EXAMPLES:
$ {{.HelpName}} https://play.minio.io:9000
`
var s3BackendCmd = cli.Command{
Name: "s3",
Usage: "Amazon Simple Storage Service (S3).",
Action: s3GatewayMain,
CustomHelpTemplate: s3GatewayTemplate,
Flags: append(serverFlags,
cli.BoolFlag{
Name: "quiet",
Usage: "Disable startup banner.",
},
),
HideHelpCommand: true,
}
const gcsGatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
var gatewayCmd = cli.Command{
Name: "gateway",
Usage: "Start object storage gateway.",
HideHelpCommand: true,
Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd},
}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} PROJECTID
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
PROJECTID:
GCS project ID; there is no default, this argument is mandatory.
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of GCS.
MINIO_SECRET_KEY: Password or secret key of GCS.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
EXAMPLES:
1. Start minio gateway server for GCS backend.
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
(Instructions to generate credentials: https://developers.google.com/identity/protocols/application-default-credentials)
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}} mygcsprojectid
`
var (
azureBackendCmd = cli.Command{
Name: "azure",
Usage: "Microsoft Azure Blob Storage.",
Action: azureGatewayMain,
CustomHelpTemplate: azureGatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true,
}
s3BackendCmd = cli.Command{
Name: "s3",
Usage: "Amazon Simple Storage Service (S3).",
Action: s3GatewayMain,
CustomHelpTemplate: s3GatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true,
}
gcsBackendCmd = cli.Command{
Name: "gcs",
Usage: "Google Cloud Storage.",
Action: gcsGatewayMain,
CustomHelpTemplate: gcsGatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true,
}
gatewayCmd = cli.Command{
Name: "gateway",
Usage: "Start object storage gateway.",
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true,
Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd},
}
)
// Represents the type of the gateway backend.
type gatewayBackend string
@@ -132,79 +167,33 @@ type gatewayBackend string
const (
azureBackend gatewayBackend = "azure"
s3Backend gatewayBackend = "s3"
gcsBackend gatewayBackend = "gcs"
// Add more backends here.
)
// Returns access and secretkey set from environment variables.
func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
// Fetch access keys from environment variables.
accessKey = os.Getenv("MINIO_ACCESS_KEY")
secretKey = os.Getenv("MINIO_SECRET_KEY")
if accessKey == "" || secretKey == "" {
fatalIf(errors.New("Missing credentials"), "Access and secret keys are mandatory to run Minio gateway server.")
}
return accessKey, secretKey
}
// Set browser setting from environment variables
func mustSetBrowserSettingFromEnv() {
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBrowserFlag(browser)
if err != nil {
fatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
}
// browser Envs are set globally, this does not represent
// if browser is turned off or on.
globalIsEnvBrowser = true
globalIsBrowserEnabled = bool(browserFlag)
}
}
// Initialize gateway layer depending on the backend type.
// Supported backend types are
//
// - Azure Blob Storage.
// - AWS S3.
// - Google Cloud Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType gatewayBackend, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
switch gatewayBackend(backendType) {
func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, error) {
switch backendType {
case azureBackend:
return newAzureLayer(endpoint, accessKey, secretKey, secure)
return newAzureLayer(arg)
case s3Backend:
return newS3Gateway(endpoint, accessKey, secretKey, secure)
return newS3Gateway(arg)
case gcsBackend:
// FIXME: The following print command is temporary and
// will be removed when gcs is ready for production use.
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
return newGCSGateway(arg)
}
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
}
// Initialize a new gateway config.
//
// DO NOT save this config, this is meant to be
// only used in memory.
func newGatewayConfig(accessKey, secretKey, region string) error {
// Initialize server config.
srvCfg := newServerConfigV18()
// If env is set for a fresh start, save them to config file.
srvCfg.SetCredential(credential{
AccessKey: accessKey,
SecretKey: secretKey,
})
// Set custom region.
srvCfg.SetRegion(region)
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()
return nil
}
// Return endpoint.
func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1
@@ -264,6 +253,9 @@ func azureGatewayMain(ctx *cli.Context) {
cli.ShowCommandHelpAndExit(ctx, "azure", 1)
}
// Validate gateway arguments.
fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument")
gatewayMain(ctx, azureBackend)
}
@@ -273,54 +265,78 @@ func s3GatewayMain(ctx *cli.Context) {
cli.ShowCommandHelpAndExit(ctx, "s3", 1)
}
// Validate gateway arguments.
fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument")
gatewayMain(ctx, s3Backend)
}
// Handler for 'minio gateway gcs' command line
func gcsGatewayMain(ctx *cli.Context) {
if ctx.Args().Present() && ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
}
if !isValidGCSProjectIDFormat(ctx.Args().First()) {
errorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First())
cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
}
gatewayMain(ctx, gcsBackend)
}
// Handler for 'minio gateway'.
func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
// Fetch access and secret key from env.
accessKey, secretKey := mustGetGatewayCredsFromEnv()
// Fetch browser env setting
mustSetBrowserSettingFromEnv()
// Initialize new gateway config.
newGatewayConfig(accessKey, secretKey, globalMinioDefaultRegion)
// Get quiet flag from command line argument.
quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
if quietFlag {
log.EnableQuiet()
}
serverAddr := ctx.String("address")
endpointAddr := ctx.Args().Get(0)
err := validateGatewayArguments(serverAddr, endpointAddr)
fatalIf(err, "Invalid argument")
// Fetch address option
gatewayAddr := ctx.GlobalString("address")
if gatewayAddr == ":"+globalMinioPort {
gatewayAddr = ctx.String("address")
}
// Second argument is endpoint. If no endpoint is specified then the
// gateway implementation should use a default setting.
endPoint, secure, err := parseGatewayEndpoint(endpointAddr)
fatalIf(err, "Unable to parse endpoint")
// Handle common command args.
handleCommonCmdArgs(ctx)
// Create certs path for SSL configuration.
fatalIf(createConfigDir(), "Unable to create configuration directory")
// Handle common env vars.
handleCommonEnvVars()
newObject, err := newGatewayLayer(backendType, endPoint, accessKey, secretKey, secure)
fatalIf(err, "Unable to initialize gateway layer")
// Validate if we have access, secret set through environment.
if !globalIsEnvCreds {
fatalIf(fmt.Errorf("Access and Secret keys should be set through ENVs for backend [%s]", backendType), "")
}
// Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize gateway config.
initConfig()
// Enable loggers as per configuration file.
enableLoggers()
// Init the error tracing module.
initError()
// Check and load SSL certificates.
var err error
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file")
initNSLock(false) // Enable local namespace lock.
router := mux.NewRouter().SkipClean(true)
newObject, err := newGatewayLayer(backendType, ctx.Args().First())
fatalIf(err, "Unable to initialize gateway layer")
// credentials Envs are set globally.
globalIsEnvCreds = true
router := mux.NewRouter().SkipClean(true)
// Register web router when its enabled.
if globalIsBrowserEnabled {
aerr := registerWebRouter(router)
fatalIf(aerr, "Unable to configure web browser")
fatalIf(registerWebRouter(router), "Unable to configure web browser")
}
registerGatewayAPIRouter(router, newObject)
@@ -353,22 +369,15 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
}
apiServer := NewServerMux(serverAddr, registerHandlers(router, handlerFns...))
_, _, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL key file")
globalHTTPServer = miniohttp.NewServer([]string{gatewayAddr}, registerHandlers(router, handlerFns...), globalTLSCertificate)
// Start server, automatically configures TLS if certs are available.
go func() {
cert, key := "", ""
if globalIsSSL {
cert, key = getPublicCertFile(), getPrivateKeyFile()
}
aerr := apiServer.ListenAndServe(cert, key)
fatalIf(aerr, "Failed to start minio server")
globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
// Once endpoints are finalized, initialize the new object api.
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
@@ -377,15 +386,21 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
// Prints the formatted startup message once object layer is initialized.
if !quietFlag {
mode := ""
if gatewayBackend(backendType) == azureBackend {
switch gatewayBackend(backendType) {
case azureBackend:
mode = globalMinioModeGatewayAzure
} else if gatewayBackend(backendType) == s3Backend {
case gcsBackend:
mode = globalMinioModeGatewayGCS
case s3Backend:
mode = globalMinioModeGatewayS3
}
// Check update mode.
checkUpdate(mode)
apiEndpoints := getAPIEndpoints(apiServer.Addr)
printGatewayStartupMessage(apiEndpoints, accessKey, secretKey, backendType)
// Print gateway startup message.
printGatewayStartupMessage(getAPIEndpoints(gatewayAddr), backendType)
}
<-globalServiceDoneCh
handleSignals()
}

View File

@@ -17,7 +17,6 @@
package cmd
import (
"os"
"strings"
"testing"
)
@@ -53,28 +52,6 @@ func TestParseGatewayEndpoint(t *testing.T) {
}
}
func TestSetBrowserFromEnv(t *testing.T) {
browser := os.Getenv("MINIO_BROWSER")
os.Setenv("MINIO_BROWSER", "on")
mustSetBrowserSettingFromEnv()
if globalIsBrowserEnabled != true {
t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, false)
}
os.Setenv("MINIO_BROWSER", "off")
mustSetBrowserSettingFromEnv()
if globalIsBrowserEnabled != false {
t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true)
}
os.Setenv("MINIO_BROWSER", "")
mustSetBrowserSettingFromEnv()
if globalIsBrowserEnabled != false {
t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true)
}
os.Setenv("MINIO_BROWSER", browser)
}
// Test validateGatewayArguments
func TestValidateGatewayArguments(t *testing.T) {
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {

View File

@@ -36,6 +36,8 @@ type GatewayLayer interface {
GetBucketPolicies(string) (policy.BucketAccessPolicy, error)
DeleteBucketPolicies(string) error
AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error)
ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error)
AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
}

View File

@@ -24,14 +24,14 @@ import (
)
// AnonPutObject creates a new object anonymously with the incoming data.
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
var sha256sumBytes []byte
var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
}
@@ -40,14 +40,14 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
}
oi, err := l.anonClient.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@@ -74,37 +74,47 @@ func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64,
}
// AnonGetObjectInfo - Get object info anonymously
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) {
r := minio.NewHeadReqHeaders()
oi, err := l.anonClient.StatObject(bucket, object, r)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// AnonListObjects - List objects anonymously
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) {
result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
} else if !exists {
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
buckets, err := l.anonClient.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
@@ -118,5 +128,5 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
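Most signatures in this commit switch from returning ObjectInfo{}/BucketInfo{} literals to named zero-value results, so error paths read `return bi, err`. A minimal standalone sketch of the pattern, using an illustrative type:
package main
import (
	"errors"
	"fmt"
)
type record struct{ Name string }
// lookup shows the named-return style: r starts as record{}, so error
// paths return it directly instead of spelling out a literal.
func lookup(ok bool) (r record, e error) {
	if !ok {
		return r, errors.New("not found")
	}
	r.Name = "example"
	return r, nil
}
func main() {
	_, err := lookup(false)
	fmt.Println(err) // not found
}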

View File

@@ -32,11 +32,11 @@ func (l *s3Objects) HealObject(bucket string, object string) (int, int, error) {
}
// ListObjectsHeal - Not relevant.
func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

View File

@@ -17,11 +17,12 @@
package cmd
import (
"encoding/hex"
"io"
"net/http"
"path"
"encoding/hex"
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
)
@@ -97,14 +98,30 @@ type s3Objects struct {
}
// newS3Gateway returns a new s3 gateway layer
func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
if endpoint == "" {
endpoint = "s3.amazonaws.com"
secure = true
func newS3Gateway(host string) (GatewayLayer, error) {
var err error
var endpoint string
var secure = true
// Validate host parameters.
if host != "" {
// Override default params if the host is provided
endpoint, secure, err = parseGatewayEndpoint(host)
if err != nil {
return nil, err
}
}
// Default endpoint parameters
if endpoint == "" {
endpoint = "s3.amazonaws.com"
}
creds := serverConfig.GetCredential()
// Initialize minio client object.
client, err := minio.NewCore(endpoint, accessKey, secretKey, secure)
client, err := minio.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure)
if err != nil {
return nil, err
}
@@ -128,8 +145,8 @@ func (l *s3Objects) Shutdown() error {
}
// StorageInfo is not relevant to S3 backend.
func (l *s3Objects) StorageInfo() StorageInfo {
return StorageInfo{}
func (l *s3Objects) StorageInfo() (si StorageInfo) {
return si
}
// MakeBucket creates a new container on S3 backend.
@@ -142,10 +159,10 @@ func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo gets bucket metadata.
func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
@@ -159,7 +176,7 @@ func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets
@@ -190,20 +207,20 @@ func (l *s3Objects) DeleteBucket(bucket string) error {
}
// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
@@ -294,7 +311,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
Name: oi.Key,
ModTime: oi.LastModified,
Size: oi.Size,
ETag: oi.ETag,
ETag: canonicalizeETag(oi.ETag),
UserDefined: userDefined,
ContentType: oi.ContentType,
ContentEncoding: oi.Metadata.Get("Content-Encoding"),
@@ -313,14 +330,14 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
}
// PutObject creates a new object with the incoming data.
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
var sha256sumBytes []byte
var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
}
@@ -329,29 +346,29 @@ func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
}
oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// CopyObject copies a blob from source container to destination container.
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (objInfo ObjectInfo, e error) {
err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject)
}
oi, err := l.GetObjectInfo(destBucket, destObject)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), destBucket, destObject)
return objInfo, s3ToObjectError(traceError(err), destBucket, destObject)
}
return oi, nil
@@ -406,10 +423,10 @@ func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
return fromMinioClientListMultipartsInfo(result), nil
@@ -455,20 +472,20 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
}
// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
md5HexBytes, err := hex.DecodeString(md5Hex)
if err != nil {
return PartInfo{}, err
return pi, err
}
sha256sumBytes, err := hex.DecodeString(sha256sum)
if err != nil {
return PartInfo{}, err
return pi, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes)
if err != nil {
return PartInfo{}, err
return pi, err
}
return fromMinioClientObjectPart(info), nil
@@ -500,10 +517,10 @@ func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInf
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, e error) {
result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, err
return lpi, err
}
return fromMinioClientListPartsInfo(result), nil
@@ -532,10 +549,10 @@ func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (oi ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return oi, s3ToObjectError(traceError(err), bucket, object)
}
return l.GetObjectInfo(bucket, object)

View File

@@ -18,28 +18,19 @@ package cmd
import (
"fmt"
"runtime"
"strings"
)
// Prints the formatted startup message.
func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey string, backendType gatewayBackend) {
func printGatewayStartupMessage(apiEndPoints []string, backendType gatewayBackend) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// Prints credential.
printGatewayCommonMsg(apiEndPoints, accessKey, secretKey)
printGatewayCommonMsg(strippedAPIEndpoints)
// Prints the `mc` cli configuration message, choosing the
// first endpoint as default.
endPoint := apiEndPoints[0]
// Configure 'mc', following block prints platform specific information for minio client.
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
printCLIAccessMsg(strippedAPIEndpoints[0], fmt.Sprintf("my%s", backendType))
// Prints documentation message.
printObjectAPIMsg()
@@ -52,10 +43,16 @@ func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey stri
}
// Prints common server startup message. Prints credential, region and browser access.
func printGatewayCommonMsg(apiEndpoints []string, accessKey, secretKey string) {
func printGatewayCommonMsg(apiEndpoints []string) {
// Get saved credentials.
cred := serverConfig.GetCredential()
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
log.Println(colorBlue("\nBrowser Access:"))
log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}

View File

@@ -20,12 +20,24 @@ import "testing"
// Test printing Gateway common message.
func TestPrintGatewayCommonMessage(t *testing.T) {
apiEndpoints := []string{"127.0.0.1:9000"}
printGatewayCommonMsg(apiEndpoints, "abcd1", "abcd123")
root, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer removeAll(root)
apiEndpoints := []string{"http://127.0.0.1:9000"}
printGatewayCommonMsg(apiEndpoints)
}
// Test print gateway startup message.
func TestPrintGatewayStartupMessage(t *testing.T) {
apiEndpoints := []string{"127.0.0.1:9000"}
printGatewayStartupMessage(apiEndpoints, "abcd1", "abcd123", "azure")
root, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer removeAll(root)
apiEndpoints := []string{"http://127.0.0.1:9000"}
printGatewayStartupMessage(apiEndpoints, "azure")
}

View File

@@ -17,20 +17,32 @@
package cmd
import (
"crypto/tls"
"crypto/x509"
"os"
"runtime"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
miniohttp "github.com/minio/minio/pkg/http"
)
// minio configuration related constants.
const (
globalMinioCertExpireWarnDays = time.Hour * 24 * 30 // 30 days.
globalMinioDefaultRegion = ""
globalMinioDefaultOwnerID = "minio"
globalMinioDefaultRegion = ""
// This is a sha256 output of ``arn:aws:iam::minio:user/admin``,
// this is kept in present form to be compatible with S3 owner ID
// requirements -
//
// ```
// The canonical user ID is the Amazon S3-only concept.
// It is 64-character obfuscated version of the account ID.
// ```
// http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html
globalMinioDefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
globalMinioDefaultStorageClass = "STANDARD"
globalWindowsOSName = "windows"
globalNetBSDOSName = "netbsd"
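The derivation described in the comment above can be checked directly; assuming the owner ID is the hex-encoded SHA-256 of the fixed admin ARN, this prints the 64-character value:
package main
import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)
func main() {
	// Hex-encoded SHA-256 of the fixed ARN from the comment above.
	sum := sha256.Sum256([]byte("arn:aws:iam::minio:user/admin"))
	fmt.Println(hex.EncodeToString(sum[:]))
}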
@@ -40,6 +52,7 @@ const (
globalMinioModeDistXL = "mode-server-distributed-xl"
globalMinioModeGatewayAzure = "mode-gateway-azure"
globalMinioModeGatewayS3 = "mode-gateway-s3"
globalMinioModeGatewayGCS = "mode-gateway-gcs"
// Add new global values here.
)
@@ -96,6 +109,12 @@ var (
// IsSSL indicates if the server is configured with SSL.
globalIsSSL bool
globalTLSCertificate *tls.Certificate
globalHTTPServer *miniohttp.Server
globalHTTPServerErrorCh = make(chan error)
globalOSSignalCh = make(chan os.Signal, 1)
// List of admin peers.
globalAdminPeers = adminPeers{}
@@ -127,8 +146,9 @@ var (
// global colors.
var (
colorBold = color.New(color.Bold).SprintFunc()
colorBlue = color.New(color.FgBlue).SprintfFunc()
colorBold = color.New(color.Bold).SprintFunc()
colorBlue = color.New(color.FgBlue).SprintfFunc()
colorYellow = color.New(color.FgYellow).SprintfFunc()
)
// Returns minio global information, as a key value map.

View File

@@ -98,9 +98,9 @@ func path2BucketAndObject(path string) (bucket, object string) {
}
// extractMetadataFromHeader extracts metadata from HTTP header.
func extractMetadataFromHeader(header http.Header) map[string]string {
func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
if header == nil {
return nil
return nil, traceError(errInvalidArgument)
}
metadata := make(map[string]string)
// Save standard supported headers.
@@ -116,16 +116,17 @@ func extractMetadataFromHeader(header http.Header) map[string]string {
}
// Go through all other headers for any additional headers that need to be saved.
for key := range header {
cKey := http.CanonicalHeaderKey(key)
if strings.HasPrefix(cKey, "X-Amz-Meta-") {
metadata[cKey] = header.Get(key)
} else if strings.HasPrefix(key, "X-Minio-Meta-") {
metadata[cKey] = header.Get(key)
if key != http.CanonicalHeaderKey(key) {
return nil, traceError(errInvalidArgument)
}
if strings.HasPrefix(key, "X-Amz-Meta-") {
metadata[key] = header.Get(key)
}
if strings.HasPrefix(key, "X-Minio-Meta-") {
metadata[key] = header.Get(key)
}
}
// Success.
return metadata
return metadata, nil
}
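Since the function now rejects keys that are not already canonical, hand-built http.Header maps (as in the tests further below) must use Go's canonical form; headers parsed by Go's HTTP server arrive that way automatically:
package main
import (
	"fmt"
	"net/http"
)
func main() {
	// The canonical form extractMetadataFromHeader now insists on.
	fmt.Println(http.CanonicalHeaderKey("x-amz-meta-appid")) // X-Amz-Meta-Appid
}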
// The Query string for the redirect URL the client is
@@ -168,11 +169,6 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
return strings.Join(newEncs, ",")
}
// extractMetadataFromForm extracts metadata from Post Form.
func extractMetadataFromForm(formValues http.Header) map[string]string {
return extractMetadataFromHeader(formValues)
}
// Validate form field size for s3 specification requirement.
func validateFormFieldSize(formValues http.Header) error {
// Iterate over form values

View File

@@ -123,8 +123,9 @@ func TestValidateFormFieldSize(t *testing.T) {
// Tests validate metadata extraction from http headers.
func TestExtractMetadataHeaders(t *testing.T) {
testCases := []struct {
header http.Header
metadata map[string]string
header http.Header
metadata map[string]string
shouldFail bool
}{
// Validate if there a known 'content-type'.
{
@@ -134,15 +135,17 @@ func TestExtractMetadataHeaders(t *testing.T) {
metadata: map[string]string{
"content-type": "image/png",
},
shouldFail: false,
},
// Validate if there are no keys to extract.
{
header: http.Header{
"test-1": []string{"123"},
"Test-1": []string{"123"},
},
metadata: map[string]string{},
metadata: map[string]string{},
shouldFail: false,
},
// Validate if there are no keys to extract.
// Validate that all headers are extracted.
{
header: http.Header{
"X-Amz-Meta-Appid": []string{"amz-meta"},
@@ -150,19 +153,38 @@ func TestExtractMetadataHeaders(t *testing.T) {
},
metadata: map[string]string{
"X-Amz-Meta-Appid": "amz-meta",
"X-Minio-Meta-Appid": "minio-meta"},
"X-Minio-Meta-Appid": "minio-meta",
},
shouldFail: false,
},
// Fail if header key is not in canonicalized form
{
header: http.Header{
"x-amz-meta-appid": []string{"amz-meta"},
},
metadata: map[string]string{
"X-Amz-Meta-Appid": "amz-meta",
},
shouldFail: true,
},
// Empty header input returns empty metadata.
{
header: nil,
metadata: nil,
header: nil,
metadata: nil,
shouldFail: true,
},
}
// Validate extraction of metadata from the headers.
for i, testCase := range testCases {
metadata := extractMetadataFromHeader(testCase.header)
if !reflect.DeepEqual(metadata, testCase.metadata) {
metadata, err := extractMetadataFromHeader(testCase.header)
if err != nil && !testCase.shouldFail {
t.Fatalf("Test %d failed to extract metadata: %v", i+1, err)
}
if err == nil && testCase.shouldFail {
t.Fatalf("Test %d should fail, but it passed", i+1)
}
if err == nil && !reflect.DeepEqual(metadata, testCase.metadata) {
t.Fatalf("Test %d failed: Expected \"%#v\", got \"%#v\"", i+1, testCase.metadata, metadata)
}
}

View File

@@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"math/rand"
"net/rpc"
"path"
"sync"
"time"
@@ -64,7 +63,7 @@ type lockServer struct {
}
// Start lock maintenance from all lock servers.
func startLockMaintainence(lockServers []*lockServer) {
func startLockMaintenance(lockServers []*lockServer) {
for _, locker := range lockServers {
// Start loop for stale lock maintenance
go func(lk *lockServer) {
@@ -90,7 +89,7 @@ func startLockMaintainence(lockServers []*lockServer) {
// Register distributed NS lock handlers.
func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error {
// Start lock maintenance from all lock servers.
startLockMaintainence(globalLockServers)
startLockMaintenance(globalLockServers)
// Register initialized lock servers to their respective rpc endpoints.
return registerStorageLockers(mux, globalLockServers)
@@ -99,7 +98,7 @@ func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error
// registerStorageLockers - register locker rpc handlers for net/rpc library clients
func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error {
for _, lockServer := range lockServers {
lockRPCServer := rpc.NewServer()
lockRPCServer := newRPCServer()
if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil {
return traceError(err)
}

View File

@@ -32,6 +32,7 @@ type notifier struct {
Kafka kafkaConfigs `json:"kafka"`
Webhook webhookConfigs `json:"webhook"`
MySQL mySQLConfigs `json:"mysql"`
MQTT mqttConfigs `json:"mqtt"`
// Add new notification queues.
}
@@ -54,6 +55,25 @@ func (a amqpConfigs) Validate() error {
return nil
}
type mqttConfigs map[string]mqttNotify
func (a mqttConfigs) Clone() mqttConfigs {
a2 := make(mqttConfigs, len(a))
for k, v := range a {
a2[k] = v
}
return a2
}
func (a mqttConfigs) Validate() error {
for k, v := range a {
if err := v.Validate(); err != nil {
return fmt.Errorf("MQTT [%s] configuration invalid: %s", k, err.Error())
}
}
return nil
}
type natsConfigs map[string]natsNotify
func (a natsConfigs) Clone() natsConfigs {
@@ -215,6 +235,9 @@ func (n *notifier) Validate() error {
if err := n.MySQL.Validate(); err != nil {
return err
}
if err := n.MQTT.Validate(); err != nil {
return err
}
return nil
}
@@ -236,6 +259,24 @@ func (n *notifier) GetAMQPByID(accountID string) amqpNotify {
return n.AMQP[accountID]
}
func (n *notifier) SetMQTTByID(accountID string, mqttn mqttNotify) {
n.Lock()
defer n.Unlock()
n.MQTT[accountID] = mqttn
}
func (n *notifier) GetMQTT() map[string]mqttNotify {
n.RLock()
defer n.RUnlock()
return n.MQTT.Clone()
}
func (n *notifier) GetMQTTByID(accountID string) mqttNotify {
n.RLock()
defer n.RUnlock()
return n.MQTT[accountID]
}
func (n *notifier) SetNATSByID(accountID string, natsn natsNotify) {
n.Lock()
defer n.Unlock()

View File

@@ -30,6 +30,8 @@ const (
// Static string indicating queue type 'amqp'.
queueTypeAMQP = "amqp"
// Static string indicating queue type 'mqtt'.
queueTypeMQTT = "mqtt"
// Static string indicating queue type 'nats'.
queueTypeNATS = "nats"
// Static string indicating queue type 'elasticsearch'.
@@ -80,6 +82,25 @@ func isAMQPQueue(sqsArn arnSQS) bool {
return true
}
// Returns true if mqttARN is for an MQTT queue.
func isMQTTQueue(sqsArn arnSQS) bool {
if sqsArn.Type != queueTypeMQTT {
return false
}
mqttL := serverConfig.Notify.GetMQTTByID(sqsArn.AccountID)
if !mqttL.Enable {
return false
}
// Connect to mqtt server to validate.
mqttC, err := dialMQTT(mqttL)
if err != nil {
errorIf(err, "Unable to connect to mqtt service. %#v", mqttL)
return false
}
defer mqttC.Client.Disconnect(250)
return true
}
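Both fields consulted above come from the parsed notification ARN. A package-internal sketch of the call (the account ID "1" is arbitrary, and this assumes the surrounding cmd package context):
// exampleMQTTQueueCheck returns true only when the "mqtt" target with
// account ID "1" is enabled in the server config and the broker answers.
func exampleMQTTQueueCheck() bool {
	return isMQTTQueue(arnSQS{Type: queueTypeMQTT, AccountID: "1"})
}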
// Returns true if natsArn is for a NATS queue.
func isNATSQueue(sqsArn arnSQS) bool {
if sqsArn.Type != queueTypeNATS {

View File

@@ -59,13 +59,13 @@ type amqpConn struct {
// dialAMQP - dials and returns an amqpConnection instance,
// for sending notifications. Returns error if amqp logger
// is not enabled.
func dialAMQP(amqpL amqpNotify) (amqpConn, error) {
func dialAMQP(amqpL amqpNotify) (ac amqpConn, e error) {
if !amqpL.Enable {
return amqpConn{}, errNotifyNotEnabled
return ac, errNotifyNotEnabled
}
conn, err := amqp.Dial(amqpL.URL)
if err != nil {
return amqpConn{}, err
return ac, err
}
return amqpConn{Connection: conn, params: amqpL}, nil
}

View File

@@ -66,13 +66,13 @@ type kafkaConn struct {
topic string
}
func dialKafka(kn kafkaNotify) (kafkaConn, error) {
func dialKafka(kn kafkaNotify) (kc kafkaConn, e error) {
if !kn.Enable {
return kafkaConn{}, errNotifyNotEnabled
return kc, errNotifyNotEnabled
}
if kn.Topic == "" {
return kafkaConn{}, kkErrFunc(
return kc, kkErrFunc(
"Topic was not specified in configuration")
}
@@ -85,7 +85,7 @@ func dialKafka(kn kafkaNotify) (kafkaConn, error) {
p, err := sarama.NewSyncProducer(kn.Brokers, config)
if err != nil {
return kafkaConn{}, kkErrFunc("Failed to start producer: %v", err)
return kc, kkErrFunc("Failed to start producer: %v", err)
}
return kafkaConn{p, kn.Topic}, nil

cmd/notify-mqtt.go Normal file
View File

@@ -0,0 +1,123 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"crypto/tls"
"io/ioutil"
"time"
"github.com/Sirupsen/logrus"
MQTT "github.com/eclipse/paho.mqtt.golang"
)
type mqttNotify struct {
Enable bool `json:"enable"`
Broker string `json:"broker"`
Topic string `json:"topic"`
QoS int `json:"qos"`
ClientID string `json:"clientId"`
User string `json:"username"`
Password string `json:"password"`
}
func (m *mqttNotify) Validate() error {
if !m.Enable {
return nil
}
if _, err := checkURL(m.Broker); err != nil {
return err
}
return nil
}
type mqttConn struct {
params mqttNotify
Client MQTT.Client
}
func dialMQTT(mqttL mqttNotify) (mc mqttConn, e error) {
if !mqttL.Enable {
return mc, errNotifyNotEnabled
}
connOpts := &MQTT.ClientOptions{
ClientID: mqttL.ClientID,
CleanSession: true,
Username: mqttL.User,
Password: mqttL.Password,
MaxReconnectInterval: 1 * time.Second,
KeepAlive: 30 * time.Second,
TLSConfig: tls.Config{RootCAs: globalRootCAs},
}
connOpts.AddBroker(mqttL.Broker)
client := MQTT.NewClient(connOpts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
return mc, token.Error()
}
return mqttConn{Client: client, params: mqttL}, nil
}
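Given the struct tags above, an enabled target serializes under the keys enable, broker, topic, qos, clientId, username and password. A package-internal sketch with placeholder broker, topic and client ID:
// exampleMQTT is a hypothetical enabled target; dialMQTT would connect
// to this broker over plain TCP and publish events to the topic.
var exampleMQTT = mqttNotify{
	Enable:   true,
	Broker:   "tcp://localhost:1883",
	Topic:    "minio-events",
	QoS:      1, // at-least-once delivery
	ClientID: "minio-notifier",
}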
func newMQTTNotify(accountID string) (*logrus.Logger, error) {
mqttL := serverConfig.Notify.GetMQTTByID(accountID)
// Connect to the MQTT server.
mqttC, err := dialMQTT(mqttL)
if err != nil {
return nil, err
}
mqttLog := logrus.New()
// Disable writing to console.
mqttLog.Out = ioutil.Discard
// Add a mqtt hook.
mqttLog.Hooks.Add(mqttC)
// Set default JSON formatter
mqttLog.Formatter = new(logrus.JSONFormatter)
// Successfully enabled MQTT logger.
return mqttLog, nil
}
// Fire is called when an event should be sent to the message broker.
func (q mqttConn) Fire(entry *logrus.Entry) error {
body, err := entry.String()
if err != nil {
return err
}
if !q.Client.IsConnected() {
if token := q.Client.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
}
token := q.Client.Publish(q.params.Topic, byte(q.params.QoS), false, body)
if token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
// Levels returns the available logging levels.
func (q mqttConn) Levels() []logrus.Level {
return []logrus.Level{
logrus.InfoLevel,
}
}

View File

@@ -145,9 +145,9 @@ type mySQLConn struct {
*sql.DB
}
func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
func dialMySQL(msql mySQLNotify) (mc mySQLConn, e error) {
if !msql.Enable {
return mySQLConn{}, errNotifyNotEnabled
return mc, errNotifyNotEnabled
}
dsnStr := msql.DsnString
@@ -166,7 +166,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
db, err := sql.Open("mysql", dsnStr)
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"Connection opening failure (dsnStr=%s): %v",
dsnStr, err)
}
@@ -174,7 +174,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
// ping to check that server is actually reachable.
err = db.Ping()
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"Ping to server failed with: %v", err)
}
@@ -190,7 +190,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
_, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table))
if errCreate != nil {
// failed to create the table. error out.
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"'Select' failed with %v, then 'Create Table' failed with %v",
err, errCreate,
)
@@ -205,22 +205,20 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNSMySQL,
msql.Table))
if err != nil {
return mySQLConn{},
mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
return mc, mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
}
// delete statement
stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL,
msql.Table))
if err != nil {
return mySQLConn{},
mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
return mc, mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
}
case formatAccess:
// insert statement
stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL,
msql.Table))
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"create INSERT prepared statement failed with: %v", err)
}

View File

@@ -69,9 +69,9 @@ type natsIOConn struct {
// dialNATS - dials and returns a natsIOConn instance,
// for sending notifications. Returns error if nats logger
// is not enabled.
func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
func dialNATS(natsL natsNotify, testDial bool) (nioc natsIOConn, e error) {
if !natsL.Enable {
return natsIOConn{}, errNotifyNotEnabled
return nioc, errNotifyNotEnabled
}
// Construct natsIOConn which holds all NATS connection information
@@ -105,7 +105,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
// Do the real connection to the NATS server
sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...)
if err != nil {
return natsIOConn{}, err
return nioc, err
}
// Save the created connection
conn.stanConn = sc
@@ -120,7 +120,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
// Do the real connection
nc, err := natsC.Connect()
if err != nil {
return natsIOConn{}, err
return nioc, err
}
// Save the created connection
conn.natsConn = nc

View File

@@ -153,9 +153,9 @@ type pgConn struct {
*sql.DB
}
func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
func dialPostgreSQL(pgN postgreSQLNotify) (pc pgConn, e error) {
if !pgN.Enable {
return pgConn{}, errNotifyNotEnabled
return pc, errNotifyNotEnabled
}
// collect connection params
@@ -179,7 +179,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
db, err := sql.Open("postgres", connStr)
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"Connection opening failure (connectionString=%s): %v",
connStr, err)
}
@@ -187,7 +187,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
// ping to check that server is actually reachable.
err = db.Ping()
if err != nil {
return pgConn{}, pgErrFunc("Ping to server failed with: %v",
return pc, pgErrFunc("Ping to server failed with: %v",
err)
}
@@ -203,7 +203,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
_, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table))
if errCreate != nil {
// failed to create the table. error out.
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"'Select' failed with %v, then 'Create Table' failed with %v",
err, errCreate,
)
@@ -218,14 +218,14 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create UPSERT prepared statement failed with: %v", err)
}
// delete statement
stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create DELETE prepared statement failed with: %v", err)
}
case formatAccess:
@@ -233,7 +233,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create INSERT prepared statement failed with: %v", err)
}
}

View File

@@ -110,6 +110,13 @@ func (e SHA256Mismatch) Error() string {
return "sha256 computed does not match with what is expected"
}
// SignatureDoesNotMatch - when the computed request signature does not match what the client sent.
type SignatureDoesNotMatch struct{}
func (e SignatureDoesNotMatch) Error() string {
return "The request signature we calculated does not match the signature you provided. Check your key and signing method."
}
// StorageFull storage ran out of space.
type StorageFull struct{}
@@ -144,6 +151,13 @@ func (e BucketNotFound) Error() string {
return "Bucket not found: " + e.Bucket
}
// BucketAlreadyExists the requested bucket name is not available.
type BucketAlreadyExists GenericError
func (e BucketAlreadyExists) Error() string {
return "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again."
}
// BucketAlreadyOwnedByYou already owned by you.
type BucketAlreadyOwnedByYou GenericError
@@ -250,6 +264,14 @@ func (e ObjectNameInvalid) Error() string {
return "Object name invalid: " + e.Bucket + "#" + e.Object
}
// AllAccessDisabled All access to this object has been disabled
type AllAccessDisabled GenericError
// Error returns the error message string.
func (e AllAccessDisabled) Error() string {
return "All access to this object has been disabled"
}
// IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header.
type IncompleteBody GenericError
@@ -307,7 +329,7 @@ func (e InvalidUploadID) Error() string {
type InvalidPart struct{}
func (e InvalidPart) Error() string {
return "One or more of the specified parts could not be found"
return "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag."
}
// PartTooSmall - error if part size is less than 5MB.
@@ -321,6 +343,13 @@ func (e PartTooSmall) Error() string {
return fmt.Sprintf("Part size for %d should be atleast 5MB", e.PartNumber)
}
// PartTooBig returned if size of part is bigger than the allowed limit.
type PartTooBig struct{}
func (e PartTooBig) Error() string {
return "Part size bigger than the allowed limit"
}
// NotImplemented If a feature is not implemented
type NotImplemented struct{}
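These typed errors are meant to be matched by type rather than by string. A hedged sketch of the mapping a caller such as toAPIErrorCode might perform — the string codes below are illustrative, not the exact API error table:

func errToCodeSketch(err error) string {
	switch err.(type) {
	case SignatureDoesNotMatch:
		return "SignatureDoesNotMatch"
	case BucketAlreadyExists:
		return "BucketAlreadyExists"
	case PartTooBig:
		return "EntityTooLarge" // illustrative mapping
	case AllAccessDisabled:
		return "AllAccessDisabled"
	default:
		return "InternalError"
	}
}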

View File

@@ -1061,7 +1061,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
{
MaxUploads: 10,
IsTruncated: false,
Prefix: globalMinioDefaultOwnerID,
Prefix: "minio",
UploadIDMarker: uploadIDs[4],
Uploads: []uploadMetadata{
{
@@ -1895,7 +1895,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// Part number 0 doesn't exist, expecting InvalidPart error (Test number 12).
{bucketNames[0], objectNames[0], uploadIDs[0], []completePart{{ETag: "abcd", PartNumber: 0}}, "", InvalidPart{}, false},
// // Upload and PartNumber exists, But a deliberate ETag mismatch is introduced (Test number 13).
{bucketNames[0], objectNames[0], uploadIDs[0], inputParts[0].parts, "", BadDigest{}, false},
{bucketNames[0], objectNames[0], uploadIDs[0], inputParts[0].parts, "", InvalidPart{}, false},
// Test case with non existent object name (Test number 14).
{bucketNames[0], "my-object", uploadIDs[0], []completePart{{ETag: "abcd", PartNumber: 1}}, "", InvalidUploadID{UploadID: uploadIDs[0]}, false},
// Testing for Part being too small (Test number 15).

View File

@@ -260,7 +260,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) map[string]string {
func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) (map[string]string, error) {
// if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers.
if isMetadataReplace(header) {
@@ -270,11 +270,11 @@ func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]strin
// if x-amz-metadata-directive says COPY then we
// return the default metadata.
if isMetadataCopy(header) {
return defaultMeta
return defaultMeta, nil
}
// Copy is the default behavior if no x-amz-metadata-directive is set.
return defaultMeta
return defaultMeta, nil
}
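For illustration, a request that asks CopyObject to replace metadata rather than copy it — header names follow the S3 convention, values are made up:

req, _ := http.NewRequest(http.MethodPut, "/bucket/object", nil)
req.Header.Set("X-Amz-Metadata-Directive", "REPLACE")
req.Header.Set("X-Amz-Meta-Color", "blue")
// getCpObjMetadataFromHeader(req.Header, defaultMeta) now extracts the
// new metadata from the request instead of returning defaultMeta.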
// CopyObjectHandler - Copy Object
@@ -363,7 +363,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Make sure to remove saved etag, CopyObject calculates a new one.
delete(defaultMeta, "etag")
newMetadata := getCpObjMetadataFromHeader(r.Header, defaultMeta)
newMetadata, err := getCpObjMetadataFromHeader(r.Header, defaultMeta)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
// Check if x-amz-metadata-directive was not set to REPLACE and source,
// destination are same objects.
if !isMetadataReplace(r.Header) && cpSrcDstSame {
@@ -457,7 +461,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
// Extract metadata to be saved from incoming HTTP header.
metadata := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
if rAuthType == authTypeStreamingSigned {
if contentEncoding, ok := metadata["content-encoding"]; ok {
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
@@ -579,7 +588,12 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
}
// Extract metadata that needs to be saved.
metadata := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
uploadID, err := objectAPI.NewMultipartUpload(bucket, object, metadata)
if err != nil {

View File

@@ -2252,7 +2252,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(BadDigest{})),
expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(InvalidPart{})),
getGetObjectURL("", bucketName, objectName))),
expectedRespStatus: http.StatusBadRequest,
},

View File

@@ -35,9 +35,11 @@ import (
)
const (
fsMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space.
fsMinFreeInodes = 10000 // Min 10000.
maxAllowedIOError = 5
diskMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space.
diskMinTotalSpace = diskMinFreeSpace // Min 1GiB total space.
diskMinFreeInodes = 10000 // Min 10000 free inodes.
diskMinTotalInodes = diskMinFreeInodes // Min 10000 total inodes.
maxAllowedIOError = 5
)
// posix - implements StorageAPI interface.
@@ -108,7 +110,7 @@ func newPosix(path string) (StorageAPI, error) {
if err != nil {
return nil, err
}
fs := &posix{
st := &posix{
diskPath: diskPath,
// 1MiB buffer pool for posix internal operations.
pool: sync.Pool{
@@ -131,7 +133,19 @@ func newPosix(path string) (StorageAPI, error) {
return nil, err
}
}
return fs, nil
di, err := getDiskInfo(preparePath(diskPath))
if err != nil {
return nil, err
}
// Check if disk has minimum required total space.
if err = checkDiskMinTotal(di); err != nil {
return nil, err
}
// Success.
return st, nil
}
// getDiskInfo returns given disk information.
@@ -155,11 +169,58 @@ var ignoreDiskFreeOS = []string{
globalSolarisOSName,
}
// check if disk total has minimum required size.
func checkDiskMinTotal(di disk.Info) (err error) {
// Remove 5% from total space for cumulative disk space
// used for journalling, inodes etc.
totalDiskSpace := float64(di.Total) * 0.95
if int64(totalDiskSpace) <= diskMinTotalSpace {
return errDiskFull
}
// Some filesystems do not implement a way to provide total inodes available, instead
// inodes are allocated based on available disk space. For example CephFS, StoreNext CVFS,
// AzureFile driver. Allow for the available disk to be separately validated and we will
// validate inodes only if total inodes are provided by the underlying filesystem.
if di.Files != 0 && di.FSType != "NFS" {
totalFiles := int64(di.Files)
if totalFiles <= diskMinTotalInodes {
return errDiskFull
}
}
return nil
}
// check if disk free has minimum required size.
func checkDiskMinFree(di disk.Info) error {
// Remove 5% from free space for cumulative disk space used for journalling, inodes etc.
availableDiskSpace := float64(di.Free) * 0.95
if int64(availableDiskSpace) <= diskMinFreeSpace {
return errDiskFull
}
// Some filesystems do not implement a way to provide total inodes available, instead inodes
// are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver.
// Allow for the available disk to be separately validated and we will validate inodes only if
// total inodes are provided by the underlying filesystem.
if di.Files != 0 && di.FSType != "NFS" {
availableFiles := int64(di.Ffree)
if availableFiles <= diskMinFreeInodes {
return errDiskFull
}
}
// Success.
return nil
}
// checkDiskFree verifies if disk path has sufficient minimum free disk space and files.
func checkDiskFree(diskPath string, neededSpace int64) (err error) {
// We don't validate disk space or inode utilization on windows.
// Each windows call to 'GetVolumeInformationW' takes around 3-5 seconds.
// And StatFS is not supported by Go for solaris and netbsd.
// Each windows call to 'GetVolumeInformationW' takes around
// 3-5 seconds. And StatFS is not supported by Go for solaris
// and netbsd.
if contains(ignoreDiskFreeOS, runtime.GOOS) {
return nil
}
@@ -170,29 +231,15 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) {
return err
}
// Remove 5% from free space for cumulative disk space used for journalling, inodes etc.
availableDiskSpace := float64(di.Free) * 0.95
if int64(availableDiskSpace) <= fsMinFreeSpace {
return errDiskFull
}
// Some filesystems do not implement a way to provide total inodes available, instead inodes
// are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver.
// Allow for the available disk to be separately validate and we will validate inodes only if
// total inodes are provided by the underlying filesystem.
if di.Files != 0 && di.FSType != "NFS" {
availableFiles := int64(di.Ffree)
if availableFiles <= fsMinFreeInodes {
return errDiskFull
}
if err = checkDiskMinFree(di); err != nil {
return err
}
// Check if we have enough space to store data
if neededSpace > int64(availableDiskSpace) {
if neededSpace > int64(float64(di.Free)*0.95) {
return errDiskFull
}
// Success.
return nil
}
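A brief usage sketch of the split helpers — the mount path and object size are illustrative, not from this diff:

neededSpace := int64(100 * humanize.MiByte)
if err := checkDiskFree("/export", neededSpace); err == errDiskFull {
	// Either below the 1 GiB free-space / 10000-inode floor, or not
	// enough headroom once the 5% journalling reserve is deducted.
}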

View File

@@ -29,6 +29,8 @@ import (
"syscall"
"testing"
"github.com/minio/minio/pkg/disk"
"golang.org/x/crypto/blake2b"
)
@@ -1669,3 +1671,106 @@ func TestPosixStatFile(t *testing.T) {
}
}
}
// Checks for restrictions for min total disk space and inodes.
func TestCheckDiskTotalMin(t *testing.T) {
testCases := []struct {
diskInfo disk.Info
err error
}{
// Test 1 - when fstype is nfs.
{
diskInfo: disk.Info{
Total: diskMinTotalSpace * 3,
FSType: "NFS",
},
err: nil,
},
// Test 2 - when fstype is xfs and total inodes are small.
{
diskInfo: disk.Info{
Total: diskMinTotalSpace * 3,
FSType: "XFS",
Files: 9999,
},
err: errDiskFull,
},
// Test 3 - when fstype is btrfs and total inodes is empty.
{
diskInfo: disk.Info{
Total: diskMinTotalSpace * 3,
FSType: "BTRFS",
Files: 0,
},
err: nil,
},
// Test 4 - when fstype is xfs and total disk space is really small.
{
diskInfo: disk.Info{
Total: diskMinTotalSpace - diskMinTotalSpace/1024,
FSType: "XFS",
Files: 9999,
},
err: errDiskFull,
},
}
// Validate all cases.
for i, test := range testCases {
if err := checkDiskMinTotal(test.diskInfo); test.err != err {
t.Errorf("Test %d: Expected error %s, got %s", i+1, test.err, err)
}
}
}
// Checks for restrictions for min free disk space and inodes.
func TestCheckDiskFreeMin(t *testing.T) {
testCases := []struct {
diskInfo disk.Info
err error
}{
// Test 1 - when fstype is nfs.
{
diskInfo: disk.Info{
Free: diskMinTotalSpace * 3,
FSType: "NFS",
},
err: nil,
},
// Test 2 - when fstype is xfs and total inodes are small.
{
diskInfo: disk.Info{
Free: diskMinTotalSpace * 3,
FSType: "XFS",
Files: 9999,
Ffree: 9999,
},
err: errDiskFull,
},
// Test 3 - when fstype is btrfs and total inodes are empty.
{
diskInfo: disk.Info{
Free: diskMinTotalSpace * 3,
FSType: "BTRFS",
Files: 0,
},
err: nil,
},
// Test 4 - when fstype is xfs and total disk space is really small.
{
diskInfo: disk.Info{
Free: diskMinTotalSpace - diskMinTotalSpace/1024,
FSType: "XFS",
Files: 9999,
},
err: errDiskFull,
},
}
// Validate all cases.
for i, test := range testCases {
if err := checkDiskMinFree(test.diskInfo); test.err != err {
t.Errorf("Test %d: Expected error %s, got %s", i+1, test.err, err)
}
}
}

View File

@@ -112,7 +112,7 @@ type PostPolicyForm struct {
}
// parsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
// Convert po into interfaces and
// perform strict type conversion using reflection.
var rawPolicy struct {
@@ -122,7 +122,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
err := json.Unmarshal([]byte(policy), &rawPolicy)
if err != nil {
return PostPolicyForm{}, err
return ppf, err
}
parsedPolicy := PostPolicyForm{}
@@ -130,7 +130,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
// Parse expiry time.
parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
if err != nil {
return PostPolicyForm{}, err
return ppf, err
}
parsedPolicy.Conditions.Policies = make(map[string]struct {
Operator string
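For context, a hedged sketch of the input this parser handles — a browser-upload POST policy document, with made-up bucket, key prefix, and expiration values:

policyJSON := `{
  "expiration": "2017-12-30T12:00:00.000Z",
  "conditions": [
    {"bucket": "testbucket"},
    ["starts-with", "$key", "user/"]
  ]
}`
form, err := parsePostPolicyForm(policyJSON)
if err != nil {
	// malformed JSON or an unparseable RFC3339Nano expiration
}
_ = form.Expiration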

View File

@@ -133,6 +133,14 @@ func printConfigErrMsg(storageDisks []StorageAPI, sErrs []error, fn printOnceFun
// Generate a formatted message when cluster is misconfigured.
func getConfigErrMsg(storageDisks []StorageAPI, sErrs []error) string {
msg := colorBlue("\nDetected configuration inconsistencies in the cluster. Please fix the following servers.")
return msg + combineDiskErrs(storageDisks, sErrs)
}
// Combines each disk's errors into a newline-formatted string.
// This is a helper function for printing messages across
// all disks.
func combineDiskErrs(storageDisks []StorageAPI, sErrs []error) string {
var msg string
for i, disk := range storageDisks {
if disk == nil {
continue

View File

@@ -18,6 +18,7 @@ package cmd
import (
"errors"
"fmt"
"time"
"github.com/minio/mc/pkg/console"
@@ -118,6 +119,24 @@ func quickErrToActions(errMap map[error]int) InitActions {
// Preparatory initialization stage for XL validates known errors.
// Converts them into specific actions. These actions have a special
// purpose; the caller decides what needs to be done for each.
// Logic used in this function is as shown below.
//
// ---- Possible states and handled conditions -----
//
// - Formatted setup
// - InitObjectLayer when `disksFormatted >= readQuorum`
// - Wait for quorum when `disksFormatted < readQuorum && disksFormatted + disksOffline >= readQuorum`
// (we don't know yet if there are unformatted disks)
// - Wait for heal when `disksFormatted >= readQuorum && disksUnformatted > 0`
// (here we know there is at least one unformatted disk which requires healing)
//
// - Unformatted setup
// - Format/Wait for format when `disksUnformatted == diskCount`
//
// - Wait for all when `disksUnformatted + disksOffline == disksCount`
//
// All other conditions lead to server initialization being aborted
// (a worked example follows the function below).
func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
// Count errors by error value.
errMap := make(map[error]int)
@@ -135,19 +154,12 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
disksOffline := errMap[errDiskNotFound]
disksFormatted := errMap[nil]
disksUnformatted := errMap[errUnformattedDisk]
disksCorrupted := errMap[errCorruptedFormat]
// No quorum. Lots of offline disks, wait for quorum.
if disksOffline > readQuorum {
return WaitForQuorum
}
// There are quorum or more corrupted disks; there are not enough good
// disks to reconstruct format.json.
if disksCorrupted >= quorum {
return Abort
}
// All disks are unformatted, proceed to formatting disks.
if disksUnformatted == diskCount {
// Only the first server formats an uninitialized setup, others wait for notification.
@@ -163,6 +175,7 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
if disksUnformatted+disksFormatted+disksOffline == diskCount {
return WaitForAll
}
// Some disks possibly corrupted and too many unformatted disks.
return Abort
}
@@ -172,10 +185,13 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
if disksFormatted+disksOffline == diskCount {
return InitObjectLayer
}
// Some of the formatted disks are possibly corrupted or unformatted, heal them.
return WaitForHeal
}
// Exhausted all our checks, un-handled errors perhaps we Abort.
return WaitForQuorum
return Abort
}
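A worked example under the rules listed above — the four-disk error slice and the implied read quorum of two are assumptions for illustration:

sErrs := []error{nil, nil, errDiskNotFound, errUnformattedDisk}
action := prepForInitXL(true /* firstDisk */, sErrs, 4)
// Two formatted disks meet the assumed read quorum, but one
// unformatted disk remains, so per the rule list above the
// expected action is WaitForHeal.
_ = action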
// Prints retry message upon a specific retry count.
@@ -227,6 +243,7 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks
// actual errors for disks not being available.
printRetryMsg(sErrs, storageDisks)
}
// Pre-emptively check if one of the formatted disks
// is invalid. This function returns success for the
// most part unless one of the formats is not consistent
@@ -240,16 +257,17 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks
// first server has a wrong format and exit gracefully.
// refer - https://github.com/minio/minio/issues/4140
if retryCount > maxRetryAttempts {
errorIf(err, "Detected disk (%s) in unexpected format",
errorIf(err, "%s : Detected disk in unexpected format",
storageDisks[index])
continue
}
return err
}
// Check if this is an XL or distributed XL, anything > 1 is considered an XL backend.
switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
case Abort:
return errCorruptedFormat
return fmt.Errorf("%s", combineDiskErrs(storageDisks, sErrs))
case FormatDisks:
console.Eraseline()
printFormatMsg(endpoints, storageDisks, printOnceFn())

58
cmd/rpc-server.go Normal file
View File

@@ -0,0 +1,58 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"net/http"
"net/rpc"
miniohttp "github.com/minio/minio/pkg/http"
)
// ServeHTTP implements an http.Handler that answers RPC requests,
// hijacks the underlying connection and clears all deadlines if any.
func (server *rpcServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodConnect {
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
conn, _, err := w.(http.Hijacker).Hijack()
if err != nil {
errorIf(err, "rpc hijacking failed for: %s", req.RemoteAddr)
return
}
// Overrides Read/Write deadlines if any.
bufConn, ok := conn.(*miniohttp.BufConn)
if ok {
bufConn.RemoveTimeout()
conn = bufConn
}
// Can connect to RPC service using HTTP CONNECT to rpcPath.
io.WriteString(conn, "HTTP/1.0 200 Connected to Go RPC\n\n")
server.ServeConn(conn)
}
type rpcServer struct{ *rpc.Server }
// Similar to rpc.NewServer(), but provides a custom ServeHTTP override.
func newRPCServer() *rpcServer {
return &rpcServer{rpc.NewServer()}
}

76
cmd/rpc-server_test.go Normal file
View File

@@ -0,0 +1,76 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"net/http/httptest"
"testing"
router "github.com/gorilla/mux"
)
type ArithArgs struct {
A, B int
}
type ArithReply struct {
C int
}
type Arith int
// Some of Arith's methods have value args, some have pointer args. That's deliberate.
func (t *Arith) Add(args ArithArgs, reply *ArithReply) error {
reply.C = args.A + args.B
return nil
}
func TestGoHTTPRPC(t *testing.T) {
newServer := newRPCServer()
newServer.Register(new(Arith))
mux := router.NewRouter().SkipClean(true)
mux.Path("/foo").Handler(newServer)
httpServer := httptest.NewServer(mux)
defer httpServer.Close()
client := newRPCClient(httpServer.Listener.Addr().String(), "/foo", false)
defer client.Close()
// Synchronous calls
args := &ArithArgs{7, 8}
reply := new(ArithReply)
if err := client.Call("Arith.Add", args, reply); err != nil {
t.Errorf("Add: expected no error but got string %v", err)
}
if reply.C != args.A+args.B {
t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
}
resp, err := http.Get(httpServer.URL + "/foo")
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusMethodNotAllowed {
t.Errorf("Expected %d, got %d", http.StatusMethodNotAllowed, resp.StatusCode)
}
}

View File

@@ -17,8 +17,6 @@
package cmd
import (
"net/rpc"
router "github.com/gorilla/mux"
)
@@ -39,7 +37,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error {
},
}
s3PeerRPCServer := rpc.NewServer()
s3PeerRPCServer := newRPCServer()
err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers)
if err != nil {
return traceError(err)

View File

@@ -17,21 +17,21 @@
package cmd
import (
"errors"
"net/http"
"os"
"path/filepath"
"os/signal"
"runtime"
"strings"
"time"
"syscall"
"github.com/minio/cli"
"github.com/minio/dsync"
miniohttp "github.com/minio/minio/pkg/http"
)
var serverFlags = []cli.Flag{
cli.StringFlag{
Name: "address",
Value: ":9000",
Value: ":" + globalMinioPort,
Usage: "Bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname.",
},
}
@@ -58,6 +58,9 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
REGION:
MINIO_REGION: To set custom region. By default it is "us-east-1".
EXAMPLES:
1. Start minio server on "/home/shared" directory.
$ {{.HelpName}} /home/shared
@@ -78,61 +81,9 @@ EXAMPLES:
`,
}
// Check for updates and print a notification message
func checkUpdate(mode string) {
// It's OK to ignore any errors during getUpdateInfo() here.
if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil {
if updateMsg := computeUpdateMessage(downloadURL, older); updateMsg != "" {
log.Println(updateMsg)
}
}
}
func enableLoggers() {
fileLogTarget := serverConfig.Logger.GetFile()
if fileLogTarget.Enable {
err := InitFileLogger(&fileLogTarget)
fatalIf(err, "Unable to initialize file logger")
log.AddTarget(fileLogTarget)
}
consoleLogTarget := serverConfig.Logger.GetConsole()
if consoleLogTarget.Enable {
InitConsoleLogger(&consoleLogTarget)
}
log.SetConsoleTarget(consoleLogTarget)
}
func initConfig() {
// Config file does not exist, we create it fresh and return upon success.
if isFile(getConfigFile()) {
fatalIf(migrateConfig(), "Config migration failed.")
fatalIf(loadConfig(), "Unable to load config version: '%s'.", v18)
} else {
fatalIf(newConfig(), "Unable to initialize minio config for the first time.")
log.Println("Created minio configuration file successfully at " + getConfigDir())
}
}
func serverHandleCmdArgs(ctx *cli.Context) {
// Set configuration directory.
{
// Get configuration directory from command line argument.
configDir := ctx.String("config-dir")
if !ctx.IsSet("config-dir") && ctx.GlobalIsSet("config-dir") {
configDir = ctx.GlobalString("config-dir")
}
if configDir == "" {
fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
}
// Disallow relative paths, figure out absolute paths.
configDirAbs, err := filepath.Abs(configDir)
fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
setConfigDir(configDirAbs)
}
// Handle common command args.
handleCommonCmdArgs(ctx)
// Server address.
serverAddr := ctx.String("address")
@@ -159,36 +110,8 @@ func serverHandleCmdArgs(ctx *cli.Context) {
}
func serverHandleEnvVars() {
// Start profiler if env is set.
if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
globalProfiler = startProfiler(profiler)
}
// Check if object cache is disabled.
globalXLObjCacheDisabled = strings.EqualFold(os.Getenv("_MINIO_CACHE"), "off")
accessKey := os.Getenv("MINIO_ACCESS_KEY")
secretKey := os.Getenv("MINIO_SECRET_KEY")
if accessKey != "" && secretKey != "" {
cred, err := createCredential(accessKey, secretKey)
fatalIf(err, "Invalid access/secret Key set in environment.")
// credential Envs are set globally.
globalIsEnvCreds = true
globalActiveCred = cred
}
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBrowserFlag(browser)
if err != nil {
fatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
}
// browser Envs are set globally, this does not represent
// if browser is turned off or on.
globalIsEnvBrowser = true
globalIsBrowserEnabled = bool(browserFlag)
}
// Handle common environment variables.
handleCommonEnvVars()
if serverRegion := os.Getenv("MINIO_REGION"); serverRegion != "" {
// region Envs are set globally.
@@ -210,12 +133,16 @@ func serverMain(ctx *cli.Context) {
log.EnableQuiet()
}
// Handle all server command args.
serverHandleCmdArgs(ctx)
// Handle all server environment vars.
serverHandleEnvVars()
// Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize server config.
initConfig()
// Enable loggers as per configuration file.
@@ -226,8 +153,8 @@ func serverMain(ctx *cli.Context) {
// Check and load SSL certificates.
var err error
globalPublicCerts, globalRootCAs, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL key file")
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file")
if !quietFlag {
// Check for new updates from dl.minio.io.
@@ -253,43 +180,47 @@ func serverMain(ctx *cli.Context) {
initNSLock(globalIsDistXL)
// Configure server.
handler, err := configureServerHandler(globalEndpoints)
// Declare handler to avoid lint errors.
var handler http.Handler
handler, err = configureServerHandler(globalEndpoints)
fatalIf(err, "Unable to configure one of server's RPC services.")
// Initialize a new HTTP server.
apiServer := NewServerMux(globalMinioAddr, handler)
// Initialize S3 Peers inter-node communication only in distributed setup.
initGlobalS3Peers(globalEndpoints)
// Initialize Admin Peers inter-node communication only in distributed setup.
initGlobalAdminPeers(globalEndpoints)
// Start server, automatically configures TLS if certs are available.
globalHTTPServer = miniohttp.NewServer([]string{globalMinioAddr}, handler, globalTLSCertificate)
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
globalHTTPServer.ErrorLogFunc = errorIf
go func() {
cert, key := "", ""
if globalIsSSL {
cert, key = getPublicCertFile(), getPrivateKeyFile()
}
fatalIf(apiServer.ListenAndServe(cert, key), "Failed to start minio server.")
globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
newObject, err := newObjectLayer(globalEndpoints)
fatalIf(err, "Initializing object layer failed")
if err != nil {
errorIf(err, "Initializing object layer failed")
err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
os.Exit(1)
}
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()
// Prints the formatted startup message once object layer is initialized.
apiEndpoints := getAPIEndpoints(apiServer.Addr)
apiEndpoints := getAPIEndpoints(globalMinioAddr)
printStartupMessage(apiEndpoints)
// Set uptime time after object layer has initialized.
globalBootTime = UTCNow()
// Waits on the server.
<-globalServiceDoneCh
handleSignals()
}
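In outline, the commit replaces ListenAndServe-with-certs by a channel-driven start/stop. A hedged sketch of the wiring — the select loop is a simplification of what handleSignals presumably does, not its actual body:

go func() {
	globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
select {
case err := <-globalHTTPServerErrorCh:
	errorIf(err, "http server exited unexpectedly")
case <-globalOSSignalCh:
	errorIf(globalHTTPServer.Shutdown(), "Unable to shutdown http server")
}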
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.

View File

@@ -1,526 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"crypto/tls"
"errors"
"io"
"net"
"net/http"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
)
const (
serverShutdownPoll = 500 * time.Millisecond
)
// The value chosen below is the length of the longest
// HTTP verb among
// "PRI", "OPTIONS", "GET", "HEAD", "POST",
// "PUT", "DELETE", "TRACE", "CONNECT".
const (
maxHTTPVerbLen = 7
)
// HTTP2 PRI method.
var httpMethodPRI = "PRI"
var defaultHTTP2Methods = []string{
httpMethodPRI,
}
var defaultHTTP1Methods = []string{
http.MethodOptions,
http.MethodGet,
http.MethodHead,
http.MethodPost,
http.MethodPut,
http.MethodDelete,
http.MethodTrace,
http.MethodConnect,
}
// ConnMux - Peeks into the incoming connection for relevant
// protocol without advancing the underlying net.Conn (io.Reader).
// ConnMux - allows us to multiplex between TLS and Regular HTTP
// connections on the same listeners.
type ConnMux struct {
net.Conn
// To peek net.Conn incoming data
peeker *bufio.Reader
}
// NewConnMux - creates a new ConnMux instance
func NewConnMux(c net.Conn) *ConnMux {
br := bufio.NewReader(c)
return &ConnMux{
Conn: c,
peeker: bufio.NewReader(br),
}
}
// List of protocols to be detected by PeekProtocol function.
const (
protocolTLS = "tls"
protocolHTTP1 = "http"
protocolHTTP2 = "http2"
)
// PeekProtocol - reads the first bytes, then checks if they match
// one of the default HTTP methods. Returns an error if peeking
// over the connection fails.
func (c *ConnMux) PeekProtocol() (string, error) {
// Peek for HTTP verbs.
buf, err := c.peeker.Peek(maxHTTPVerbLen)
if err != nil {
return "", err
}
// Check for HTTP2 methods first.
for _, m := range defaultHTTP2Methods {
if strings.HasPrefix(string(buf), m) {
return protocolHTTP2, nil
}
}
// Check for HTTP1 methods.
for _, m := range defaultHTTP1Methods {
if strings.HasPrefix(string(buf), m) {
return protocolHTTP1, nil
}
}
// Default to TLS, this is not a real indication
// that the connection is TLS but that will be
// validated later by doing a handshake.
return protocolTLS, nil
}
// Read reads from the tcp session for data sent by
// the client, additionally sets deadline for 15 secs
// after each successful read. Deadline cancels and
// returns error if the client does not send any
// data in 15 secs. Also keeps track of the total
// bytes received from the client.
func (c *ConnMux) Read(b []byte) (n int, err error) {
// Update total incoming number of bytes.
defer func() {
globalConnStats.incInputBytes(n)
}()
n, err = c.peeker.Read(b)
if err != nil {
return n, err
}
// Read deadline was already set previously, set again
// after a successful read operation for future read
// operations.
c.Conn.SetReadDeadline(UTCNow().Add(defaultTCPReadTimeout))
// Success.
return n, nil
}
// Write to the client over a tcp session, additionally
// keeps track of the total bytes written by the server.
func (c *ConnMux) Write(b []byte) (n int, err error) {
// Update total outgoing number of bytes.
defer func() {
globalConnStats.incOutputBytes(n)
}()
// Call the conn write wrapper.
return c.Conn.Write(b)
}
// Close closes the underlying tcp connection.
func (c *ConnMux) Close() (err error) {
// Make sure that we always close a connection,
return c.Conn.Close()
}
// ListenerMux wraps the standard net.Listener to inspect
// the communication protocol upon network connection
// ListenerMux also wraps net.Listener to ensure that once
// Listener.Close returns, the underlying socket has been closed.
//
// - https://github.com/golang/go/issues/10527
//
// The default Listener returns from Close before the underlying
// socket has been closed if another goroutine has an active
// reference (e.g. is in Accept).
//
// The following sequence of events can happen:
//
// Goroutine 1 is running Accept, and is blocked, waiting for epoll
//
// Goroutine 2 calls Close. It sees an extra reference, and so cannot
// destroy the socket, but instead decrements a reference, marks the
// connection as closed and unblocks epoll.
//
// Goroutine 2 returns to the caller, makes a new connection.
// The new connection is sent to the socket (since it hasn't been destroyed)
//
// Goroutine 1 returns from epoll, and accepts the new connection.
//
// To avoid accepting connections after Close, we block Goroutine 2
// from returning from Close till Accept returns an error to the user.
type ListenerMux struct {
net.Listener
config *tls.Config
// acceptResCh is a channel for transporting wrapped net.Conn (regular or tls)
// after peeking the content of the latter
acceptResCh chan ListenerMuxAcceptRes
// Cond is used to signal Close when there are no references to the listener.
cond *sync.Cond
refs int
}
// ListenerMuxAcceptRes contains the final net.Conn (wrapped by TLS or not) to be sent to the http handler
type ListenerMuxAcceptRes struct {
conn net.Conn
err error
}
// Default keep-alive interval timeout. On a Linux system, the
// maximum number of probes sent can be checked with:
//
// > cat /proc/sys/net/ipv4/tcp_keepalive_probes
// ! 9
//
// Final value of total keep-alive comes up to 9 x 10 seconds = 1.5 minutes.
const defaultKeepAliveTimeout = 10 * time.Second // 10 seconds.
// Timeout to close and return error to the client when not sending any data.
const defaultTCPReadTimeout = 15 * time.Second // 15 seconds.
// newListenerMux listens and wraps accepted connections with tls after protocol peeking
func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux {
l := ListenerMux{
Listener: listener,
config: config,
cond: sync.NewCond(&sync.Mutex{}),
acceptResCh: make(chan ListenerMuxAcceptRes),
}
// Start listening, wrap connections with tls when needed
go func() {
// Extract tcp listener.
tcpListener, ok := l.Listener.(*net.TCPListener)
if !ok {
l.acceptResCh <- ListenerMuxAcceptRes{err: errInvalidArgument}
return
}
// Loop for accepting new connections
for {
// Use accept TCP method to receive the connection.
conn, err := tcpListener.AcceptTCP()
if err != nil {
l.acceptResCh <- ListenerMuxAcceptRes{err: err}
continue
}
// Enable Read timeout
conn.SetReadDeadline(UTCNow().Add(defaultTCPReadTimeout))
// Enable keep alive for each connection.
conn.SetKeepAlive(true)
conn.SetKeepAlivePeriod(defaultKeepAliveTimeout)
// Allocate new conn muxer.
connMux := NewConnMux(conn)
// Wrap the connection with ConnMux to be able to peek the data in the incoming connection
// and decide if we need to wrap the connection itself with a TLS or not
go func(connMux *ConnMux) {
protocol, cerr := connMux.PeekProtocol()
if cerr != nil {
// io.EOF is usually returned by non-http clients,
// just close the connection to avoid any leak.
if cerr != io.EOF {
errorIf(cerr, "Unable to peek into incoming protocol")
}
connMux.Close()
return
}
switch protocol {
case protocolTLS:
tlsConn := tls.Server(connMux, l.config)
// Make sure to handshake so that we know that this
// is a TLS connection, if not we should close and reject
// such a connection.
if cerr = tlsConn.Handshake(); cerr != nil {
// Close for junk message.
tlsConn.Close()
return
}
l.acceptResCh <- ListenerMuxAcceptRes{
conn: tlsConn,
}
default:
l.acceptResCh <- ListenerMuxAcceptRes{
conn: connMux,
}
}
}(connMux)
}
}()
return &l
}
// IsClosed - Returns if the underlying listener is closed fully.
func (l *ListenerMux) IsClosed() bool {
l.cond.L.Lock()
defer l.cond.L.Unlock()
return l.refs == 0
}
func (l *ListenerMux) incRef() {
l.cond.L.Lock()
l.refs++
l.cond.L.Unlock()
}
func (l *ListenerMux) decRef() {
l.cond.L.Lock()
l.refs--
newRefs := l.refs
l.cond.L.Unlock()
if newRefs == 0 {
l.cond.Broadcast()
}
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (l *ListenerMux) Close() error {
if l == nil {
return nil
}
if err := l.Listener.Close(); err != nil {
return err
}
l.cond.L.Lock()
for l.refs > 0 {
l.cond.Wait()
}
l.cond.L.Unlock()
return nil
}
// Accept - peek the protocol to decide if we should wrap the
// network stream with the TLS server
func (l *ListenerMux) Accept() (net.Conn, error) {
l.incRef()
defer l.decRef()
res := <-l.acceptResCh
return res.conn, res.err
}
// ServerMux - the main mux server
type ServerMux struct {
Addr string
handler http.Handler
listeners []*ListenerMux
// Current number of concurrent http requests
currentReqs int32
// Time to wait before forcing server shutdown
gracefulTimeout time.Duration
mu sync.RWMutex // guards closing, and listeners
closing bool
}
// NewServerMux constructor to create a ServerMux
func NewServerMux(addr string, handler http.Handler) *ServerMux {
m := &ServerMux{
Addr: addr,
handler: handler,
// Wait for 5 seconds for new incoming connections, otherwise
// forcibly close them during graceful stop or restart.
gracefulTimeout: 5 * time.Second,
}
// Returns configured HTTP server.
return m
}
// Initialize listeners on all ports.
func initListeners(serverAddr string, tls *tls.Config) ([]*ListenerMux, error) {
host, port, err := net.SplitHostPort(serverAddr)
if err != nil {
return nil, err
}
var listeners []*ListenerMux
if host == "" {
var listener net.Listener
listener, err = net.Listen("tcp", serverAddr)
if err != nil {
return nil, err
}
listeners = append(listeners, newListenerMux(listener, tls))
return listeners, nil
}
var addrs []string
if net.ParseIP(host) != nil {
addrs = append(addrs, host)
} else {
addrs, err = net.LookupHost(host)
if err != nil {
return nil, err
}
if len(addrs) == 0 {
return nil, errUnexpected
}
}
for _, addr := range addrs {
var listener net.Listener
listener, err = net.Listen("tcp", net.JoinHostPort(addr, port))
if err != nil {
return nil, err
}
listeners = append(listeners, newListenerMux(listener, tls))
}
return listeners, nil
}
// ListenAndServe - serve HTTP requests with protocol multiplexing support
// TLS is activated when the certFile and keyFile parameters are not empty.
func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) {
tlsEnabled := certFile != "" && keyFile != ""
config := &tls.Config{
// Causes servers to use Go's default ciphersuite preferences,
// which are tuned to avoid attacks. Does nothing on clients.
PreferServerCipherSuites: true,
// Set minimum version to TLS 1.2
MinVersion: tls.VersionTLS12,
} // Always instantiate.
if tlsEnabled {
// Configure TLS in the server
if config.NextProtos == nil {
config.NextProtos = []string{"http/1.1", "h2"}
}
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
}
go m.handleServiceSignals()
listeners, err := initListeners(m.Addr, config)
if err != nil {
return err
}
m.mu.Lock()
m.listeners = listeners
m.mu.Unlock()
// All http requests start to be processed by httpHandler
httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if tlsEnabled && r.TLS == nil {
// TLS is enabled but request is not TLS
// configured - return error to client.
writeErrorResponse(w, ErrInsecureClientRequest, &url.URL{})
} else {
// Return ServiceUnavailable to clients which are sending requests
// in the shutdown phase
m.mu.RLock()
closing := m.closing
m.mu.RUnlock()
if closing {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
// Execute registered handlers, update currentReqs to keep
// track of concurrent requests processing on the server
atomic.AddInt32(&m.currentReqs, 1)
m.handler.ServeHTTP(w, r)
atomic.AddInt32(&m.currentReqs, -1)
}
})
var wg = &sync.WaitGroup{}
for _, listener := range listeners {
wg.Add(1)
go func(listener *ListenerMux) {
defer wg.Done()
serr := http.Serve(listener, httpHandler)
// Do not print the error if the listener is closed.
if !listener.IsClosed() {
errorIf(serr, "Unable to serve incoming requests.")
}
}(listener)
}
// Wait for all http.Serve's to return.
wg.Wait()
return nil
}
// Close initiates the graceful shutdown
func (m *ServerMux) Close() error {
m.mu.Lock()
if m.closing {
m.mu.Unlock()
return errors.New("Server has been closed")
}
// Closed completely.
m.closing = true
// Close the listeners.
for _, listener := range m.listeners {
if err := listener.Close(); err != nil {
m.mu.Unlock()
return err
}
}
m.mu.Unlock()
// Starting graceful shutdown. Check if all requests are finished
// in regular interval or force the shutdown
ticker := time.NewTicker(serverShutdownPoll)
defer ticker.Stop()
for {
select {
case <-time.After(m.gracefulTimeout):
return nil
case <-ticker.C:
if atomic.LoadInt32(&m.currentReqs) <= 0 {
return nil
}
}
}
}

View File

@@ -1,506 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"net/http"
"os"
"runtime"
"strings"
"sync"
"testing"
"time"
)
func TestListenerAcceptAfterClose(t *testing.T) {
var wg sync.WaitGroup
for i := 0; i < 16; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 10; i++ {
runTest(t)
}
}()
}
wg.Wait()
}
func runTest(t *testing.T) {
const connectionsBeforeClose = 1
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
ln = newListenerMux(ln, &tls.Config{})
addr := ln.Addr().String()
waitForListener := make(chan error)
go func() {
defer close(waitForListener)
var connCount int
for {
conn, aerr := ln.Accept()
if aerr != nil {
return
}
connCount++
if connCount > connectionsBeforeClose {
waitForListener <- errUnexpected
return
}
conn.Close()
}
}()
for i := 0; i < connectionsBeforeClose; i++ {
err = dial(addr)
if err != nil {
t.Fatal(err)
}
}
ln.Close()
dial(addr)
err = <-waitForListener
if err != nil {
t.Fatal(err)
}
}
func dial(addr string) error {
conn, err := net.Dial("tcp", addr)
if err == nil {
conn.Close()
}
return err
}
// Tests initializing listeners.
func TestInitListeners(t *testing.T) {
testCases := []struct {
serverAddr string
shouldPass bool
}{
// Test 1 with ip and port.
{
serverAddr: net.JoinHostPort("127.0.0.1", "0"),
shouldPass: true,
},
// Test 2 only port.
{
serverAddr: net.JoinHostPort("", "0"),
shouldPass: true,
},
// Test 3 with no port error.
{
serverAddr: "127.0.0.1",
shouldPass: false,
},
// Test 4 with 'foobar' host not resolvable.
{
serverAddr: "foobar:9000",
shouldPass: false,
},
}
for i, testCase := range testCases {
listeners, err := initListeners(testCase.serverAddr, &tls.Config{})
if testCase.shouldPass {
if err != nil {
t.Fatalf("Test %d: Unable to initialize listeners %s", i+1, err)
}
for _, listener := range listeners {
if err = listener.Close(); err != nil {
t.Fatalf("Test %d: Unable to close listeners %s", i+1, err)
}
}
}
if err == nil && !testCase.shouldPass {
t.Fatalf("Test %d: Should fail but is successful", i+1)
}
}
// Windows doesn't have 'localhost' hostname.
if runtime.GOOS != globalWindowsOSName {
listeners, err := initListeners("localhost:"+getFreePort(), &tls.Config{})
if err != nil {
t.Fatalf("Test 3: Unable to initialize listeners %s", err)
}
for _, listener := range listeners {
if err = listener.Close(); err != nil {
t.Fatalf("Test 3: Unable to close listeners %s", err)
}
}
}
}
func TestClose(t *testing.T) {
// Create ServerMux
m := NewServerMux("", nil)
if err := m.Close(); err != nil {
t.Error("Server errored while trying to Close", err)
}
// Closing again should return an error.
if err := m.Close(); err.Error() != "Server has been closed" {
t.Error("Unexepcted error expected \"Server has been closed\", got", err)
}
}
func TestServerMux(t *testing.T) {
var err error
var got []byte
var res *http.Response
// Create ServerMux
m := NewServerMux("127.0.0.1:0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "hello")
}))
// Start serving requests
go m.ListenAndServe("", "")
// Issue a GET request. Since we started the server in a goroutine, it may not be ready
// at this point, so we allow up to 5 failed retries before declaring an error
for i := 0; i < 5; i++ {
// Sleep one second
time.Sleep(1 * time.Second)
// Check if one listener is ready
m.mu.Lock()
listenersCount := len(m.listeners)
m.mu.Unlock()
if listenersCount == 0 {
continue
}
m.mu.Lock()
listenerAddr := m.listeners[0].Addr().String()
m.mu.Unlock()
// Issue the GET request
client := http.Client{}
res, err = client.Get("http://" + listenerAddr)
if err != nil {
continue
}
// Read the request response
got, err = ioutil.ReadAll(res.Body)
if err != nil {
continue
}
// We've got a response, quit the loop
break
}
// Check if the error persisted after 5 retries
if err != nil {
t.Fatal(err)
}
// Check the web service response
if string(got) != "hello" {
t.Errorf("got %q, want hello", string(got))
}
}
func TestServerCloseBlocking(t *testing.T) {
// Create ServerMux
m := NewServerMux("127.0.0.1:0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "hello")
}))
// Start serving requests in a goroutine
go m.ListenAndServe("", "")
// Dial, trying up to 5 times before declaring a failure
dial := func() (net.Conn, error) {
var c net.Conn
var err error
for i := 0; i < 5; i++ {
// Sleep one second in case the server is not ready yet
time.Sleep(1 * time.Second)
// Check if there is at least one listener configured
m.mu.Lock()
if len(m.listeners) == 0 {
m.mu.Unlock()
continue
}
m.mu.Unlock()
// Run the actual Dial
m.mu.Lock()
c, err = net.Dial("tcp", m.listeners[0].Addr().String())
m.mu.Unlock()
if err != nil {
continue
}
}
return c, err
}
// Dial to open a StateNew but don't send anything
cnew, err := dial()
if err != nil {
t.Fatal(err)
}
defer cnew.Close()
// Dial another connection but idle after a request to have StateIdle
cidle, err := dial()
if err != nil {
t.Fatal(err)
}
defer cidle.Close()
cidle.Write([]byte("HEAD / HTTP/1.1\r\nHost: foo\r\n\r\n"))
_, err = http.ReadResponse(bufio.NewReader(cidle), nil)
if err != nil {
t.Fatal(err)
}
// Make sure we don't block forever.
m.Close()
}
func TestServerListenAndServePlain(t *testing.T) {
wait := make(chan struct{})
addr := net.JoinHostPort("127.0.0.1", getFreePort())
errc := make(chan error)
once := &sync.Once{}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Create ServerMux and when we receive a request we stop waiting
m := NewServerMux(addr, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "hello")
once.Do(func() { close(wait) })
}))
// ListenAndServe in a goroutine, but we don't know when it's ready
go func() { errc <- m.ListenAndServe("", "") }()
// Keep trying the server until it's accepting connections
go func() {
client := http.Client{Timeout: time.Millisecond * 10}
for {
res, _ := client.Get("http://" + addr)
if res != nil && res.StatusCode == http.StatusOK {
break
}
}
}()
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
select {
case err := <-errc:
if err != nil {
t.Fatal(err)
}
case <-wait:
return
}
}()
// Wait until we get an error or wait closed
wg.Wait()
// Shutdown the ServerMux
m.Close()
}
func TestServerListenAndServeTLS(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
defer removeAll(rootPath)
wait := make(chan struct{})
addr := net.JoinHostPort("127.0.0.1", getFreePort())
errc := make(chan error)
once := &sync.Once{}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Create ServerMux and when we receive a request we stop waiting
m := NewServerMux(addr, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "hello")
once.Do(func() { close(wait) })
}))
// Create a cert
err = createConfigDir()
if err != nil {
t.Fatal(err)
}
certFile := getPublicCertFile()
keyFile := getPrivateKeyFile()
defer os.RemoveAll(certFile)
defer os.RemoveAll(keyFile)
err = generateTestCert(addr)
if err != nil {
t.Error(err)
return
}
// ListenAndServe in a goroutine, but we don't know when it's ready
go func() { errc <- m.ListenAndServe(certFile, keyFile) }()
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := http.Client{
Timeout: time.Millisecond * 10,
Transport: tr,
}
// Keep trying the server until it's accepting connections
start := UTCNow()
for {
res, _ := client.Get("https://" + addr)
if res != nil && res.StatusCode == http.StatusOK {
break
}
// Explicit check to terminate the loop after 5 minutes
// (for investigation of issue #4461)
if UTCNow().Sub(start) >= 5*time.Minute {
t.Fatalf("Failed to establish connection after 5 minutes")
}
}
// Once a request succeeds, subsequent requests should
// work fine.
res, err := client.Get("http://" + addr)
if err != nil {
t.Errorf("Got unexpected error: %v", err)
}
// Without TLS we expect a Bad-Request response from the server.
if !(res != nil && res.StatusCode == http.StatusBadRequest && res.Request.URL.Scheme == httpScheme) {
t.Fatalf("Plaintext request to TLS server did not have expected response!")
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("Error reading body")
}
// Check that the expected error is received.
bodyStr := string(body)
apiErr := getAPIError(ErrInsecureClientRequest)
if !(strings.Contains(bodyStr, apiErr.Code) && strings.Contains(bodyStr, apiErr.Description)) {
t.Fatalf("Plaintext request to TLS server did not have expected response body!")
}
wg.Done()
}()
wg.Add(1)
go func() {
defer wg.Done()
select {
case err := <-errc:
if err != nil {
t.Error(err)
return
}
case <-wait:
return
}
}()
// Wait until we get an error or wait closed
wg.Wait()
// Shutdown the ServerMux
m.Close()
}
// generateTestCert creates a cert and a key used for testing only
func generateTestCert(host string) error {
certPath := getPublicCertFile()
keyPath := getPrivateKeyFile()
priv, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return err
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return err
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Minio Test Cert"},
},
NotBefore: UTCNow(),
NotAfter: UTCNow().Add(time.Minute * 1),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
if ip := net.ParseIP(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
}
template.IsCA = true
template.KeyUsage |= x509.KeyUsageCertSign
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return err
}
certOut, err := os.Create(certPath)
if err != nil {
return err
}
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
keyOut.Close()
return nil
}

View File

@@ -52,7 +52,7 @@ func printStartupMessage(apiEndPoints []string) {
// Prints the `mc` CLI configuration message, choosing the
// first endpoint as the default.
printCLIAccessMsg(strippedAPIEndpoints[0])
printCLIAccessMsg(strippedAPIEndpoints[0], "myminio")
// Prints documentation message.
printObjectAPIMsg()
@@ -109,7 +109,7 @@ func printServerCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" {
@@ -141,17 +141,17 @@ func printEventNotifiers() {
// Prints startup message for command line access. Prints link to our documentation
// and custom platform specific message.
func printCLIAccessMsg(endPoint string) {
func printCLIAccessMsg(endPoint string, alias string) {
// Get saved credentials.
cred := serverConfig.GetCredential()
// Configure 'mc'; the following block prints platform-specific information for the minio client.
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey)
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey)
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
@@ -174,7 +174,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
if storageInfo.Backend.Type == Erasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 {
diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures)
diskInfo += fmt.Sprintf("We can withstand [%d] drive failure(s).", maxDiskFailures)
}
msg += colorBlue("\nStatus:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
}

View File

@@ -140,7 +140,7 @@ func TestPrintCLIAccessMsg(t *testing.T) {
defer removeAll(root)
apiEndpoints := []string{"http://127.0.0.1:9000"}
printCLIAccessMsg(apiEndpoints[0])
printCLIAccessMsg(apiEndpoints[0], "myminio")
}
// Test print startup message.

View File

@@ -1619,7 +1619,8 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
c.Assert(strings.Contains(string(getContent), "<Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner>"), Equals, true)
c.Assert(strings.Contains(string(getContent), fmt.Sprintf("<Owner><ID>%s</ID><DisplayName></DisplayName></Owner>",
globalMinioDefaultOwnerID)), Equals, true)
}

View File

@@ -19,8 +19,6 @@ package cmd
import (
"os"
"os/exec"
"syscall"
"time"
)
// Type of service signals currently supported.
@@ -64,59 +62,3 @@ func restartProcess() error {
cmd.Stderr = os.Stderr
return cmd.Start()
}
// Handles all serviceSignal and execute service functions.
func (m *ServerMux) handleServiceSignals() error {
// Custom exit function
runExitFn := func(err error) {
// If global profiler is set stop before we exit.
if globalProfiler != nil {
globalProfiler.Stop()
}
// Call user supplied user exit function
fatalIf(err, "Unable to gracefully complete service operation.")
// We are usually done here, close global service done channel.
globalServiceDoneCh <- struct{}{}
}
// Wait for SIGTERM in a go-routine.
trapCh := signalTrap(os.Interrupt, syscall.SIGTERM)
go func(trapCh <-chan bool) {
<-trapCh
globalServiceSignalCh <- serviceStop
}(trapCh)
// Start listening on service signal. Monitor signals.
for {
signal := <-globalServiceSignalCh
switch signal {
case serviceStatus:
/// We don't do anything for this.
case serviceRestart:
if err := m.Close(); err != nil {
errorIf(err, "Unable to close server gracefully")
}
if err := restartProcess(); err != nil {
errorIf(err, "Unable to restart the server.")
}
runExitFn(nil)
case serviceStop:
log.Println("Received signal to exit.")
go func() {
time.Sleep(serverShutdownPoll + time.Millisecond*100)
log.Println("Waiting for active connections to terminate - press Ctrl+C to quit immediately.")
}()
if err := m.Close(); err != nil {
errorIf(err, "Unable to close server gracefully")
}
objAPI := newObjectLayerFn()
if objAPI == nil {
// Server not initialized yet, exit happily.
runExitFn(nil)
} else {
runExitFn(objAPI.Shutdown())
}
}
}
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Client, (C) 2015 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,32 +18,67 @@ package cmd
import (
"os"
"os/signal"
)
// signalTrap traps the registered signals and notifies the caller.
func signalTrap(sig ...os.Signal) <-chan bool {
// channel to notify the caller.
trapCh := make(chan bool, 1)
func handleSignals() {
// Custom exit function
exit := func(state bool) {
// If global profiler is set stop before we exit.
if globalProfiler != nil {
globalProfiler.Stop()
}
go func(chan<- bool) {
// channel to receive signals.
sigCh := make(chan os.Signal, 1)
defer close(sigCh)
if state {
os.Exit(0)
}
// `signal.Notify` registers the given channel to
// receive notifications of the specified signals.
signal.Notify(sigCh, sig...)
os.Exit(1)
}
// Wait for the signal.
<-sigCh
stopProcess := func() bool {
var err, oerr error
// Once signal has been received stop signal Notify handler.
signal.Stop(sigCh)
err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
// Notify the caller.
trapCh <- true
}(trapCh)
if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown()
errorIf(oerr, "Unable to shutdown object layer")
}
return trapCh
return (err == nil && oerr == nil)
}
for {
select {
case err := <-globalHTTPServerErrorCh:
errorIf(err, "http server exited abnormally")
var oerr error
if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown()
errorIf(oerr, "Unable to shutdown object layer")
}
exit(err == nil && oerr == nil)
case osSignal := <-globalOSSignalCh:
log.Printf("Exiting on signal %v\n", osSignal)
exit(stopProcess())
case signal := <-globalServiceSignalCh:
switch signal {
case serviceStatus:
// Ignore this at the moment.
case serviceRestart:
log.Println("Restarting on service signal")
err := globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
rerr := restartProcess()
errorIf(rerr, "Unable to restart the server")
exit(err == nil && rerr == nil)
case serviceStop:
log.Println("Stopping on service signal")
exit(stopProcess())
}
}
}
}
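This diff shows handleSignals draining globalOSSignalCh but not where that channel is registered; a plausible wiring, assuming globalOSSignalCh is a chan os.Signal, would be:

// Hypothetical registration site (outside this hunk): forward SIGINT and
// SIGTERM into the channel that handleSignals selects on.
// Assumes imports "os", "os/signal" and "syscall".
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)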

View File

@@ -45,20 +45,20 @@ func (c credentialHeader) getScope() string {
}
// parse credentialHeader string into its structured form.
func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode) {
func parseCredentialHeader(credElement string) (ch credentialHeader, aec APIErrorCode) {
creds := strings.Split(strings.TrimSpace(credElement), "=")
if len(creds) != 2 {
return credentialHeader{}, ErrMissingFields
return ch, ErrMissingFields
}
if creds[0] != "Credential" {
return credentialHeader{}, ErrMissingCredTag
return ch, ErrMissingCredTag
}
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
if len(credElements) != 5 {
return credentialHeader{}, ErrCredMalformed
return ch, ErrCredMalformed
}
if !isAccessKeyValid(credElements[0]) {
return credentialHeader{}, ErrInvalidAccessKeyID
return ch, ErrInvalidAccessKeyID
}
// Save access key id.
cred := credentialHeader{
@@ -67,15 +67,15 @@ func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode)
var e error
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
if e != nil {
return credentialHeader{}, ErrMalformedCredentialDate
return ch, ErrMalformedCredentialDate
}
cred.scope.region = credElements[2]
if credElements[3] != "s3" {
return credentialHeader{}, ErrInvalidService
return ch, ErrInvalidService
}
cred.scope.service = credElements[3]
if credElements[4] != "aws4_request" {
return credentialHeader{}, ErrInvalidRequestVersion
return ch, ErrInvalidRequestVersion
}
cred.scope.request = credElements[4]
return cred, ErrNone
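To make the five-element split concrete, a well-formed credential element parses like this (access key and date are illustrative):

// Illustrative input for parseCredentialHeader.
ch, errCode := parseCredentialHeader("Credential=AKIAIOSFODNN7EXAMPLE/20170724/us-east-1/s3/aws4_request")
// errCode == ErrNone; ch.accessKey == "AKIAIOSFODNN7EXAMPLE" and
// ch.scope holds date 20170724, region "us-east-1", service "s3",
// request "aws4_request".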
@@ -148,17 +148,17 @@ func doesV4PresignParamsExist(query url.Values) APIErrorCode {
}
// Parses all the presigned signature values into separate elements.
func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
func parsePreSignV4(query url.Values) (psv preSignValues, aec APIErrorCode) {
var err APIErrorCode
// verify whether the required query params exist.
err = doesV4PresignParamsExist(query)
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Verify if the query algorithm is supported or not.
if query.Get("X-Amz-Algorithm") != signV4Algorithm {
return preSignValues{}, ErrInvalidQuerySignatureAlgo
return psv, ErrInvalidQuerySignatureAlgo
}
// Initialize signature version '4' structured header.
@@ -167,35 +167,35 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
// Save credential.
preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
var e error
// Save date in native time.Time.
preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
if e != nil {
return preSignValues{}, ErrMalformedPresignedDate
return psv, ErrMalformedPresignedDate
}
// Save expires in native time.Duration.
preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
if e != nil {
return preSignValues{}, ErrMalformedExpires
return psv, ErrMalformedExpires
}
if preSignV4Values.Expires < 0 {
return preSignValues{}, ErrNegativeExpires
return psv, ErrNegativeExpires
}
// Save signed headers.
preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Save signature.
preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Return structured form of signature query string.
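For reference, the query string parsePreSignV4 expects carries the algorithm, credential, date, expiry, signed headers and signature; a sketch with illustrative values (assumes import "net/url"):

// Illustrative presigned-URL query; the signature value is made up.
query, _ := url.ParseQuery("X-Amz-Algorithm=AWS4-HMAC-SHA256" +
	"&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20170724%2Fus-east-1%2Fs3%2Faws4_request" +
	"&X-Amz-Date=20170724T000000Z" +
	"&X-Amz-Expires=604800" +
	"&X-Amz-SignedHeaders=host" +
	"&X-Amz-Signature=0ab4...")
psv, errCode := parsePreSignV4(query)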
@@ -207,25 +207,25 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
// Authorization: algorithm Credential=accessKeyID/credScope, \
// SignedHeaders=signedHeaders, Signature=signature
//
func parseSignV4(v4Auth string) (signValues, APIErrorCode) {
func parseSignV4(v4Auth string) (sv signValues, aec APIErrorCode) {
// Remove all spaces: some clients send spaced
// parameters and some don't, so we proactively strip any spaces
// to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1)
if v4Auth == "" {
return signValues{}, ErrAuthHeaderEmpty
return sv, ErrAuthHeaderEmpty
}
// Verify if the header algorithm is supported or not.
if !strings.HasPrefix(v4Auth, signV4Algorithm) {
return signValues{}, ErrSignatureVersionNotSupported
return sv, ErrSignatureVersionNotSupported
}
// Strip off the Algorithm prefix.
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
if len(authFields) != 3 {
return signValues{}, ErrMissingFields
return sv, ErrMissingFields
}
// Initialize signature version '4' structured header.
@@ -235,19 +235,19 @@ func parseSignV4(v4Auth string) (signValues, APIErrorCode) {
// Save credential values.
signV4Values.Credential, err = parseCredentialHeader(authFields[0])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Save signed headers.
signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Save signature.
signV4Values.Signature, err = parseSignature(authFields[2])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Return the structure here.
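Putting the three comma-separated fields together, a complete V4 Authorization header value looks like this (signature value illustrative):

// Illustrative header for parseSignV4; spaces are stripped before parsing.
v4Auth := "AWS4-HMAC-SHA256 " +
	"Credential=AKIAIOSFODNN7EXAMPLE/20170724/us-east-1/s3/aws4_request, " +
	"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
	"Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"
sv, errCode := parseSignV4(v4Auth)
// errCode == ErrNone on a well-formed header; sv carries the three parts.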

View File

@@ -39,10 +39,9 @@ import (
// AWS Signature Version '4' constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
presignedHostHeader = "host"
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// getCanonicalHeaders generates a list of request headers with their values

View File

@@ -27,6 +27,10 @@ var errUnexpected = errors.New("Unexpected error, please report this issue at ht
// errCorruptedFormat - corrupted backend format.
var errCorruptedFormat = errors.New("corrupted backend format, please join https://slack.minio.io for assistance")
// errFormatNotSupported - returned when older minio tries to parse metadata
// created by newer minio.
var errFormatNotSupported = errors.New("format not supported")
// errUnformattedDisk - unformatted disk found.
var errUnformattedDisk = errors.New("unformatted disk found")
@@ -51,9 +55,6 @@ var errFileNotFound = errors.New("file not found")
// errFileNameTooLong - given file name is longer than the supported length.
var errFileNameTooLong = errors.New("file name too long")
// errFileComponentInvalid - given file name has invalid components.
var errFileComponentInvalid = errors.New("file name has invalid components")
// errVolumeExists - cannot create same volume again.
var errVolumeExists = errors.New("volume already exists")

View File

@@ -18,7 +18,6 @@ package cmd
import (
"io"
"net/rpc"
"path"
"time"
@@ -217,7 +216,7 @@ func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *AuthRPCRe
}
// Initialize new storage rpc.
func newRPCServer(endpoints EndpointList) (servers []*storageServer, err error) {
func newStorageRPCServer(endpoints EndpointList) (servers []*storageServer, err error) {
for _, endpoint := range endpoints {
if endpoint.IsLocal {
storage, err := newPosix(endpoint.Path)
@@ -238,14 +237,14 @@ func newRPCServer(endpoints EndpointList) (servers []*storageServer, err error)
// registerStorageRPCRouter - register storage rpc router.
func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error {
// Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newRPCServer(endpoints)
storageRPCs, err := newStorageRPCServer(endpoints)
if err != nil {
return traceError(err)
}
// Create a unique route for each disk exported from this node.
for _, stServer := range storageRPCs {
storageRPCServer := rpc.NewServer()
storageRPCServer := newRPCServer()
err = storageRPCServer.RegisterName("Storage", stServer)
if err != nil {
return traceError(err)

View File

@@ -41,13 +41,10 @@ const (
)
// getChunkSignature - get chunk signature.
func getChunkSignature(seedSignature string, date time.Time, hashedChunk string) string {
func getChunkSignature(seedSignature string, region string, date time.Time, hashedChunk string) string {
// Access credentials.
cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Calculate string to sign.
stringToSign := signV4ChunkedAlgorithm + "\n" +
date.Format(iso8601Format) + "\n" +
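The hunk is truncated here; per the AWS streaming-signature documentation the chunk string-to-sign continues with the scope, the previous signature, the hash of the empty string, and the hashed chunk:

// Full layout of the per-chunk string-to-sign (AWS documented format):
//   AWS4-HMAC-SHA256-PAYLOAD\n
//   <iso8601 date>\n
//   <scope: yyyymmdd/region/s3/aws4_request>\n
//   <previous signature (seed for the first chunk)>\n
//   SHA256("")\n
//   <hex SHA256 of the chunk payload>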
@@ -69,12 +66,12 @@ func getChunkSignature(seedSignature string, date time.Time, hashedChunk string)
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
// returns the signature on success, otherwise an error code if the signature
// mismatches or any other error occurs while parsing and validating.
func calculateSeedSignature(r *http.Request) (signature string, date time.Time, errCode APIErrorCode) {
func calculateSeedSignature(r *http.Request) (signature string, region string, date time.Time, errCode APIErrorCode) {
// Access credentials.
cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Configured region.
confRegion := serverConfig.GetRegion()
// Copy request.
req := *r
@@ -85,7 +82,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
// Parse signature version '4' header.
signV4Values, errCode := parseSignV4(v4Auth)
if errCode != ErrNone {
return "", time.Time{}, errCode
return "", "", time.Time{}, errCode
}
// Payload streaming.
@@ -93,32 +90,32 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
// Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
if payload != req.Header.Get("X-Amz-Content-Sha256") {
return "", time.Time{}, ErrContentSHA256Mismatch
return "", "", time.Time{}, ErrContentSHA256Mismatch
}
// Extract all the signed headers along with its values.
extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
if errCode != ErrNone {
return "", time.Time{}, errCode
return "", "", time.Time{}, errCode
}
// Verify if the access key id matches.
if signV4Values.Credential.accessKey != cred.AccessKey {
return "", time.Time{}, ErrInvalidAccessKeyID
return "", "", time.Time{}, ErrInvalidAccessKeyID
}
// Verify if region is valid.
sRegion := signV4Values.Credential.scope.region
region = signV4Values.Credential.scope.region
// Validate the region only if it is set; some operations,
// for example GetBucketLocation, do not need the region validated.
if !isValidRegion(sRegion, region) {
return "", time.Time{}, ErrInvalidRegion
if !isValidRegion(region, confRegion) {
return "", "", time.Time{}, ErrInvalidRegion
}
// Extract date; if not present, throw an error.
var dateStr string
if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
if dateStr = r.Header.Get("Date"); dateStr == "" {
return "", time.Time{}, ErrMissingDateHeader
return "", "", time.Time{}, ErrMissingDateHeader
}
}
// Parse date header.
@@ -126,7 +123,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
date, err = time.Parse(iso8601Format, dateStr)
if err != nil {
errorIf(err, "Unable to parse date", dateStr)
return "", time.Time{}, ErrMalformedDate
return "", "", time.Time{}, ErrMalformedDate
}
// Query string.
@@ -146,11 +143,11 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
// Verify if signature match.
if newSignature != signV4Values.Signature {
return "", time.Time{}, ErrSignatureDoesNotMatch
return "", "", time.Time{}, ErrSignatureDoesNotMatch
}
// Return calculated signature.
return newSignature, date, ErrNone
return newSignature, region, date, ErrNone
}
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
@@ -168,7 +165,7 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) {
seedSignature, seedDate, errCode := calculateSeedSignature(req)
seedSignature, region, seedDate, errCode := calculateSeedSignature(req)
if errCode != ErrNone {
return nil, errCode
}
@@ -176,6 +173,7 @@ func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) {
reader: bufio.NewReader(req.Body),
seedSignature: seedSignature,
seedDate: seedDate,
region: region,
chunkSHA256Writer: sha256.New(),
state: readChunkHeader,
}, ErrNone
@@ -187,6 +185,7 @@ type s3ChunkedReader struct {
reader *bufio.Reader
seedSignature string
seedDate time.Time
region string
state chunkState
lastChunk bool
chunkSignature string
@@ -304,7 +303,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
// Calculate the hashed chunk.
hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
// Calculate the chunk signature.
newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk)
newSignature := getChunkSignature(cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
if cr.chunkSignature != newSignature {
// Chunk signature doesn't match we return signature does not match.
cr.err = errSignatureMismatch
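For orientation, the aws-chunked body that newSignV4ChunkedReader decodes interleaves a hex length and a per-chunk signature with each payload; a sketch of the wire format, with illustrative signatures and an elided payload:

// Each chunk: <hex-size>;chunk-signature=<sig>\r\n<payload>\r\n, and the
// stream ends with a zero-size chunk carrying the final signature.
const exampleChunkedBody = "400;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n" +
	"<1024 payload bytes>\r\n" +
	"0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n"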

View File

@@ -17,6 +17,7 @@
package cmd
import (
"bufio"
"fmt"
"io/ioutil"
"net/http"
@@ -140,6 +141,30 @@ func IsKubernetes() bool {
return os.Getenv("KUBERNETES_SERVICE_HOST") != ""
}
// Minio Helm chart uses the DownwardAPIFile to write pod label info to /podinfo/labels
// More info: https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#store-pod-fields
// Check whether this is a Helm package installation and report the Helm chart version
func getHelmVersion(helmInfoFilePath string) string {
// Open the pod label file if it exists.
helmInfoFile, err := os.Open(helmInfoFilePath)
// Log errors and return "" as Minio can be deployed without Helm charts as well.
if err != nil && !os.IsNotExist(err) {
errorIf(err, "Unable to read %s", helmInfoFilePath)
return ""
}
if helmInfoFile == nil {
// File does not exist; not a Helm deployment.
return ""
}
defer helmInfoFile.Close()
scanner := bufio.NewScanner(helmInfoFile)
for scanner.Scan() {
if strings.Contains(scanner.Text(), "chart=") {
helmChartVersion := strings.TrimPrefix(scanner.Text(), "chart=")
// remove quotes from the chart version
return strings.Trim(helmChartVersion, `"`)
}
}
return ""
}
func isSourceBuild(minioVersion string) bool {
_, err := time.Parse(time.RFC3339, minioVersion)
return err != nil
@@ -182,6 +207,15 @@ func getUserAgent(mode string) string {
userAgent += " Minio/" + "universe-" + universePkgVersion
}
}
if IsKubernetes() {
// In Kubernetes environment, try to fetch the helm package version
helmChartVersion := getHelmVersion("/podinfo/labels")
if helmChartVersion != "" {
userAgent += " Minio/" + "helm-" + helmChartVersion
}
}
return userAgent
}

View File

@@ -235,6 +235,48 @@ func TestIsKubernetes(t *testing.T) {
}
}
// Tests if the environment we are running is Helm chart.
func TestGetHelmVersion(t *testing.T) {
createTempFile := func(content string) string {
tmpfile, err := ioutil.TempFile("", "helm-testfile-")
if err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
if _, err = tmpfile.Write([]byte(content)); err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
if err = tmpfile.Close(); err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
return tmpfile.Name()
}
filename := createTempFile(
`app="virtuous-rat-minio"
chart="minio-0.1.3"
heritage="Tiller"
pod-template-hash="818089471"`)
defer os.Remove(filename)
testCases := []struct {
filename string
expectedResult string
}{
{"", ""},
{"/tmp/non-existing-file", ""},
{filename, "minio-0.1.3"},
}
for _, testCase := range testCases {
result := getHelmVersion(testCase.filename)
if testCase.expectedResult != result {
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, result)
}
}
}
// Tests if the environment we are running is in docker.
func TestIsDocker(t *testing.T) {
createTempFile := func(content string) string {

View File

@@ -497,7 +497,12 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
}
// Extract incoming metadata if any.
metadata := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
@@ -690,13 +695,15 @@ func getBucketAccessPolicy(objAPI ObjectLayer, bucketName string) (policy.Bucket
policyInfo, err = layer.GetBucketPolicies(bucketName)
case *azureObjects:
policyInfo, err = layer.GetBucketPolicies(bucketName)
case *gcsGateway:
policyInfo, err = layer.GetBucketPolicies(bucketName)
default:
policyInfo, err = readBucketAccessPolicy(objAPI, bucketName)
}
return policyInfo, err
}
// GetBucketPolicy - get bucket policy.
// GetBucketPolicy - get bucket policy for the requested prefix.
func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolicyArgs, reply *GetBucketPolicyRep) error {
objectAPI := web.ObjectAPI()
if objectAPI == nil {
@@ -707,9 +714,12 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
return toJSONError(errAuthentication)
}
policyInfo, err := readBucketAccessPolicy(objectAPI, args.BucketName)
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
return toJSONError(err, args.BucketName)
_, ok := errorCause(err).(PolicyNotFound)
if !ok {
return toJSONError(err, args.BucketName)
}
}
reply.UIVersion = browser.UIVersion
@@ -745,8 +755,8 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication)
}
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
_, ok := errorCause(err).(PolicyNotFound)
if !ok {
@@ -791,7 +801,6 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
}
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
if _, ok := errorCause(err).(PolicyNotFound); !ok {
return toJSONError(err, args.BucketName)
@@ -915,7 +924,7 @@ func presignedGet(host, bucket, object string, expiry int64) string {
signature := getSignature(signingKey, stringToSign)
// Construct the final presigned URL.
return host + path + "?" + query + "&" + "X-Amz-Signature=" + signature
return host + getURLEncodedName(path) + "?" + query + "&" + "X-Amz-Signature=" + signature
}
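The one-line fix above matters for object keys with spaces or non-ASCII characters; a sketch of the difference, assuming getURLEncodedName (the helper used above) leaves path separators intact (example key invented):

// Illustrative: an unencoded key breaks both the URL and the signature.
path := "/mybucket/my object €.txt"
// before: host + path                    -> raw spaces in the URL
// after:  host + getURLEncodedName(path) -> "/mybucket/my%20object%20%E2%82%AC.txt"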
// toJSONError converts regular errors into more user friendly

View File

@@ -144,15 +144,15 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
}
// GetBucketInfo - returns BucketInfo for a bucket.
func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (xl xlObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
return bi, BucketNameInvalid{Bucket: bucket}
}
bucketInfo, err := xl.getBucketInfo(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
return bi, toObjectErr(err, bucket)
}
return bucketInfo, nil
}

Some files were not shown because too many files have changed in this diff.