Mirror of https://github.com/minio/minio.git (synced 2026-02-05 02:10:14 -05:00)
Merge master for release.
@@ -18,6 +18,8 @@ env:
script:
  ## Run all the tests
  - make
  - diff -au <(gofmt -d cmd) <(printf "")
  - diff -au <(gofmt -d pkg) <(printf "")
  - make test GOFLAGS="-timeout 15m -race -v"
  - make coverage
@@ -1,5 +1,7 @@
FROM alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
@@ -9,10 +11,10 @@ WORKDIR /go/src/github.com/minio/
RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps git go musl-dev && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    git checkout release && \
    go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-05-05T01:14:51Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-05-05T01-14-51Z -X github.com/minio/minio/cmd.CommitID=40985cc4e3eec06b7ea82dc34c8d907fd2e7aa12" && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
@@ -1,5 +1,7 @@
FROM resin/aarch64-alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
@@ -9,9 +11,10 @@ WORKDIR /go/src/github.com/minio/
RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps git go musl-dev && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-03-16T21:50:32Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-03-16T21-50-32Z -X github.com/minio/minio/cmd.CommitID=5311eb22fd681a8cd4a46e2a872d46c2352c64e8" && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
@@ -1,5 +1,7 @@
FROM resin/armhf-alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
@@ -9,9 +11,10 @@ WORKDIR /go/src/github.com/minio/
RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps git go musl-dev && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "-X github.com/minio/minio/cmd.Version=2017-03-16T21:50:32Z -X github.com/minio/minio/cmd.ReleaseTag=RELEASE.2017-03-16T21-50-32Z -X github.com/minio/minio/cmd.CommitID=5311eb22fd681a8cd4a46e2a872d46c2352c64e8" && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
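All three build Dockerfiles replace hardcoded `-X` version flags with the output of `buildscripts/gen-ldflags.go`, so the version, release tag, and commit ID are computed at build time instead of being baked into each Dockerfile. A minimal sketch of what such a generator can look like; the field names match the `-X` flags above, but the exact contents of the real script are an assumption:

```
// gen-ldflags sketch: prints -X linker flags for minio version metadata.
// Illustrative only; the real buildscripts/gen-ldflags.go may differ.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	now := time.Now().UTC()
	version := now.Format("2006-01-02T15:04:05Z")
	// Release tags replace ':' with '-' so they are safe in file names.
	releaseTag := "RELEASE." + strings.Replace(version, ":", "-", -1)

	// Ask git for the current commit; fall back to "unknown" outside a checkout.
	commitID := "unknown"
	if out, err := exec.Command("git", "rev-parse", "HEAD").Output(); err == nil {
		commitID = strings.TrimSpace(string(out))
	}

	fmt.Printf("-X github.com/minio/minio/cmd.Version=%s "+
		"-X github.com/minio/minio/cmd.ReleaseTag=%s "+
		"-X github.com/minio/minio/cmd.CommitID=%s\n",
		version, releaseTag, commitID)
}
```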
Dockerfile.release (new file, 23 lines)
@@ -0,0 +1,23 @@
FROM alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps curl && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && apk del .build-deps

EXPOSE 9000

COPY buildscripts/docker-entrypoint.sh /usr/bin/

RUN chmod +x /usr/bin/docker-entrypoint.sh

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/export"]

CMD ["minio"]
Dockerfile.release.aarch64 (new file, 22 lines)
@@ -0,0 +1,22 @@
FROM resin/aarch64-alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps curl && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.minio.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && apk del .build-deps

EXPOSE 9000

COPY buildscripts/docker-entrypoint.sh /usr/bin/

RUN chmod +x /usr/bin/docker-entrypoint.sh

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/export"]

CMD ["minio"]
Dockerfile.release.armhf (new file, 22 lines)
@@ -0,0 +1,22 @@
FROM resin/armhf-alpine:3.5

MAINTAINER Minio Inc <dev@minio.io>

RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps curl && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.minio.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && apk del .build-deps

EXPOSE 9000

COPY buildscripts/docker-entrypoint.sh /usr/bin/

RUN chmod +x /usr/bin/docker-entrypoint.sh

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/export"]

CMD ["minio"]
Makefile (63 lines changed)
@@ -1,7 +1,7 @@
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
BUILD_LDFLAGS := '$(LDFLAGS) -s -w'
BUILD_LDFLAGS := '$(LDFLAGS)'
TAG := latest

HOST ?= $(shell uname)
@@ -56,55 +56,43 @@ endif
all: install

checks:
    @echo -n "Check deps: "
    @echo "Check deps"
    @(env bash $(PWD)/buildscripts/checkdeps.sh)
    @echo "Done."
    @echo -n "Checking project is in GOPATH: "
    @echo "Checking project is in GOPATH"
    @(env bash $(PWD)/buildscripts/checkgopath.sh)
    @echo "Done."

getdeps: checks
    @echo -n "Installing golint: " && go get -u github.com/golang/lint/golint
    @echo "Done."
    @echo -n "Installing gocyclo: " && go get -u github.com/fzipp/gocyclo
    @echo "Done."
    @echo -n "Installing deadcode: " && go get -u github.com/remyoudompheng/go-misc/deadcode
    @echo "Done."
    @echo -n "Installing misspell: " && go get -u github.com/client9/misspell/cmd/misspell
    @echo "Done."
    @echo -n "Installing ineffassign: " && go get -u github.com/gordonklaus/ineffassign
    @echo "Done."
    @echo "Installing golint" && go get -u github.com/golang/lint/golint
    @echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
    @echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
    @echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
    @echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign

verifiers: vet fmt lint cyclo spelling

vet:
    @echo -n "Running $@: "
    @echo "Running $@"
    @go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
    @go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
    @echo "Done."

fmt:
    @echo -n "Running $@: "
    @gofmt -s -l cmd
    @gofmt -s -l pkg
    @echo "Done."
    @echo "Running $@"
    @gofmt -d cmd
    @gofmt -d pkg

lint:
    @echo -n "Running $@: "
    @echo "Running $@"
    @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
    @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
    @echo "Done."

ineffassign:
    @echo -n "Running $@: "
    @echo "Running $@"
    @${GOPATH}/bin/ineffassign .
    @echo "Done."

cyclo:
    @echo -n "Running $@: "
    @echo "Running $@"
    @${GOPATH}/bin/gocyclo -over 100 cmd
    @${GOPATH}/bin/gocyclo -over 100 pkg
    @echo "Done."

build: getdeps verifiers $(UI_ASSETS)
@@ -117,30 +105,30 @@ spelling:
    @${GOPATH}/bin/misspell -error `find docs/`

test: build
    @echo -n "Running all minio testing: "
    @echo "Running all minio testing"
    @go test $(GOFLAGS) .
    @go test $(GOFLAGS) github.com/minio/minio/cmd...
    @go test $(GOFLAGS) github.com/minio/minio/pkg...
    @echo "Done."

coverage: build
    @echo -n "Running all coverage for minio: "
    @echo "Running all coverage for minio"
    @./buildscripts/go-coverage.sh
    @echo "Done."

gomake-all: build
    @echo -n "Installing minio at $(GOPATH)/bin/minio: "
    @echo "Installing minio at $(GOPATH)/bin/minio"
    @go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
    @echo "Done."

pkg-add:
    ${GOPATH}/bin/govendor add $(PKG)
    @echo "Adding new package $(PKG)"
    @${GOPATH}/bin/govendor add $(PKG)

pkg-update:
    ${GOPATH}/bin/govendor update $(PKG)
    @echo "Updating new package $(PKG)"
    @${GOPATH}/bin/govendor update $(PKG)

pkg-remove:
    ${GOPATH}/bin/govendor remove $(PKG)
    @echo "Remove new package $(PKG)"
    @${GOPATH}/bin/govendor remove $(PKG)

pkg-list:
    @$(GOPATH)/bin/govendor list
@@ -154,8 +142,7 @@ experimental: verifiers
    @MINIO_RELEASE=EXPERIMENTAL ./buildscripts/build.sh

clean:
    @echo -n "Cleaning up all the generated files: "
    @echo "Cleaning up all the generated files"
    @find . -name '*.test' | xargs rm -fv
    @rm -rf build
    @rm -rf release
    @echo "Done."
README.md (15 lines changed)
@@ -26,7 +26,20 @@ Install minio packages using [Homebrew](http://brew.sh/)
brew install minio/stable/minio
minio server ~/Photos
```
Note: If you are upgrading minio on macOS, please see instructions [here](https://github.com/minio/minio/blob/master/docs/minio_homebrew.md).
#### Note
If you previously installed minio using `brew install minio` then uninstall minio as shown below

```
brew uninstall minio
```

Then re-install the latest minio using:

```
brew install minio/stable/minio
```

>`brew install minio` and `brew upgrade minio` will no longer install/upgrade the latest minio binaries on macOS. Upstream bugs in golang 1.8 broke Minio brew installer. Use the updated `minio/stable/minio` in your brew paths.

### Binary Download
| Platform| Architecture | URL|
@@ -36,9 +36,9 @@ test_script:
  # Unit tests
  - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
  - mkdir build\coverage
  - go test -timeout 17m -race github.com/minio/minio/cmd...
  - go test -race github.com/minio/minio/pkg...
  - go test -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
  - go test -v -timeout 17m -race github.com/minio/minio/cmd...
  - go test -v -race github.com/minio/minio/pkg...
  - go test -v -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
  - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed

after_test:
@@ -27,7 +27,7 @@ go get github.com/elazarl/go-bindata-assetfs/...
yarn release
```

This generates ui-assets.go in the current direcotry. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``

### Run Minio Browser with live reload
@@ -432,6 +432,8 @@ export const setLoginError = () => {

export const downloadSelected = (url, req, xhr) => {
  return (dispatch) => {
    var anchor = document.createElement('a')
    document.body.appendChild(anchor);
    xhr.open('POST', url, true)
    xhr.responseType = 'blob'

@@ -439,10 +441,20 @@ export const downloadSelected = (url, req, xhr) => {
    if (this.status == 200) {
      dispatch(checkedObjectsReset())
      var blob = new Blob([this.response], {
        type: 'application/zip'
        type: 'octet/stream'
      })
      var blobUrl = window.URL.createObjectURL(blob);
      window.location = blobUrl
      var separator = req.prefix.length > 1 ? '-' : ''

      anchor.href = blobUrl
      anchor.download = req.bucketName+separator+req.prefix.slice(0, -1)+'.zip';

      anchor.click()
      window.URL.revokeObjectURL(blobUrl)
      anchor.remove()
    }
  };
  xhr.send(JSON.stringify(req));
@@ -68,7 +68,7 @@ export default class Browse extends React.Component {
        memory: res.MinioMemory,
        platform: res.MinioPlatform,
        runtime: res.MinioRuntime,
        envVars: res.MinioEnvVars
        info: res.MinioGlobalInfo
      })
      dispatch(actions.setServerInfo(serverInfo))
    })
@@ -463,21 +463,25 @@ export default class Browse extends React.Component {
    }

    if (web.LoggedIn()) {
      storageUsageDetails = <div className="feh-usage">
        <div className="fehu-chart">
          <div style={ { width: usedPercent } }></div>
        </div>
        <ul>
          <li>
            <span>Used: </span>
            { humanize.filesize(total - free) }
          </li>
          <li className="pull-right">
            <span>Free: </span>
            { humanize.filesize(total - used) }
          </li>
        </ul>
      </div>
      if (!(used === 0 && free === 0)) {
        storageUsageDetails = <div className="feh-usage">
          <div className="fehu-chart">
            <div style={ { width: usedPercent } }></div>
          </div>
          <ul>
            <li>
              <span>Used: </span>
              { humanize.filesize(total - free) }
            </li>
            <li className="pull-right">
              <span>Free: </span>
              { humanize.filesize(total - used) }
            </li>
          </ul>
        </div>
      }
    }

    let createButton = ''
@@ -722,11 +726,11 @@ export default class Browse extends React.Component {
  </div>
  <div className="input-group" style={ { display: web.LoggedIn() ? 'block' : 'none' } }>
    <label>
      Expires in
      Expires in (Max 7 days)
    </label>
    <div className="set-expire">
      <div className="set-expire-item">
        <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1, shareObject.object) }></i>
        <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireDays', 1, shareObject.object) } />
        <div className="set-expire-title">
          Days
        </div>
@@ -735,12 +739,14 @@ export default class Browse extends React.Component {
          type="number"
          min={ 0 }
          max={ 7 }
          defaultValue={ 5 } />
          defaultValue={ 5 }
          readOnly="readOnly"
        />
      </div>
      <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1, shareObject.object) }></i>
      <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireDays', -1, shareObject.object) } />
    </div>
    <div className="set-expire-item">
      <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1, shareObject.object) }></i>
      <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireHours', 1, shareObject.object) } />
      <div className="set-expire-title">
        Hours
      </div>
@@ -749,12 +755,14 @@ export default class Browse extends React.Component {
        type="number"
        min={ 0 }
        max={ 23 }
        defaultValue={ 0 } />
        defaultValue={ 0 }
        readOnly="readOnly"
      />
    </div>
    <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1, shareObject.object) }></i>
    <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireHours', -1, shareObject.object) } />
  </div>
  <div className="set-expire-item">
    <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1, shareObject.object) }></i>
    <i className="set-expire-increase" onClick={ this.handleExpireValue.bind(this, 'expireMins', 1, shareObject.object) } />
    <div className="set-expire-title">
      Minutes
    </div>
@@ -763,9 +771,11 @@ export default class Browse extends React.Component {
      type="number"
      min={ 0 }
      max={ 59 }
      defaultValue={ 0 } />
      defaultValue={ 0 }
      readOnly="readOnly"
    />
  </div>
  <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1, shareObject.object) }></i>
  <i className="set-expire-decrease" onClick={ this.handleExpireValue.bind(this, 'expireMins', -1, shareObject.object) } />
</div>
</div>
@@ -7,6 +7,7 @@ import * as actions from '../actions'
class PolicyInput extends Component {
  componentDidMount() {
    const {web, dispatch} = this.props
    this.prefix.focus()
    web.ListAllBucketPolicies({
      bucketName: this.props.currentBucket
    }).then(res => {
@@ -27,8 +28,23 @@ class PolicyInput extends Component {

  handlePolicySubmit(e) {
    e.preventDefault()
    const {web, dispatch} = this.props
    const {web, dispatch, currentBucket} = this.props

    let prefix = currentBucket + '/' + this.prefix.value
    let policy = this.policy.value

    if (!prefix.endsWith('*')) prefix = prefix + '*'

    let prefixAlreadyExists = this.props.policies.some(elem => prefix === elem.prefix)

    if (prefixAlreadyExists) {
      dispatch(actions.showAlert({
        type: 'danger',
        message: "Policy for this prefix already exists."
      }))
      return
    }

    web.SetBucketPolicy({
      bucketName: this.props.currentBucket,
      prefix: this.prefix.value,
@@ -36,8 +52,7 @@ class PolicyInput extends Component {
    })
    .then(() => {
      dispatch(actions.setPolicies([{
        policy: this.policy.value,
        prefix: this.prefix.value + '*',
        policy, prefix
      }, ...this.props.policies]))
      this.prefix.value = ''
    })
@@ -34,23 +34,12 @@ class SettingsModal extends React.Component {

    let accessKeyEnv = ''
    let secretKeyEnv = ''
    // Check environment variables first. They may or may not have been
    // loaded already; they load in Browse#componentDidMount.
    if (serverInfo.envVars) {
      serverInfo.envVars.forEach(envVar => {
        let keyVal = envVar.split('=')
        if (keyVal[0] == 'MINIO_ACCESS_KEY') {
          accessKeyEnv = keyVal[1]
        } else if (keyVal[0] == 'MINIO_SECRET_KEY') {
          secretKeyEnv = keyVal[1]
        }
      })
    }
    if (accessKeyEnv != '' || secretKeyEnv != '') {
    // Check environment variables first.
    if (serverInfo.info.isEnvCreds) {
      dispatch(actions.setSettings({
        accessKey: accessKeyEnv,
        secretKey: secretKeyEnv,
        keysReadOnly: true
        accessKey: 'xxxxxxxxx',
        secretKey: 'xxxxxxxxx',
        keysReadOnly: true
      }))
    } else {
      web.GetAuth()
@@ -183,6 +183,17 @@ select.form-control {
.set-expire {
  border: 1px solid @input-border;
  margin: 35px 0 30px;
  position: relative;

  &:before {
    content: '';
    position: absolute;
    left: 0;
    top: 0;
    width: 100%;
    height: 100%;
    z-index: 1;
  }
}

.set-expire-item {
@@ -191,6 +202,7 @@ select.form-control {
  display: table-cell;
  width: 1%;
  text-align: center;
  .user-select(none);

  &:not(:last-child) {
    border-right: 1px solid @input-border;
@@ -209,6 +221,7 @@ select.form-control {
  left: -8px;

  input {
    .user-select(none);
    font-size: 20px;
    text-align: center;
    position: relative;
File diff suppressed because one or more lines are too long
@@ -5,7 +5,7 @@ _init() {
    LDFLAGS=$(go run buildscripts/gen-ldflags.go)

    # Extract release tag
    release_tag=$(echo $LDFLAGS | awk {'print $4'} | cut -f2 -d=)
    release_tag=$(echo $LDFLAGS | awk {'print $6'} | cut -f2 -d=)

    # Verify release tag.
    if [ -z "$release_tag" ]; then
@@ -28,6 +28,7 @@ _init() {
    ## System binaries
    CP=`which cp`
    SHASUM=`which shasum`
    SHA256SUM="${SHASUM} -a 256"
    SED=`which sed`
}

@@ -43,58 +44,36 @@ go_build() {
    release_bin="$release_str/$os-$arch/$(basename $package).$release_tag"
    # Release binary downloadable name
    release_real_bin="$release_str/$os-$arch/$(basename $package)"
    # Release shasum name
    release_shasum="$release_str/$os-$arch/$(basename $package).shasum"

    # Release sha1sum name
    release_shasum="$release_str/$os-$arch/$(basename $package).${release_tag}.shasum"
    # Release sha1sum default
    release_shasum_default="$release_str/$os-$arch/$(basename $package).shasum"

    # Release sha256sum name
    release_sha256sum="$release_str/$os-$arch/$(basename $package).${release_tag}.sha256sum"
    # Release sha256sum default
    release_sha256sum_default="$release_str/$os-$arch/$(basename $package).sha256sum"

    # Go build to build the binary.
    if [ "${arch}" == "arm" ]; then
        # Release binary downloadable name
        release_real_bin_6="$release_str/$os-${arch}6vl/$(basename $package)"
        CGO_ENABLED=0 GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin

        release_bin_6="$release_str/$os-${arch}6vl/$(basename $package).$release_tag"
        ## Support building for ARM6vl
        GOARM=6 GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin_6

        ## Copy
        $CP -p $release_bin_6 $release_real_bin_6

        # Release shasum name
        release_shasum_6="$release_str/$os-${arch}6vl/$(basename $package).shasum"

        # Calculate shasum
        shasum_str=$(${SHASUM} ${release_bin_6})
        echo ${shasum_str} | $SED "s/$release_str\/$os-${arch}6vl\///g" > $release_shasum_6

        # Release binary downloadable name
        release_real_bin_7="$release_str/$os-$arch/$(basename $package)"

        release_bin_7="$release_str/$os-$arch/$(basename $package).$release_tag"
        ## Support building for ARM7vl
        GOARM=7 GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin_7

        ## Copy
        $CP -p $release_bin_7 $release_real_bin_7

        # Release shasum name
        release_shasum_7="$release_str/$os-$arch/$(basename $package).shasum"

        # Calculate shasum
        shasum_str=$(${SHASUM} ${release_bin_7})
        echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum_7
        # Create copy
        if [ $os == "windows" ]; then
            $CP -p $release_bin ${release_real_bin}.exe
        else
        GOOS=$os GOARCH=$arch go build --ldflags "-s -w ${LDFLAGS}" -o $release_bin

        # Create copy
        if [ $os == "windows" ]; then
            $CP -p $release_bin ${release_real_bin}.exe
        else
            $CP -p $release_bin $release_real_bin
        fi

        # Calculate shasum
        shasum_str=$(${SHASUM} ${release_bin})
        echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum
        $CP -p $release_bin $release_real_bin
    fi

    # Calculate sha1sum
    shasum_str=$(${SHASUM} ${release_bin})
    echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum
    $CP -p $release_shasum $release_shasum_default

    # Calculate sha256sum
    sha256sum_str=$(${SHA256SUM} ${release_bin})
    echo ${sha256sum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_sha256sum
    $CP -p $release_sha256sum $release_sha256sum_default
}

main() {
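The build script now publishes both a versioned `.shasum` (SHA-1, via `shasum`) and a `.sha256sum` (via `shasum -a 256`) next to each binary, plus unversioned default copies. For illustration only, a small Go sketch of producing both digest lines in the `shasum`-style `<hex digest>  <basename>` format; the program and its names are hypothetical, not part of the repository:

```
package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"fmt"
	"hash"
	"io"
	"os"
	"path/filepath"
)

// digestLine mimics shasum output: "<hex digest>  <basename>".
func digestLine(h hash.Hash, path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x  %s", h.Sum(nil), filepath.Base(path)), nil
}

func main() {
	path := os.Args[1]
	for _, algo := range []struct {
		name string
		h    hash.Hash
	}{{"sha1", sha1.New()}, {"sha256", sha256.New()}} {
		line, err := digestLine(algo.h, path)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Println(algo.name+":", line)
	}
}
```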
@@ -241,9 +241,9 @@ type ServerInfoData struct {

// ServerInfo holds server information result of one node
type ServerInfo struct {
	Error error
	Addr string
	Data *ServerInfoData
	Error string `json:"error"`
	Addr string `json:"addr"`
	Data *ServerInfoData `json:"data"`
}

// ServerInfoHandler - GET /?info
@@ -276,7 +276,7 @@ func (adminAPI adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *htt
	serverInfoData, err := peer.cmdRunner.ServerInfoData()
	if err != nil {
		errorIf(err, "Unable to get server info from %s.", peer.addr)
		reply[idx].Error = err
		reply[idx].Error = err.Error()
		return
	}
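The `ServerInfo.Error` field changes from `error` to `string` because Go's `error` is an interface and most concrete error types carry the message in an unexported field: `encoding/json` renders them as `{}`, so the text is lost on the wire. Converting with `err.Error()` at the call site preserves it. A quick self-contained illustration of the difference:

```
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type withErr struct {
	Error error `json:"error"`
}

type withString struct {
	Error string `json:"error"`
}

func main() {
	err := errors.New("disk offline")

	a, _ := json.Marshal(withErr{Error: err})
	fmt.Println(string(a)) // {"error":{}} - the message is lost

	b, _ := json.Marshal(withString{Error: err.Error()})
	fmt.Println(string(b)) // {"error":"disk offline"}
}
```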
@@ -731,7 +731,7 @@ func TestListObjectsHealHandler(t *testing.T) {
	}
	defer adminTestBed.TearDown()

	err = adminTestBed.objLayer.MakeBucket("mybucket")
	err = adminTestBed.objLayer.MakeBucketWithLocation("mybucket", "")
	if err != nil {
		t.Fatalf("Failed to make bucket - %v", err)
	}
@@ -859,7 +859,7 @@ func TestHealBucketHandler(t *testing.T) {
	}
	defer adminTestBed.TearDown()

	err = adminTestBed.objLayer.MakeBucket("mybucket")
	err = adminTestBed.objLayer.MakeBucketWithLocation("mybucket", "")
	if err != nil {
		t.Fatalf("Failed to make bucket - %v", err)
	}
@@ -936,7 +936,7 @@ func TestHealObjectHandler(t *testing.T) {
	// Create an object myobject under bucket mybucket.
	bucketName := "mybucket"
	objName := "myobject"
	err = adminTestBed.objLayer.MakeBucket(bucketName)
	err = adminTestBed.objLayer.MakeBucketWithLocation(bucketName, "")
	if err != nil {
		t.Fatalf("Failed to make bucket %s - %v", bucketName, err)
	}
@@ -1067,7 +1067,7 @@ func TestHealUploadHandler(t *testing.T) {
	// Create an object myobject under bucket mybucket.
	bucketName := "mybucket"
	objName := "myobject"
	err = adminTestBed.objLayer.MakeBucket(bucketName)
	err = adminTestBed.objLayer.MakeBucketWithLocation(bucketName, "")
	if err != nil {
		t.Fatalf("Failed to make bucket %s - %v", bucketName, err)
	}
@@ -1314,7 +1314,7 @@ func TestAdminServerInfo(t *testing.T) {
	if len(serverInfo.Addr) == 0 {
		t.Error("Expected server address to be non empty")
	}
	if serverInfo.Error != nil {
	if serverInfo.Error != "" {
		t.Errorf("Unexpected error = %v\n", serverInfo.Error)
	}
	if serverInfo.Data.StorageInfo.Free == 0 {
@@ -1455,7 +1455,7 @@ func TestListHealUploadsHandler(t *testing.T) {
	}
	defer adminTestBed.TearDown()

	err = adminTestBed.objLayer.MakeBucket("mybucket")
	err = adminTestBed.objLayer.MakeBucketWithLocation("mybucket", "")
	if err != nil {
		t.Fatalf("Failed to make bucket - %v", err)
	}
@@ -149,6 +149,7 @@ const (
	ErrAdminInvalidAccessKey
	ErrAdminInvalidSecretKey
	ErrAdminConfigNoQuorum
	ErrInsecureClientRequest
)

// error code to APIError structure, these fields carry respective
@@ -230,7 +231,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		HTTPStatusCode: http.StatusInternalServerError,
	},
	ErrInvalidAccessKeyID: {
		Code: "InvalidAccessKeyID",
		Code: "InvalidAccessKeyId",
		Description: "The access key ID you provided does not exist in our records.",
		HTTPStatusCode: http.StatusForbidden,
	},
@@ -618,6 +619,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description: "Configuration update failed because server quorum was not met",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrInsecureClientRequest: {
		Code: "XMinioInsecureClientRequest",
		Description: "Cannot respond to plain-text request from TLS-encrypted server",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// Add your error structure here.
}
@@ -62,8 +62,8 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
	w.Header().Set("Last-Modified", lastModified)

	// Set Etag if available.
	if objInfo.MD5Sum != "" {
		w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
	if objInfo.ETag != "" {
		w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
	}

	// Set all other user defined metadata.

@@ -321,8 +321,8 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max
	}
	content.Key = object.Name
	content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
	if object.MD5Sum != "" {
		content.ETag = "\"" + object.MD5Sum + "\""
	if object.ETag != "" {
		content.ETag = "\"" + object.ETag + "\""
	}
	content.Size = object.Size
	content.StorageClass = globalMinioDefaultStorageClass
@@ -370,8 +370,8 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter
	}
	content.Key = object.Name
	content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
	if object.MD5Sum != "" {
		content.ETag = "\"" + object.MD5Sum + "\""
	if object.ETag != "" {
		content.ETag = "\"" + object.ETag + "\""
	}
	content.Size = object.Size
	content.StorageClass = globalMinioDefaultStorageClass
@@ -113,13 +113,13 @@ func checkRequestAuthType(r *http.Request, bucket, policyAction, region string)
	// Signature V2 validation.
	s3Error := isReqAuthenticatedV2(r)
	if s3Error != ErrNone {
		errorIf(errSignatureMismatch, dumpRequest(r))
		errorIf(errSignatureMismatch, "%s", dumpRequest(r))
	}
	return s3Error
case authTypeSigned, authTypePresigned:
	s3Error := isReqAuthenticated(r, region)
	if s3Error != ErrNone {
		errorIf(errSignatureMismatch, dumpRequest(r))
		errorIf(errSignatureMismatch, "%s", dumpRequest(r))
	}
	return s3Error
}
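Passing `dumpRequest(r)` directly as the format argument is exactly what `go tool vet -printf` flags (and the Makefile in this same commit runs vet over `cmd` and `pkg`): any `%` in the dumped request would be interpreted as a formatting verb. Supplying an explicit `"%s"` format treats the dump as plain data. A small demonstration of the failure mode; the `logf` helper is hypothetical and stands in for `errorIf`:

```
package main

import "fmt"

// logf stands in for a printf-style logger such as errorIf.
func logf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	dump := "GET /bucket/obj%20name HTTP/1.1" // URL-encoded space

	// Bad: "%20n" is parsed as a formatting verb, the output is mangled,
	// and go vet's printf check complains about the call.
	logf(dump)

	// Good: the dump is passed as an argument, not as the format.
	logf("%s", dump)
}
```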
@@ -52,10 +52,10 @@ type authConfig struct {

// AuthRPCClient is a authenticated RPC client which does authentication before doing Call().
type AuthRPCClient struct {
	sync.Mutex            // Mutex to lock this object.
	rpcClient *RPCClient  // Reconnectable RPC client to make any RPC call.
	config authConfig     // Authentication configuration information.
	authToken string      // Authentication token.
	sync.RWMutex          // Mutex to lock this object.
	rpcClient *RPCClient  // Reconnectable RPC client to make any RPC call.
	config authConfig     // Authentication configuration information.
	authToken string      // Authentication token.
}

// newAuthRPCClient - returns a JWT based authenticated (go) rpc client, which does automatic reconnect.
@@ -78,33 +78,43 @@ func newAuthRPCClient(config authConfig) *AuthRPCClient {
	}
}

// Login - a jwt based authentication is performed with rpc server.
// Login a JWT based authentication is performed with rpc server.
func (authClient *AuthRPCClient) Login() (err error) {
	// Login should be attempted one at a time.
	//
	// The reason for large region lock here is
	// to avoid two simultaneous login attempts
	// racing over each other.
	//
	// #1 Login() gets the lock proceeds to login.
	// #2 Login() waits for the unlock to happen
	//    after login in #1.
	// #1 Successfully completes login saves the
	//    newly acquired token.
	// #2 Successfully gets the lock and proceeds,
	//    but since we have acquired the token
	//    already the call quickly returns.
	authClient.Lock()
	defer authClient.Unlock()

	// Return if already logged in.
	if authClient.authToken != "" {
		return nil
	// Attempt to login if not logged in already.
	if authClient.authToken == "" {
		// Login to authenticate and acquire a new auth token.
		var (
			loginMethod = authClient.config.serviceName + loginMethodName
			loginArgs = LoginRPCArgs{
				Username: authClient.config.accessKey,
				Password: authClient.config.secretKey,
				Version: Version,
				RequestTime: UTCNow(),
			}
			loginReply = LoginRPCReply{}
		)
		if err = authClient.rpcClient.Call(loginMethod, &loginArgs, &loginReply); err != nil {
			return err
		}
		authClient.authToken = loginReply.AuthToken
	}

	// Call login.
	args := LoginRPCArgs{
		Username: authClient.config.accessKey,
		Password: authClient.config.secretKey,
		Version: Version,
		RequestTime: UTCNow(),
	}

	reply := LoginRPCReply{}
	serviceMethod := authClient.config.serviceName + loginMethodName
	if err = authClient.rpcClient.Call(serviceMethod, &args, &reply); err != nil {
		return err
	}

	// Logged in successfully.
	authClient.authToken = reply.AuthToken

	return nil
}

@@ -112,17 +122,17 @@ func (authClient *AuthRPCClient) Login() (err error) {
func (authClient *AuthRPCClient) call(serviceMethod string, args interface {
	SetAuthToken(authToken string)
}, reply interface{}) (err error) {
	// On successful login, execute RPC call.
	if err = authClient.Login(); err == nil {
		authClient.Lock()
		// Set token and timestamp before the rpc call.
		args.SetAuthToken(authClient.authToken)
		authClient.Unlock()
	if err = authClient.Login(); err != nil {
		return err
	} // On successful login, execute RPC call.

		// Do RPC call.
		err = authClient.rpcClient.Call(serviceMethod, args, reply)
	}
	return err
	authClient.RLock()
	// Set token before the rpc call.
	args.SetAuthToken(authClient.authToken)
	authClient.RUnlock()

	// Do an RPC call.
	return authClient.rpcClient.Call(serviceMethod, args, reply)
}

// Call executes RPC call till success or globalAuthRPCRetryThreshold on ErrShutdown.
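The rewritten `Login` holds the write lock only while checking and acquiring the token, and `call` then needs just a read lock to stamp the token onto each request, so concurrent RPCs no longer serialize on the login mutex. The pattern reduced to its essentials; a sketch under those assumptions, not the minio implementation:

```
package main

import (
	"fmt"
	"sync"
)

type client struct {
	sync.RWMutex
	token string
}

// login acquires a token once; later callers find it already set and return fast.
func (c *client) login() error {
	c.Lock()
	defer c.Unlock()
	if c.token == "" {
		c.token = "jwt-from-server" // stand-in for the real login RPC
	}
	return nil
}

// call logs in if needed, then reads the token under the cheaper read lock.
func (c *client) call(method string) error {
	if err := c.login(); err != nil {
		return err
	}
	c.RLock()
	token := c.token
	c.RUnlock()
	fmt.Printf("calling %s with token %q\n", method, token)
	return nil
}

func main() {
	c := &client{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.call("Storage.ReadFile") // safe from many goroutines
		}()
	}
	wg.Wait()
}
```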
@@ -39,7 +39,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
	// obtains random bucket name.
	bucket := getRandomBucketName()
	// create bucket.
	err = obj.MakeBucket(bucket)
	err = obj.MakeBucketWithLocation(bucket, "")
	if err != nil {
		b.Fatal(err)
	}
@@ -49,7 +49,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
	// generate md5sum for the generated data.
	// md5sum of the data to written is required as input for PutObject.
	metadata := make(map[string]string)
	metadata["md5Sum"] = getMD5Hash(textData)
	metadata["etag"] = getMD5Hash(textData)
	sha256sum := ""
	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
	b.ReportAllocs()
@@ -61,8 +61,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
		if err != nil {
			b.Fatal(err)
		}
		if objInfo.MD5Sum != metadata["md5Sum"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
		if objInfo.ETag != metadata["etag"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
		}
	}
	// Benchmark ends here. Stop timer.
@@ -78,22 +78,22 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
	object := getRandomObjectName()

	// create bucket.
	err = obj.MakeBucket(bucket)
	err = obj.MakeBucketWithLocation(bucket, "")
	if err != nil {
		b.Fatal(err)
	}

	objSize := 128 * humanize.MiByte

	// PutObjectPart returns md5Sum of the object inserted.
	// md5Sum variable is assigned with that value.
	var md5Sum, uploadID string
	// PutObjectPart returns etag of the object inserted.
	// etag variable is assigned with that value.
	var etag, uploadID string
	// get text data generated for number of bytes equal to object size.
	textData := generateBytesData(objSize)
	// generate md5sum for the generated data.
	// md5sum of the data to written is required as input for NewMultipartUpload.
	metadata := make(map[string]string)
	metadata["md5Sum"] = getMD5Hash(textData)
	metadata["etag"] = getMD5Hash(textData)
	sha256sum := ""
	uploadID, err = obj.NewMultipartUpload(bucket, object, metadata)
	if err != nil {
@@ -115,14 +115,14 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
			textPartData = textData[j*partSize:]
		}
		metadata := make(map[string]string)
		metadata["md5Sum"] = getMD5Hash([]byte(textPartData))
		metadata["etag"] = getMD5Hash([]byte(textPartData))
		var partInfo PartInfo
		partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["md5Sum"], sha256sum)
		partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["etag"], sha256sum)
		if err != nil {
			b.Fatal(err)
		}
		if partInfo.ETag != metadata["md5Sum"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, md5Sum, metadata["md5Sum"])
		if partInfo.ETag != metadata["etag"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, etag, metadata["etag"])
		}
	}
}
@@ -199,7 +199,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
	// obtains random bucket name.
	bucket := getRandomBucketName()
	// create bucket.
	err = obj.MakeBucket(bucket)
	err = obj.MakeBucketWithLocation(bucket, "")
	if err != nil {
		b.Fatal(err)
	}
@@ -208,19 +208,19 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
	for i := 0; i < 10; i++ {
		// get text data generated for number of bytes equal to object size.
		textData := generateBytesData(objSize)
		// generate md5sum for the generated data.
		// md5sum of the data to written is required as input for PutObject.
		// generate etag for the generated data.
		// etag of the data to written is required as input for PutObject.
		// PutObject is the functions which writes the data onto the FS/XL backend.
		metadata := make(map[string]string)
		metadata["md5Sum"] = getMD5Hash(textData)
		metadata["etag"] = getMD5Hash(textData)
		// insert the object.
		var objInfo ObjectInfo
		objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
		if err != nil {
			b.Fatal(err)
		}
		if objInfo.MD5Sum != metadata["md5Sum"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
		if objInfo.ETag != metadata["etag"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
		}
	}

@@ -307,7 +307,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
	// obtains random bucket name.
	bucket := getRandomBucketName()
	// create bucket.
	err = obj.MakeBucket(bucket)
	err = obj.MakeBucketWithLocation(bucket, "")
	if err != nil {
		b.Fatal(err)
	}
@@ -317,7 +317,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
	// generate md5sum for the generated data.
	// md5sum of the data to written is required as input for PutObject.
	metadata := make(map[string]string)
	metadata["md5Sum"] = getMD5Hash([]byte(textData))
	metadata["etag"] = getMD5Hash([]byte(textData))
	sha256sum := ""
	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
	b.ReportAllocs()
@@ -332,8 +332,8 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
		if err != nil {
			b.Fatal(err)
		}
		if objInfo.MD5Sum != metadata["md5Sum"] {
			b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.MD5Sum, metadata["md5Sum"])
		if objInfo.ETag != metadata["etag"] {
			b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.ETag, metadata["etag"])
		}
		i++
	}
@@ -355,7 +355,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
	// obtains random bucket name.
	bucket := getRandomBucketName()
	// create bucket.
	err = obj.MakeBucket(bucket)
	err = obj.MakeBucketWithLocation(bucket, "")
	if err != nil {
		b.Fatal(err)
	}
@@ -367,7 +367,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
	// md5sum of the data to written is required as input for PutObject.
	// PutObject is the functions which writes the data onto the FS/XL backend.
	metadata := make(map[string]string)
	metadata["md5Sum"] = getMD5Hash([]byte(textData))
	metadata["etag"] = getMD5Hash([]byte(textData))
	sha256sum := ""
	// insert the object.
	var objInfo ObjectInfo
@@ -375,8 +375,8 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
	if err != nil {
		b.Fatal(err)
	}
	if objInfo.MD5Sum != metadata["md5Sum"] {
		b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
	if objInfo.ETag != metadata["etag"] {
		b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
	}
}
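Throughout the benchmarks the metadata key `"md5Sum"` becomes `"etag"` and `ObjectInfo.MD5Sum` becomes `ObjectInfo.ETag`, matching S3 terminology; for single-part uploads the value is still the hex MD5 digest of the payload. A sketch of the digest helper the tests rely on, assuming `getMD5Hash` has the obvious behaviour:

```
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// getMD5Hash returns the hex-encoded MD5 digest of data, which is what a
// single-part S3 upload reports back as its ETag (without the quotes).
func getMD5Hash(data []byte) string {
	sum := md5.Sum(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	etag := getMD5Hash([]byte("hello, minio"))
	fmt.Println(etag) // stable digest; the server must echo it as the ETag
}
```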
@@ -83,6 +83,10 @@ func enforceBucketPolicy(bucket, action, resource, referer string, queryParams u

// Check if the action is allowed on the bucket/prefix.
func isBucketActionAllowed(action, bucket, prefix string) bool {
	if globalBucketPolicies == nil {
		return false
	}

	policy := globalBucketPolicies.GetBucketPolicy(bucket)
	if policy == nil {
		return false
@@ -389,7 +393,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
	defer bucketLock.Unlock()

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucket(bucket)
	err := objectAPI.MakeBucketWithLocation(bucket, "")
	if err != nil {
		errorIf(err, "Unable to create a bucket.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -535,7 +539,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	w.Header().Set("ETag", `"`+objInfo.MD5Sum+`"`)
	w.Header().Set("ETag", `"`+objInfo.ETag+`"`)
	w.Header().Set("Location", getObjectLocation(bucket, object))

	// Get host and port from Request.RemoteAddr.
@@ -568,7 +572,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
	resp := encodeResponse(PostResponse{
		Bucket: objInfo.Bucket,
		Key: objInfo.Name,
		ETag: `"` + objInfo.MD5Sum + `"`,
		ETag: `"` + objInfo.ETag + `"`,
		Location: getObjectLocation(objInfo.Bucket, objInfo.Name),
	})
	writeResponse(w, http.StatusCreated, resp, "application/xml")
@@ -68,7 +68,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
	locationResponse: []byte(""),
	errorResponse: APIErrorResponse{
		Resource: "/" + bucketName + "/",
		Code: "InvalidAccessKeyID",
		Code: "InvalidAccessKeyId",
		Message: "The access key ID you provided does not exist in our records.",
	},
	shouldPass: false,
@@ -771,3 +771,36 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	// `ExecObjectLayerAPINilTest` manages the operation.
	ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
}

func TestIsBucketActionAllowed(t *testing.T) {
	ExecObjectLayerAPITest(t, testIsBucketActionAllowedHandler, []string{"BucketLocation"})
}

func testIsBucketActionAllowedHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials credential, t *testing.T) {

	testCases := []struct {
		// input.
		action string
		bucket string
		prefix string
		isGlobalPoliciesNil bool
		// flag indicating whether the test should pass.
		shouldPass bool
	}{
		{"s3:GetBucketLocation", "mybucket", "abc", true, false},
		{"s3:ListObject", "mybucket", "abc", false, false},
	}
	for i, testCase := range testCases {
		if testCase.isGlobalPoliciesNil {
			globalBucketPolicies = nil
		} else {
			initBucketPolicies(obj)
		}
		isAllowed := isBucketActionAllowed(testCase.action, testCase.bucket, testCase.prefix)
		if isAllowed != testCase.shouldPass {
			t.Errorf("Case %d: Expected the response status to be `%t`, but instead found `%t`", i+1, testCase.shouldPass, isAllowed)
		}
	}
}
@@ -17,6 +17,7 @@
package cmd

import (
	"errors"
	"strings"

	"github.com/minio/minio-go/pkg/set"
@@ -111,20 +112,21 @@ func checkARN(arn, arnType string) APIErrorCode {
	if !strings.HasPrefix(arn, arnType) {
		return ErrARNNotification
	}
	if !strings.HasPrefix(arn, arnType+serverConfig.GetRegion()+":") {
		return ErrRegionNotification
	}
	account := strings.SplitN(strings.TrimPrefix(arn, arnType+serverConfig.GetRegion()+":"), ":", 2)
	switch len(account) {
	case 1:
		// This means ARN is malformed, account should have min of 2elements.
	strs := strings.SplitN(arn, ":", -1)
	if len(strs) != 6 {
		return ErrARNNotification
	case 2:
		// Account topic id or topic name cannot be empty.
		if account[0] == "" || account[1] == "" {
			return ErrARNNotification
		}
	}
	if serverConfig.GetRegion() != "" {
		region := strs[3]
		if region != serverConfig.GetRegion() {
			return ErrRegionNotification
		}
	}
	accountID := strs[4]
	resource := strs[5]
	if accountID == "" || resource == "" {
		return ErrARNNotification
	}
	return ErrNone
}

@@ -257,29 +259,39 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
// - kafka
// - webhook
func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
	mSqs = arnSQS{}
	if !strings.HasPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":") {
		return mSqs
	strs := strings.SplitN(queueARN, ":", -1)
	if len(strs) != 6 {
		return
	}
	sqsType := strings.TrimPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":")
	switch {
	case hasSuffix(sqsType, queueTypeAMQP):
	if serverConfig.GetRegion() != "" {
		region := strs[3]
		if region != serverConfig.GetRegion() {
			return
		}
	}
	sqsType := strs[5]
	switch sqsType {
	case queueTypeAMQP:
		mSqs.Type = queueTypeAMQP
	case hasSuffix(sqsType, queueTypeNATS):
	case queueTypeNATS:
		mSqs.Type = queueTypeNATS
	case hasSuffix(sqsType, queueTypeElastic):
	case queueTypeElastic:
		mSqs.Type = queueTypeElastic
	case hasSuffix(sqsType, queueTypeRedis):
	case queueTypeRedis:
		mSqs.Type = queueTypeRedis
	case hasSuffix(sqsType, queueTypePostgreSQL):
	case queueTypePostgreSQL:
		mSqs.Type = queueTypePostgreSQL
	case hasSuffix(sqsType, queueTypeMySQL):
	case queueTypeMySQL:
		mSqs.Type = queueTypeMySQL
	case hasSuffix(sqsType, queueTypeKafka):
	case queueTypeKafka:
		mSqs.Type = queueTypeKafka
	case hasSuffix(sqsType, queueTypeWebhook):
	case queueTypeWebhook:
		mSqs.Type = queueTypeWebhook
	default:
		errorIf(errors.New("invalid SQS type"), "SQS type: %s", sqsType)
	} // Add more queues here.
	mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type)
	return mSqs

	mSqs.AccountID = strs[4]

	return
}
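Both `checkARN` and `unmarshalSqsARN` now split the ARN on `:` and expect exactly six fields (`arn:minio:sqs:<region>:<account-id>:<resource>`), validating the region only when the server has one configured, instead of the old prefix/suffix matching. The parsing rule in isolation; a standalone sketch, with the helper name my own invention:

```
package main

import (
	"fmt"
	"strings"
)

// parseMinioSqsARN splits "arn:minio:sqs:<region>:<account>:<resource>" into
// its fields, mirroring the six-element check in the patch. serverRegion == ""
// means "accept any region", as in the new code.
func parseMinioSqsARN(arn, serverRegion string) (region, account, resource string, ok bool) {
	strs := strings.Split(arn, ":")
	if len(strs) != 6 || strs[0] != "arn" || strs[1] != "minio" || strs[2] != "sqs" {
		return "", "", "", false
	}
	region, account, resource = strs[3], strs[4], strs[5]
	if account == "" || resource == "" {
		return "", "", "", false
	}
	if serverRegion != "" && region != serverRegion {
		return "", "", "", false
	}
	return region, account, resource, true
}

func main() {
	fmt.Println(parseMinioSqsARN("arn:minio:sqs:us-east-1:1:webhook", "us-east-1")) // accepted
	fmt.Println(parseMinioSqsARN("arn:minio:sqs:us-west-1:1:redis", "us-east-1"))   // rejected: wrong region
}
```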
@@ -259,11 +259,6 @@ func TestQueueARN(t *testing.T) {
		queueARN: "arn:minio:sns:us-east-1:1:listen",
		errCode: ErrARNNotification,
	},
	// Invalid region 'us-west-1' in queue arn.
	{
		queueARN: "arn:minio:sqs:us-west-1:1:redis",
		errCode: ErrRegionNotification,
	},
	// Invalid queue name empty in queue arn.
	{
		queueARN: "arn:minio:sqs:us-east-1:1:",
@@ -298,6 +293,37 @@ func TestQueueARN(t *testing.T) {
			t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode)
		}
	}

	// Test when server region is set.
	rootPath, err = newTestConfig("us-east-1")
	if err != nil {
		t.Fatalf("unable initialize config file, %s", err)
	}
	defer removeAll(rootPath)

	testCases = []struct {
		queueARN string
		errCode APIErrorCode
	}{
		// Incorrect region should produce error.
		{
			queueARN: "arn:minio:sqs:us-west-1:1:webhook",
			errCode: ErrRegionNotification,
		},
		// Correct region should not produce error.
		{
			queueARN: "arn:minio:sqs:us-east-1:1:webhook",
			errCode: ErrNone,
		},
	}

	// Validate all tests for queue arn.
	for i, testCase := range testCases {
		errCode := checkQueueARN(testCase.queueARN)
		if testCase.errCode != errCode {
			t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode)
		}
	}
}

// Test unmarshal queue arn.
@@ -337,11 +363,6 @@ func TestUnmarshalSQSARN(t *testing.T) {
		queueARN: "",
		Type: "",
	},
	// Invalid region 'us-west-1' in queue arn.
	{
		queueARN: "arn:minio:sqs:us-west-1:1:redis",
		Type: "",
	},
	// Partial queue arn.
	{
		queueARN: "arn:minio:sqs:",
@@ -361,4 +382,33 @@ func TestUnmarshalSQSARN(t *testing.T) {
		}
	}

	// Test when the server region is set.
	rootPath, err = newTestConfig("us-east-1")
	if err != nil {
		t.Fatalf("unable initialize config file, %s", err)
	}
	defer removeAll(rootPath)

	testCases = []struct {
		queueARN string
		Type string
	}{
		// Incorrect region in ARN returns empty mSqs.Type
		{
			queueARN: "arn:minio:sqs:us-west-1:1:webhook",
			Type: "",
		},
		// Correct region in ARN returns valid mSqs.Type
		{
			queueARN: "arn:minio:sqs:us-east-1:1:webhook",
			Type: "webhook",
		},
	}

	for i, testCase := range testCases {
		mSqs := unmarshalSqsARN(testCase.queueARN)
		if testCase.Type != mSqs.Type {
			t.Errorf("Test %d: Expected \"%s\", got \"%s\"", i+1, testCase.Type, mSqs.Type)
		}
	}
}
@@ -251,7 +251,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	initBucketPolicies(obj)

	bucketName1 := fmt.Sprintf("%s-1", bucketName)
	if err := obj.MakeBucket(bucketName1); err != nil {
	if err := obj.MakeBucketWithLocation(bucketName1, ""); err != nil {
		t.Fatal(err)
	}
@@ -27,77 +27,125 @@ import (
// DO NOT EDIT following message template, please open a github issue to discuss instead.
var configMigrateMSGTemplate = "Configuration file %s migrated from version '%s' to '%s' successfully.\n"

// Migrates all config versions from "1" to "18".
func migrateConfig() error {
	// Purge all configs with version '1'.
	// Purge all configs with version '1',
	// this is a special case since version '1' used
	// to be a filename 'fsUsers.json' not 'config.json'.
	if err := purgeV1(); err != nil {
		return err
	}
	// Migrate version '2' to '3'.
	if err := migrateV2ToV3(); err != nil {
		return err
	}
	// Migrate version '3' to '4'.
	if err := migrateV3ToV4(); err != nil {
		return err
	}
	// Migrate version '4' to '5'.
	if err := migrateV4ToV5(); err != nil {
		return err
	}
	// Migrate version '5' to '6'.
	if err := migrateV5ToV6(); err != nil {
		return err
	}
	// Migrate version '6' to '7'.
	if err := migrateV6ToV7(); err != nil {
		return err
	}
	// Migrate version '7' to '8'.
	if err := migrateV7ToV8(); err != nil {
		return err
	}
	// Migrate version '8' to '9'.
	if err := migrateV8ToV9(); err != nil {
		return err
	}
	// Migrate version '9' to '10'.
	if err := migrateV9ToV10(); err != nil {
		return err
	}
	// Migrate version '10' to '11'.
	if err := migrateV10ToV11(); err != nil {
		return err
	}
	// Migrate version '11' to '12'.
	if err := migrateV11ToV12(); err != nil {
		return err
	}
	// Migrate version '12' to '13'.
	if err := migrateV12ToV13(); err != nil {
		return err
	}
	// Migrate version '13' to '14'.
	if err := migrateV13ToV14(); err != nil {
		return err
	}
	// Migrate version '14' to '15'.
	if err := migrateV14ToV15(); err != nil {
		return err
	}
	// Migrate version '15' to '16'.
	if err := migrateV15ToV16(); err != nil {
		return err
	}
	// Migrate version '16' to '17'.
	if err := migrateV16ToV17(); err != nil {
		return err
	}
	// Migrate version '17' to '18'.
	if err := migrateV17ToV18(); err != nil {

	// Load only config version information.
	version, err := quick.GetVersion(getConfigFile())
	if err != nil {
		return err
	}

	return nil
	// Conditional to migrate only relevant config versions.
	// Upon success migration continues to the next version in sequence.
	switch version {
	case "2":
		// Migrate version '2' to '3'.
		if err = migrateV2ToV3(); err != nil {
			return err
		}
		fallthrough
	case "3":
		// Migrate version '3' to '4'.
		if err = migrateV3ToV4(); err != nil {
			return err
		}
		fallthrough
	case "4":
		// Migrate version '4' to '5'.
		if err = migrateV4ToV5(); err != nil {
			return err
		}
		fallthrough
	case "5":
		// Migrate version '5' to '6'.
		if err = migrateV5ToV6(); err != nil {
			return err
		}
		fallthrough
	case "6":
		// Migrate version '6' to '7'.
		if err = migrateV6ToV7(); err != nil {
			return err
		}
		fallthrough
	case "7":
		// Migrate version '7' to '8'.
		if err = migrateV7ToV8(); err != nil {
			return err
		}
		fallthrough
	case "8":
		// Migrate version '8' to '9'.
		if err = migrateV8ToV9(); err != nil {
			return err
		}
		fallthrough
	case "9":
		// Migrate version '9' to '10'.
		if err = migrateV9ToV10(); err != nil {
			return err
		}
		fallthrough
	case "10":
		// Migrate version '10' to '11'.
		if err = migrateV10ToV11(); err != nil {
			return err
		}
		fallthrough
	case "11":
		// Migrate version '11' to '12'.
		if err = migrateV11ToV12(); err != nil {
			return err
		}
		fallthrough
	case "12":
		// Migrate version '12' to '13'.
		if err = migrateV12ToV13(); err != nil {
			return err
		}
		fallthrough
	case "13":
		// Migrate version '13' to '14'.
		if err = migrateV13ToV14(); err != nil {
			return err
		}
		fallthrough
	case "14":
		// Migrate version '14' to '15'.
		if err = migrateV14ToV15(); err != nil {
			return err
		}
		fallthrough
	case "15":
		// Migrate version '15' to '16'.
		if err = migrateV15ToV16(); err != nil {
			return err
		}
		fallthrough
	case "16":
		// Migrate version '16' to '17'.
		if err = migrateV16ToV17(); err != nil {
			return err
		}
		fallthrough
	case "17":
		// Migrate version '17' to '18'.
		if err = migrateV17ToV18(); err != nil {
			return err
		}
		fallthrough
	case v18:
		// No migration needed, this always points to the current version.
		err = nil
	}
	return err
}

// Version '1' is not supported anymore and deprecated, safe to delete.

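This hunk replaces an unconditional chain of migrations with a version switch: each case performs exactly one upgrade step and then falls through to the next, so a config at any on-disk version is walked forward to the latest one in a single call. A minimal, runnable sketch of the same fallthrough pattern (the version strings and steps here are illustrative, not the actual Minio migration functions):

package main

import "fmt"

func migrate(version string) error {
	// Each case upgrades one step, then falls through so the
	// remaining steps run in sequence.
	switch version {
	case "2":
		fmt.Println("migrated '2' to '3'")
		fallthrough
	case "3":
		fmt.Println("migrated '3' to '4'")
		fallthrough
	case "4":
		// Already at the current version, nothing to do.
	default:
		return fmt.Errorf("unknown config version %q", version)
	}
	return nil
}

func main() {
	if err := migrate("2"); err != nil {
		fmt.Println(err)
	}
}
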
@@ -17,6 +17,7 @@
package cmd

import (
	"fmt"
	"io/ioutil"
	"os"
	"testing"
@@ -153,6 +154,7 @@ func TestServerConfigMigrateV2toV18(t *testing.T) {
	if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	// Fire a migrateConfig()
	if err := migrateConfig(); err != nil {
		t.Fatal("Unexpected error: ", err)
@@ -191,7 +193,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	configPath := rootPath + "/" + minioConfigFile

	// Create a corrupted config file
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\""), 0644); err != nil {
	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

@@ -245,3 +247,39 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
		t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json")
	}
}

// Test if all migrate code returns an error with corrupted config files
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatalf("Init Test config failed")
	}
	// remove the root directory after the test ends.
	defer removeAll(rootPath)

	setConfigDir(rootPath)
	configPath := rootPath + "/" + minioConfigFile

	for i := 3; i <= 17; i++ {
		// Create a corrupted config file
		if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
			0644); err != nil {
			t.Fatal("Unexpected error: ", err)
		}

		// Test different migrate versions and be sure they are returning an error
		if err = migrateConfig(); err == nil {
			t.Fatal("migrateConfig() should fail with a corrupted json")
		}
	}

	// Create a corrupted config file for version '2'.
	if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	// Test different migrate versions and be sure they are returning an error
	if err = migrateConfig(); err == nil {
		t.Fatal("migrateConfig() should fail with a corrupted json")
	}
}

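The corrupted configs written here parse far enough to yield a version string, so migrateConfig picks a migration step that then fails when the mistyped fields are decoded; a file truncated before the version (as in the earlier faulty-config test) fails at the version peek itself. A minimal sketch of that version peek — the real quick.GetVersion helper is not shown in this diff, so this stand-in assumes only standard-library JSON decoding:

package main

import (
	"encoding/json"
	"fmt"
)

// getVersion decodes only the version field of a config document.
func getVersion(data []byte) (string, error) {
	var hdr struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(data, &hdr); err != nil {
		return "", err
	}
	return hdr.Version, nil
}

func main() {
	v, err := getVersion([]byte(`{ "version":"2", "credentials": {} }`))
	fmt.Println(v, err) // 2 <nil>

	// A truncated document, like the one written in the faulty-config test.
	_, err = getVersion([]byte(`{ "version":"`))
	fmt.Println(err != nil) // true
}
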
@@ -261,11 +261,6 @@ func getValidConfig() (*serverConfigV18, error) {
		return nil, err
	}

	// Validate region field
	if srvCfg.Region == "" {
		return nil, errors.New("Region config value cannot be empty")
	}

	// Validate credential fields only when
	// they are not set via the environment

@@ -25,19 +25,35 @@ import (
)

const (
	accessKeyMinLen       = 5
	accessKeyMaxLen       = 20
	secretKeyMinLen       = 8
	secretKeyMaxLenAmazon = 40
	alphaNumericTable     = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	alphaNumericTableLen  = byte(len(alphaNumericTable))
	// Minimum length for Minio access key.
	accessKeyMinLen = 5

	// Maximum length for Minio access key.
	accessKeyMaxLen = 20

	// Minimum length for Minio secret key for both server and gateway mode.
	secretKeyMinLen = 8

	// Maximum secret key length for Minio, this
	// is used when autogenerating new credentials.
	secretKeyMaxLenMinio = 40

	// Maximum secret key length allowed from client side
	// caters for both server and gateway mode.
	secretKeyMaxLen = 100

	// Alpha numeric table used for generating access keys.
	alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

	// Total length of the alpha numeric table.
	alphaNumericTableLen = byte(len(alphaNumericTable))
)

// Common errors generated for access and secret key validation.
var (
	errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length")
	errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length")
	errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 100 characters in length")
)
var secretKeyMaxLen = secretKeyMaxLenAmazon

// isAccessKeyValid - validate access key for right length.
func isAccessKeyValid(accessKey string) bool {

@@ -111,10 +127,10 @@ func mustGetNewCredential() credential {
	accessKey := string(keyBytes)

	// Generate secret key.
	keyBytes = make([]byte, secretKeyMaxLen)
	keyBytes = make([]byte, secretKeyMaxLenMinio)
	_, err = rand.Read(keyBytes)
	fatalIf(err, "Unable to generate secret key.")
	secretKey := string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
	secretKey := string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLenMinio])

	cred, err := createCredential(accessKey, secretKey)
	fatalIf(err, "Unable to generate new credential.")

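A self-contained sketch of the generation scheme above: the access key maps random bytes onto an alphanumeric table, and the secret key is random bytes, base64-encoded and trimmed to the autogeneration length. The constants mirror the diff; the surrounding structure is illustrative.

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

const (
	alphaNumericTable    = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	accessKeyMaxLen      = 20
	secretKeyMaxLenMinio = 40
)

func main() {
	// Access key: random bytes folded onto the alphanumeric table.
	keyBytes := make([]byte, accessKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		panic(err)
	}
	for i, b := range keyBytes {
		keyBytes[i] = alphaNumericTable[b%byte(len(alphaNumericTable))]
	}
	accessKey := string(keyBytes)

	// Secret key: 40 random bytes, base64-encoded, trimmed to 40 characters.
	secretBytes := make([]byte, secretKeyMaxLenMinio)
	if _, err := rand.Read(secretBytes); err != nil {
		panic(err)
	}
	secretKey := base64.StdEncoding.EncodeToString(secretBytes)[:secretKeyMaxLenMinio]

	fmt.Println(accessKey, secretKey)
}
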
@@ -23,6 +23,9 @@ func TestMustGetNewCredential(t *testing.T) {
	if !cred.IsValid() {
		t.Fatalf("Failed to get new valid credential")
	}
	if len(cred.SecretKey) != secretKeyMaxLenMinio {
		t.Fatalf("Invalid length %d of the secretKey credential generated, expected %d", len(cred.SecretKey), secretKeyMaxLenMinio)
	}
}

func TestCreateCredential(t *testing.T) {
@@ -42,7 +45,7 @@ func TestCreateCredential(t *testing.T) {
		// Secret key too small.
		{"myuser", "pass", false, errInvalidSecretKeyLength},
		// Secret key too long.
		{"myuser", "pass1234567890123456789012345678901234567", false, errInvalidSecretKeyLength},
		{"myuser", "pass1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890", false, errInvalidSecretKeyLength},
		// Success when access key contains leading/trailing spaces.
		{" user ", cred.SecretKey, true, nil},
		{"myuser", "mypassword", true, nil},

@@ -80,7 +80,7 @@ func (endpoint Endpoint) SetHTTP() {
func NewEndpoint(arg string) (Endpoint, error) {
	// isEmptyPath - check whether given path is empty.
	isEmptyPath := func(path string) bool {
		return path == "" || path == "." || path == "/" || path == `\`
		return path == "" || path == "/" || path == `\`
	}

	if isEmptyPath(arg) {
@@ -127,14 +127,10 @@ func NewEndpoint(arg string) (Endpoint, error) {
			return Endpoint{}, fmt.Errorf("empty or root path is not supported in URL endpoint")
		}

		// Get IPv4 address of the host.
		hostIPs, err := getHostIP4(host)
		isLocal, err = isLocalHost(host)
		if err != nil {
			return Endpoint{}, err
		}

		// If intersection of two IP sets is not empty, then the host is local host.
		isLocal = !localIP4.Intersection(hostIPs).IsEmpty()
	} else {
		u = &url.URL{Path: path.Clean(arg)}
		isLocal = true

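The replaced inline logic shows what isLocalHost consolidates: resolve the host and intersect the resulting IPs with the addresses bound to local interfaces. A minimal sketch of that check using only the standard library — the real helper's signature matches the call site above, but its internals are an assumption here:

package main

import (
	"fmt"
	"net"
)

func isLocalHost(host string) (bool, error) {
	hostIPs, err := net.LookupIP(host)
	if err != nil {
		return false, err
	}
	// Collect the IPs assigned to local interfaces.
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	local := make(map[string]bool)
	for _, addr := range addrs {
		if ipNet, ok := addr.(*net.IPNet); ok {
			local[ipNet.IP.String()] = true
		}
	}
	// A non-empty intersection means the host is this machine.
	for _, ip := range hostIPs {
		if local[ip.String()] {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := isLocalHost("localhost")
	fmt.Println(ok, err)
}
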
@@ -62,7 +62,6 @@ func TestNewEndpoint(t *testing.T) {
		{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true}, URLEndpointType, nil},
		{"http://192.168.253.200/path", Endpoint{URL: u4}, URLEndpointType, nil},
		{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
		{".", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
		{"/", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
		{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
		{"c://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},

@@ -28,7 +28,9 @@ import (
// erasureCreateFile - writes an entire stream by erasure coding to
// all the disks, writes also calculate individual block's checksum
// for future bit-rot protection.
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, allowEmpty bool, blockSize int64, dataBlocks int, parityBlocks int, algo string, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, allowEmpty bool, blockSize int64,
	dataBlocks, parityBlocks int, algo HashAlgo, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {

	// Allocated blockSized buffer for reading from incoming stream.
	buf := make([]byte, blockSize)

@@ -19,7 +19,9 @@ package cmd
import "encoding/hex"

// Heals the erasure coded file. reedsolomon.Reconstruct() is used to reconstruct the missing parts.
func erasureHealFile(latestDisks []StorageAPI, outDatedDisks []StorageAPI, volume, path, healBucket, healPath string, size int64, blockSize int64, dataBlocks int, parityBlocks int, algo string) (checkSums []string, err error) {
func erasureHealFile(latestDisks []StorageAPI, outDatedDisks []StorageAPI, volume, path, healBucket, healPath string,
	size, blockSize int64, dataBlocks, parityBlocks int, algo HashAlgo) (checkSums []string, err error) {

	var offset int64
	remainingSize := size

@@ -17,7 +17,6 @@
package cmd

import (
	"encoding/hex"
	"errors"
	"io"
	"sync"
@@ -111,7 +110,9 @@ func getReadDisks(orderedDisks []StorageAPI, index int, dataBlocks int) (readDis
}

// parallelRead - reads chunks in parallel from the disks specified in []readDisks.
func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []StorageAPI, enBlocks [][]byte, blockOffset int64, curChunkSize int64, bitRotVerify func(diskIndex int) bool, pool *bpool.BytePool) {
func parallelRead(volume, path string, readDisks, orderedDisks []StorageAPI, enBlocks [][]byte,
	blockOffset, curChunkSize int64, brVerifiers []bitRotVerifier, pool *bpool.BytePool) {

	// WaitGroup to synchronise the read go-routines.
	wg := &sync.WaitGroup{}

@@ -125,11 +126,15 @@ func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []St
		go func(index int) {
			defer wg.Done()

			// Verify bit rot for the file on this disk.
			if !bitRotVerify(index) {
				// So that we don't read from this disk for the next block.
				orderedDisks[index] = nil
				return
			// evaluate if we need to perform bit-rot checking
			needBitRotVerification := true
			if brVerifiers[index].isVerified {
				needBitRotVerification = false
				// if file has bit-rot, do not reuse disk
				if brVerifiers[index].hasBitRot {
					orderedDisks[index] = nil
					return
				}
			}

			buf, err := pool.Get()
@@ -140,7 +145,25 @@ func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []St
			}
			buf = buf[:curChunkSize]

			_, err = readDisks[index].ReadFile(volume, path, blockOffset, buf)
			if needBitRotVerification {
				_, err = readDisks[index].ReadFileWithVerify(
					volume, path, blockOffset, buf,
					brVerifiers[index].algo,
					brVerifiers[index].checkSum)
			} else {
				_, err = readDisks[index].ReadFile(volume, path,
					blockOffset, buf)
			}

			// if bit-rot verification was done, store the
			// result of verification so we can skip
			// re-doing it next time
			if needBitRotVerification {
				brVerifiers[index].isVerified = true
				_, ok := err.(hashMismatchError)
				brVerifiers[index].hasBitRot = ok
			}

			if err != nil {
				orderedDisks[index] = nil
				return
@@ -153,12 +176,16 @@ func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []St
	wg.Wait()
}

// erasureReadFile - read bytes from erasure coded files and writes to given writer.
// Erasure coded files are read block by block as per given erasureInfo and data chunks
// are decoded into a data block. Data block is trimmed for given offset and length,
// then written to given writer. This function also supports bit-rot detection by
// verifying checksum of individual block's checksum.
func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path string, offset int64, length int64, totalLength int64, blockSize int64, dataBlocks int, parityBlocks int, checkSums []string, algo string, pool *bpool.BytePool) (int64, error) {
// erasureReadFile - read bytes from erasure coded files and writes to
// given writer. Erasure coded files are read block by block as per
// given erasureInfo and data chunks are decoded into a data
// block. Data block is trimmed for given offset and length, then
// written to given writer. This function also supports bit-rot
// detection by verifying checksum of individual block's checksum.
func erasureReadFile(writer io.Writer, disks []StorageAPI, volume, path string,
	offset, length, totalLength, blockSize int64, dataBlocks, parityBlocks int,
	checkSums []string, algo HashAlgo, pool *bpool.BytePool) (int64, error) {

	// Offset and length cannot be negative.
	if offset < 0 || length < 0 {
		return 0, traceError(errUnexpected)
@@ -169,27 +196,15 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
		return 0, traceError(errUnexpected)
	}

	// chunkSize is the amount of data that needs to be read from each disk at a time.
	// chunkSize is the amount of data that needs to be read from
	// each disk at a time.
	chunkSize := getChunkSize(blockSize, dataBlocks)

	// bitRotVerify verifies if the file on a particular disk doesn't have bitrot
	// by verifying the hash of the contents of the file.
	bitRotVerify := func() func(diskIndex int) bool {
		verified := make([]bool, len(disks))
		// Return closure so that we have reference to []verified and
		// not recalculate the hash on it every time the function is
		// called for the same disk.
		return func(diskIndex int) bool {
			if verified[diskIndex] {
				// Already validated.
				return true
			}
			// Is this a valid block?
			isValid := isValidBlock(disks[diskIndex], volume, path, checkSums[diskIndex], algo)
			verified[diskIndex] = isValid
			return isValid
		}
	}()
	brVerifiers := make([]bitRotVerifier, len(disks))
	for i := range brVerifiers {
		brVerifiers[i].algo = algo
		brVerifiers[i].checkSum = checkSums[i]
	}

	// Total bytes written to writer
	var bytesWritten int64
@@ -241,7 +256,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
			return bytesWritten, err
		}
		// Issue a parallel read across the disks specified in readDisks.
		parallelRead(volume, path, readDisks, disks, enBlocks, blockOffset, curChunkSize, bitRotVerify, pool)
		parallelRead(volume, path, readDisks, disks, enBlocks, blockOffset, curChunkSize, brVerifiers, pool)
		if isSuccessDecodeBlocks(enBlocks, dataBlocks) {
			// If enough blocks are available to do rs.Reconstruct()
			break
@@ -299,27 +314,6 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
	return bytesWritten, nil
}

// isValidBlock - calculates the checksum hash for the block and
// validates if its correct returns true for valid cases, false otherwise.
func isValidBlock(disk StorageAPI, volume, path, checkSum, checkSumAlgo string) (ok bool) {
	// Disk is not available, not a valid block.
	if disk == nil {
		return false
	}
	// Checksum not available, not a valid block.
	if checkSum == "" {
		return false
	}
	// Read everything for a given block and calculate hash.
	hashWriter := newHash(checkSumAlgo)
	hashBytes, err := hashSum(disk, volume, path, hashWriter)
	if err != nil {
		errorIf(err, "Unable to calculate checksum %s/%s", volume, path)
		return false
	}
	return hex.EncodeToString(hashBytes) == checkSum
}

// decodeData - decode encoded blocks.
func decodeData(enBlocks [][]byte, dataBlocks, parityBlocks int) error {
	// Initialized reedsolomon.

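decodeData delegates the actual repair to the reedsolomon library the diff's comments name. A small runnable sketch of that reconstruction step with github.com/klauspost/reedsolomon: encode shards, drop up to parityBlocks of them, and rebuild the missing ones in place.

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataBlocks, parityBlocks = 4, 2
	enc, err := reedsolomon.New(dataBlocks, parityBlocks)
	if err != nil {
		panic(err)
	}

	// Split a payload into data shards and compute parity shards.
	shards, err := enc.Split([]byte("hello, erasure coded world!!"))
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}

	// Simulate two lost disks; at most parityBlocks shards may be missing.
	shards[0], shards[5] = nil, nil

	// Reconstruct rebuilds the nil shards in place.
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	fmt.Printf("recovered shard 0: %q\n", shards[0])
}
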
@@ -213,6 +213,12 @@ func (r ReadDiskDown) ReadFile(volume string, path string, offset int64, buf []b
	return 0, errFaultyDisk
}

func (r ReadDiskDown) ReadFileWithVerify(volume string, path string, offset int64, buf []byte,
	algo HashAlgo, expectedHash string) (n int64, err error) {

	return 0, errFaultyDisk
}

func TestErasureReadFileDiskFail(t *testing.T) {
	// Initialize environment needed for the test.
	dataBlocks := 7

@@ -29,7 +29,7 @@ import (
)

// newHashWriters - initialize a slice of hashes for the disk count.
func newHashWriters(diskCount int, algo string) []hash.Hash {
func newHashWriters(diskCount int, algo HashAlgo) []hash.Hash {
	hashWriters := make([]hash.Hash, diskCount)
	for index := range hashWriters {
		hashWriters[index] = newHash(algo)
@@ -38,13 +38,13 @@ func newHashWriters(diskCount int, algo string) []hash.Hash {
}

// newHash - gives you a newly allocated hash depending on the input algorithm.
func newHash(algo string) (h hash.Hash) {
func newHash(algo HashAlgo) (h hash.Hash) {
	switch algo {
	case sha256Algo:
	case HashSha256:
		// sha256 checksum specially on ARM64 platforms or whenever
		// requested as dictated by `xl.json` entry.
		h = sha256.New()
	case blake2bAlgo:
	case HashBlake2b:
		// ignore the error, because New512 without a key never fails
		// New512 only returns a non-nil error, if the length of the passed
		// key > 64 bytes - but we use blake2b as hash function (no key)
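The blake2b case relies on a property of golang.org/x/crypto/blake2b spelled out in the comment: New512 only returns an error when given a key longer than 64 bytes, so calling it with no key cannot fail. A short sketch of the same algorithm switch in isolation:

package main

import (
	"crypto/sha256"
	"fmt"
	"hash"

	"golang.org/x/crypto/blake2b"
)

func newHash(algo string) hash.Hash {
	switch algo {
	case "sha256":
		return sha256.New()
	case "blake2b":
		// Error is impossible without a key, safe to ignore.
		h, _ := blake2b.New512(nil)
		return h
	default:
		return nil
	}
}

func main() {
	h := newHash("blake2b")
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}
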
@@ -71,7 +71,7 @@ var hashBufferPool = sync.Pool{

// hashSum calculates the hash of the entire path and returns.
func hashSum(disk StorageAPI, volume, path string, writer hash.Hash) ([]byte, error) {
	// Fetch staging a new staging buffer from the pool.
	// Fetch a new staging buffer from the pool.
	bufp := hashBufferPool.Get().(*[]byte)
	defer hashBufferPool.Put(bufp)

@@ -207,3 +207,16 @@ func copyBuffer(writer io.Writer, disk StorageAPI, volume string, path string, b
	// Success.
	return nil
}

// bitRotVerifier - type representing the bit-rot verification process for
// a single underlying object (currently whole files)
type bitRotVerifier struct {
	// has the bit-rot verification been done?
	isVerified bool
	// is the data free of bit-rot?
	hasBitRot bool
	// hashing algorithm
	algo HashAlgo
	// hex-encoded expected raw-hash value
	checkSum string
}

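The two booleans make the verifier a one-shot memo: the first read of a file pays for the checksum verification, and every later block read consults the cached result. A self-contained sketch of that caching behaviour — the field names mirror the struct above, while the verify method and the sha256 choice are illustrative:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

type bitRotVerifier struct {
	isVerified bool   // has the verification been done?
	hasBitRot  bool   // did the data fail it?
	checkSum   string // hex-encoded expected hash
}

func (v *bitRotVerifier) verify(data []byte) bool {
	if v.isVerified {
		// Reuse the earlier result instead of re-hashing.
		return !v.hasBitRot
	}
	sum := sha256.Sum256(data)
	v.isVerified = true
	v.hasBitRot = hex.EncodeToString(sum[:]) != v.checkSum
	return !v.hasBitRot
}

func main() {
	data := []byte("block contents")
	sum := sha256.Sum256(data)
	v := &bitRotVerifier{checkSum: hex.EncodeToString(sum[:])}
	fmt.Println(v.verify(data)) // true, hash computed once
	fmt.Println(v.verify(data)) // true, cached result
}
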
@@ -172,7 +172,7 @@ func newNotificationEvent(event eventData) NotificationEvent {
	// For all other events we should set ETag and Size.
	nEvent.S3.Object = objectMeta{
		Key:         escapedObj,
		ETag:        event.ObjInfo.MD5Sum,
		ETag:        event.ObjInfo.ETag,
		Size:        event.ObjInfo.Size,
		ContentType: event.ObjInfo.ContentType,
		UserDefined: event.ObjInfo.UserDefined,

@@ -45,7 +45,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
	}

	bucketName := "bucket"
	if err := obj.MakeBucket(bucketName); err != nil {
	if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
		t.Fatal("Unexpected error:", err)
	}

@@ -343,7 +343,7 @@ func TestInitEventNotifier(t *testing.T) {
	}

	// create bucket
	if err := obj.MakeBucket(bucketName); err != nil {
	if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
		t.Fatal("Unexpected error:", err)
	}

@@ -408,7 +408,7 @@ func TestListenBucketNotification(t *testing.T) {
	objectName := "object"

	// Create the bucket to listen on
	if err := obj.MakeBucket(bucketName); err != nil {
	if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
		t.Fatal("Unexpected error:", err)
	}

@@ -518,7 +518,7 @@ func TestAddRemoveBucketListenerConfig(t *testing.T) {

	// Make a bucket to store topicConfigs.
	randBucket := getRandomBucketName()
	if err := obj.MakeBucket(randBucket); err != nil {
	if err := obj.MakeBucketWithLocation(randBucket, ""); err != nil {
		t.Fatalf("Failed to make bucket %s", randBucket)
	}

@@ -20,8 +20,12 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"sync"

	"github.com/minio/minio/pkg/lock"
)

// fsFormat - structure holding 'fs' format.
@@ -29,6 +33,15 @@ type fsFormat struct {
	Version string `json:"version"`
}

// FS format version strings.
const (
	// Represents the current backend disk structure
	// version under `.minio.sys` and actual data namespace.

	// formatConfigV1.fsFormat.Version
	fsFormatBackendV1 = "1"
)

// xlFormat - structure holding 'xl' format.
type xlFormat struct {
	Version string `json:"version"` // Version of 'xl' format.
@@ -38,6 +51,15 @@ type xlFormat struct {
	JBOD []string `json:"jbod"`
}

// XL format version strings.
const (
	// Represents the current backend disk structure
	// version under `.minio.sys` and actual data namespace.

	// formatConfigV1.xlFormat.Version
	xlFormatBackendV1 = "1"
)

// formatConfigV1 - structure holds format config version '1'.
type formatConfigV1 struct {
	Version string `json:"version"` // Version of the format config.
@@ -47,6 +69,120 @@ type formatConfigV1 struct {
	XL *xlFormat `json:"xl,omitempty"` // XL field holds xl format.
}

// Format json file.
const (
	// Format config file carries backend format specific details.
	formatConfigFile = "format.json"

	// Format config tmp file carries backend format.
	formatConfigFileTmp = "format.json.tmp"
)

// `format.json` version value.
const (
	// formatConfigV1.Version represents the version string
	// of the current structure and its fields in `format.json`.
	formatFileV1 = "1"

	// Future `format.json` structure changes should have
	// their own version and should be subsequently listed here.
)

// Constitutes `format.json` backend name.
const (
	// Represents FS backend.
	formatBackendFS = "fs"

	// Represents XL backend.
	formatBackendXL = "xl"
)

// CheckFS - checks if the format is FS and valid with the right values,
// returns appropriate errors otherwise.
func (f *formatConfigV1) CheckFS() error {
	// Validate if format config version is v1.
	if f.Version != formatFileV1 {
		return fmt.Errorf("Unknown format file version '%s'", f.Version)
	}

	// Validate if we have the expected format.
	if f.Format != formatBackendFS {
		return fmt.Errorf("FS backend format required. Found '%s'", f.Format)
	}

	// Check if format is currently supported.
	if f.FS.Version != fsFormatBackendV1 {
		return fmt.Errorf("Unknown backend FS format version '%s'", f.FS.Version)
	}

	// Success.
	return nil
}

// LoadFormat - loads format config v1, returns `errUnformattedDisk`
// if reading format.json fails with io.EOF.
func (f *formatConfigV1) LoadFormat(lk *lock.LockedFile) error {
	_, err := f.ReadFrom(lk)
	if errorCause(err) == io.EOF {
		// No data on disk, `format.json` is still empty;
		// treat it as an unformatted disk.
		return traceError(errUnformattedDisk)
	}
	return err
}

func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
	// Serialize to prepare to write to disk.
	var fbytes []byte
	fbytes, err = json.Marshal(f)
	if err != nil {
		return 0, traceError(err)
	}
	if err = lk.Truncate(0); err != nil {
		return 0, traceError(err)
	}
	_, err = lk.Write(fbytes)
	if err != nil {
		return 0, traceError(err)
	}
	return int64(len(fbytes)), nil
}

func (f *formatConfigV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
	var fbytes []byte
	fi, err := lk.Stat()
	if err != nil {
		return 0, traceError(err)
	}
	fbytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
	if err != nil {
		return 0, traceError(err)
	}
	if len(fbytes) == 0 {
		return 0, traceError(io.EOF)
	}
	// Decode `format.json`.
	if err = json.Unmarshal(fbytes, f); err != nil {
		return 0, traceError(err)
	}
	return int64(len(fbytes)), nil
}

func newFSFormat() (format *formatConfigV1) {
	return newFSFormatV1()
}

// newFSFormatV1 - initializes new formatConfigV1 with FS format info.
func newFSFormatV1() (format *formatConfigV1) {
	return &formatConfigV1{
		Version: formatFileV1,
		Format:  formatBackendFS,
		FS: &fsFormat{
			Version: fsFormatBackendV1,
		},
	}
}

/*

All disks online

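WriteTo and ReadFrom above give format.json a simple truncate-and-rewrite round trip. A minimal sketch of the same round trip against a plain os.File instead of the repo's locked-file wrapper (the struct mirrors formatConfigV1 in shape only):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type fsFormat struct {
	Version string `json:"version"`
}

type formatConfigV1 struct {
	Version string    `json:"version"`
	Format  string    `json:"format"`
	FS      *fsFormat `json:"fs,omitempty"`
}

func main() {
	f, err := os.CreateTemp("", "format-*.json")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Write: truncate, then store the serialized config.
	out := &formatConfigV1{Version: "1", Format: "fs", FS: &fsFormat{Version: "1"}}
	fbytes, err := json.Marshal(out)
	if err != nil {
		panic(err)
	}
	f.Truncate(0)
	f.Write(fbytes)
	f.Close()

	// Read it back from disk and decode.
	in := &formatConfigV1{}
	data, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, in); err != nil {
		panic(err)
	}
	fmt.Println(in.Format, in.FS.Version) // fs 1
}
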
@@ -770,10 +906,10 @@ func loadFormatXL(bootstrapDisks []StorageAPI, readQuorum int) (disks []StorageA

func checkFormatXLValue(formatXL *formatConfigV1) error {
	// Validate format version and format type.
	if formatXL.Version != "1" {
	if formatXL.Version != formatFileV1 {
		return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version)
	}
	if formatXL.Format != "xl" {
	if formatXL.Format != formatBackendXL {
		return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format)
	}
	if formatXL.XL.Version != "1" {
@@ -875,10 +1011,10 @@ func initFormatXL(storageDisks []StorageAPI) (err error) {
		}
		// Allocate format config.
		formats[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    mustGetUUID(),
			},
		}

@@ -18,7 +18,12 @@ package cmd

import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"testing"

	"github.com/minio/minio/pkg/lock"
)

// generates a valid format.json for XL backend.
@@ -30,10 +35,10 @@ func genFormatXLValid() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -51,10 +56,10 @@ func genFormatXLInvalidVersion() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -75,10 +80,10 @@ func genFormatXLInvalidFormat() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -99,10 +104,10 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -116,8 +121,8 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {

func genFormatFS() *formatConfigV1 {
	return &formatConfigV1{
		Version: "1",
		Format:  "fs",
		Version: formatFileV1,
		Format:  formatBackendFS,
	}
}

@@ -130,10 +135,10 @@ func genFormatXLInvalidJBODCount() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -151,10 +156,10 @@ func genFormatXLInvalidJBOD() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -178,10 +183,10 @@ func genFormatXLInvalidDisks() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -202,10 +207,10 @@ func genFormatXLInvalidDisksOrder() []*formatConfigV1 {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -223,7 +228,7 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
	var err error
	xl := obj.(*xlObjects)

	err = obj.MakeBucket("bucket")
	err = obj.MakeBucketWithLocation("bucket", "")
	if err != nil {
		return []StorageAPI{}, err
	}
@@ -240,10 +245,10 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
	// Remove the content of export dir 10 but preserve .minio.sys because it is automatically
	// created when minio starts
	for i := 3; i <= 5; i++ {
		if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil {
		if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
			return []StorageAPI{}, err
		}
		if err = xl.storageDisks[i].DeleteFile(".minio.sys", "tmp"); err != nil {
		if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, "tmp"); err != nil {
			return []StorageAPI{}, err
		}
		if err = xl.storageDisks[i].DeleteFile(bucket, object+"/xl.json"); err != nil {
@@ -346,7 +351,7 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {

	xl := obj.(*xlObjects)

	err = obj.MakeBucket("bucket")
	err = obj.MakeBucketWithLocation("bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@@ -361,19 +366,19 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {
	}

	// Now, remove two format files.. Load them and reorder
	if err = xl.storageDisks[3].DeleteFile(".minio.sys", "format.json"); err != nil {
	if err = xl.storageDisks[3].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
		t.Fatal(err)
	}
	if err = xl.storageDisks[11].DeleteFile(".minio.sys", "format.json"); err != nil {
	if err = xl.storageDisks[11].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
		t.Fatal(err)
	}

	// Remove the content of export dir 10 but preserve .minio.sys because it is automatically
	// created when minio starts
	if err = xl.storageDisks[10].DeleteFile(".minio.sys", "format.json"); err != nil {
	if err = xl.storageDisks[10].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
		t.Fatal(err)
	}
	if err = xl.storageDisks[10].DeleteFile(".minio.sys", "tmp"); err != nil {
	if err = xl.storageDisks[10].DeleteFile(minioMetaBucket, "tmp"); err != nil {
		t.Fatal(err)
	}
	if err = xl.storageDisks[10].DeleteFile(bucket, object+"/xl.json"); err != nil {
@@ -419,7 +424,7 @@ func TestFormatXLReorderByInspection(t *testing.T) {

	xl := obj.(*xlObjects)

	err = obj.MakeBucket("bucket")
	err = obj.MakeBucketWithLocation("bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@@ -434,10 +439,10 @@ func TestFormatXLReorderByInspection(t *testing.T) {
	}

	// Now, remove two format files.. Load them and reorder
	if err = xl.storageDisks[3].DeleteFile(".minio.sys", "format.json"); err != nil {
	if err = xl.storageDisks[3].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
		t.Fatal(err)
	}
	if err = xl.storageDisks[5].DeleteFile(".minio.sys", "format.json"); err != nil {
	if err = xl.storageDisks[5].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
		t.Fatal(err)
	}

@@ -555,10 +560,10 @@ func TestSavedUUIDOrder(t *testing.T) {
	}
	for index := range jbod {
		formatConfigs[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			Version: formatFileV1,
			Format:  formatBackendXL,
			XL: &xlFormat{
				Version: "1",
				Version: xlFormatBackendV1,
				Disk:    jbod[index],
				JBOD:    jbod,
			},
@@ -682,6 +687,163 @@ func TestGenericFormatCheckXL(t *testing.T) {
	}
}

// TestFSCheckFormatFSErr - test loadFormatFS loading older format.
func TestFSCheckFormatFSErr(t *testing.T) {
	// Prepare for testing
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	// Assign a new UUID.
	uuid := mustGetUUID()

	// Initialize meta volume, if volume already exists ignores it.
	if err := initMetaVolumeFS(disk, uuid); err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		format         *formatConfigV1
		formatWriteErr error
		formatCheckErr error
		shouldPass     bool
	}{
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: nil,
			shouldPass:     true,
		},
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: "10",
				},
			},
			formatCheckErr: errors.New("Unknown backend FS format version '10'"),
			shouldPass:     false,
		},
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  "garbage",
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: errors.New("FS backend format required. Found 'garbage'"),
		},
		{
			format: &formatConfigV1{
				Version: "-1",
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: errors.New("Unknown format file version '-1'"),
		},
	}

	fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
	for i, testCase := range testCases {
		lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			t.Fatal(err)
		}
		_, err = testCase.format.WriteTo(lk)
		lk.Close()
		if err != nil {
			t.Fatalf("Test %d: Expected nil, got %s", i+1, err)
		}

		lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			t.Fatal(err)
		}

		formatCfg := &formatConfigV1{}
		_, err = formatCfg.ReadFrom(lk)
		lk.Close()
		if err != nil {
			t.Fatal(err)
		}
		err = formatCfg.CheckFS()
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Should not fail with unexpected %s, expected nil", i+1, err)
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Should fail with expected %s, got nil", i+1, testCase.formatCheckErr)
		}
		if err != nil && !testCase.shouldPass {
			if errorCause(err).Error() != testCase.formatCheckErr.Error() {
				t.Errorf("Test %d: Should fail with expected %s, got %s", i+1, testCase.formatCheckErr, err)
			}
		}
	}
}

// TestFSCheckFormatFS - test loadFormatFS with healthy and faulty disks
func TestFSCheckFormatFS(t *testing.T) {
	// Prepare for testing
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	// Assign a new UUID.
	uuid := mustGetUUID()

	// Initialize meta volume, if volume already exists ignores it.
	if err := initMetaVolumeFS(disk, uuid); err != nil {
		t.Fatal(err)
	}

	fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		t.Fatal(err)
	}

	format := newFSFormatV1()
	_, err = format.WriteTo(lk)
	lk.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Loading corrupted format file
	file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatal("Should not fail here", err)
	}
	file.Write([]byte{'b'})
	file.Close()

	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		t.Fatal(err)
	}

	format = &formatConfigV1{}
	_, err = format.ReadFrom(lk)
	lk.Close()
	if err == nil {
		t.Fatal("Should return an error here")
	}

	// Loading format file from disk not found.
	removeAll(disk)
	_, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
	if err != nil && !os.IsNotExist(err) {
		t.Fatal("Should return 'format.json' does not exist, but got", err)
	}
}

func TestLoadFormatXLErrs(t *testing.T) {
	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
@@ -749,7 +911,7 @@ func TestLoadFormatXLErrs(t *testing.T) {

	// disks 0..10 return unformatted disk
	for i := 0; i <= 10; i++ {
		if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil {
		if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
			t.Fatal(err)
		}
	}
@@ -873,7 +1035,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
	}
	xl = obj.(*xlObjects)
	for i := 0; i <= 15; i++ {
		if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil {
		if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
			t.Fatal(err)
		}
	}
@@ -894,7 +1056,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
	}
	xl = obj.(*xlObjects)
	for i := 0; i <= 15; i++ {
		if err = xl.storageDisks[i].AppendFile(".minio.sys", "format.json", []byte("corrupted data")); err != nil {
		if err = xl.storageDisks[i].AppendFile(minioMetaBucket, formatConfigFile, []byte("corrupted data")); err != nil {
			t.Fatal(err)
		}
	}
@@ -998,7 +1160,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
	}
	xl = obj.(*xlObjects)
	for i := 0; i <= 15; i++ {
		if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil {
		if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
			t.Fatal(err)
		}
	}

@@ -123,18 +123,27 @@ func fsMkdir(dirPath string) (err error) {
	return nil
}

// Lookup if directory exists, returns directory
// attributes upon success.
func fsStatDir(statDir string) (os.FileInfo, error) {
	if statDir == "" {
func fsStat(statLoc string) (os.FileInfo, error) {
	if statLoc == "" {
		return nil, traceError(errInvalidArgument)
	}
	if err := checkPathLength(statDir); err != nil {
	if err := checkPathLength(statLoc); err != nil {
		return nil, traceError(err)
	}
	fi, err := osStat(preparePath(statLoc))
	if err != nil {
		return nil, traceError(err)
	}

	fi, err := osStat(preparePath(statDir))
	return fi, nil
}

// Lookup if directory exists, returns directory
// attributes upon success.
func fsStatDir(statDir string) (os.FileInfo, error) {
	fi, err := fsStat(statDir)
	if err != nil {
		err = errorCause(err)
		if os.IsNotExist(err) {
			return nil, traceError(errVolumeNotFound)
		} else if os.IsPermission(err) {
@@ -152,16 +161,9 @@ func fsStatDir(statDir string) (os.FileInfo, error) {

// Lookup if file exists, returns file attributes upon success
func fsStatFile(statFile string) (os.FileInfo, error) {
	if statFile == "" {
		return nil, traceError(errInvalidArgument)
	}

	if err := checkPathLength(statFile); err != nil {
		return nil, traceError(err)
	}

	fi, err := osStat(preparePath(statFile))
	fi, err := fsStat(statFile)
	if err != nil {
		err = errorCause(err)
		if os.IsNotExist(err) {
			return nil, traceError(errFileNotFound)
		} else if os.IsPermission(err) {
@@ -174,7 +176,7 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
		return nil, traceError(err)
	}
	if fi.IsDir() {
		return nil, traceError(errFileNotFound)
		return nil, traceError(errFileAccessDenied)
	}
	return fi, nil
}
@@ -230,7 +232,7 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {

// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
	if filePath == "" || reader == nil || buf == nil {
	if filePath == "" || reader == nil {
		return 0, traceError(errInvalidArgument)
	}

@@ -263,11 +265,18 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
		}
	}

	bytesWritten, err := io.CopyBuffer(writer, reader, buf)
	if err != nil {
		return 0, traceError(err)
	var bytesWritten int64
	if buf != nil {
		bytesWritten, err = io.CopyBuffer(writer, reader, buf)
		if err != nil {
			return 0, traceError(err)
		}
	} else {
		bytesWritten, err = io.Copy(writer, reader)
		if err != nil {
			return 0, traceError(err)
		}
	}

	return bytesWritten, nil
}

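The change above makes the staging buffer optional: io.CopyBuffer is used when the caller supplies one (letting it come from a pool), plain io.Copy otherwise. A tiny sketch of that dispatch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// copyWithOptionalBuf copies src to dst, reusing buf when provided.
func copyWithOptionalBuf(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
	if buf != nil {
		return io.CopyBuffer(dst, src, buf)
	}
	return io.Copy(dst, src)
}

func main() {
	n, err := copyWithOptionalBuf(os.Stdout, bytes.NewReader([]byte("hello\n")), make([]byte, 4096))
	fmt.Println(n, err)
}
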
@@ -276,6 +285,12 @@ func fsRemoveUploadIDPath(basePath, uploadIDPath string) error {
	if basePath == "" || uploadIDPath == "" {
		return traceError(errInvalidArgument)
	}
	if err := checkPathLength(basePath); err != nil {
		return traceError(err)
	}
	if err := checkPathLength(uploadIDPath); err != nil {
		return traceError(err)
	}

	// List all the entries in uploadID.
	entries, err := readDir(uploadIDPath)
@@ -319,6 +334,26 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
// Renames source path to destination path, creates all the
// missing parents if they don't exist.
func fsRenameFile(sourcePath, destPath string) error {
	if err := checkPathLength(sourcePath); err != nil {
		return traceError(err)
	}
	if err := checkPathLength(destPath); err != nil {
		return traceError(err)
	}
	// Verify if source path exists.
	if _, err := os.Stat(preparePath(sourcePath)); err != nil {
		if os.IsNotExist(err) {
			return traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return traceError(errFileAccessDenied)
		} else if isSysErrPathNotFound(err) {
			return traceError(errFileNotFound)
		} else if isSysErrNotDir(err) {
			// File path cannot be verified since one of the parents is a file.
			return traceError(errFileAccessDenied)
		}
		return traceError(err)
	}
	if err := mkdirAll(pathutil.Dir(destPath), 0777); err != nil {
		return traceError(err)
	}

@@ -26,6 +26,31 @@ import (
	"github.com/minio/minio/pkg/lock"
)

func TestFSRenameFile(t *testing.T) {
	// create posix test setup
	_, path, err := newPosixTestSetup()
	if err != nil {
		t.Fatalf("Unable to create posix test setup, %s", err)
	}
	defer removeAll(path)

	if err = fsMkdir(pathJoin(path, "testvolume1")); err != nil {
		t.Fatal(err)
	}
	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
		t.Fatal(err)
	}
	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNotFound {
		t.Fatal(err)
	}
	if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNameTooLong {
		t.Fatal("Unexpected error", err)
	}
	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
		t.Fatal("Unexpected error", err)
	}
}

func TestFSStats(t *testing.T) {
	// create posix test setup
	_, path, err := newPosixTestSetup()
@@ -48,9 +73,8 @@ func TestFSStats(t *testing.T) {
		t.Fatalf("Unable to create volume, %s", err)
	}

	var buf = make([]byte, 4096)
	var reader = bytes.NewReader([]byte("Hello, world"))
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}
	// Seek back.
@@ -60,7 +84,7 @@ func TestFSStats(t *testing.T) {
		t.Fatal("Unexpected error", err)
	}

	if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}
	// Seek back.
@@ -110,7 +134,7 @@ func TestFSStats(t *testing.T) {
			srcFSPath:   path,
			srcVol:      "success-vol",
			srcPath:     "path",
			expectedErr: errFileNotFound,
			expectedErr: errFileAccessDenied,
		},
		// Test case - 6.
		// Test case with src path segment > 255.
@@ -143,7 +167,8 @@ func TestFSStats(t *testing.T) {

	for i, testCase := range testCases {
		if testCase.srcPath != "" {
			if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
			if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol,
				testCase.srcPath)); errorCause(err) != testCase.expectedErr {
				t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
			}
		} else {
@@ -174,9 +199,8 @@ func TestFSCreateAndOpen(t *testing.T) {
		t.Fatal("Unexpected error", err)
	}

	var buf = make([]byte, 4096)
	var reader = bytes.NewReader([]byte("Hello, world"))
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}
	// Seek back.
@@ -204,7 +228,7 @@ func TestFSCreateAndOpen(t *testing.T) {
	}

	for i, testCase := range testCases {
		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, buf, reader.Size())
		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
		if errorCause(err) != testCase.expectedErr {
			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
		}
@@ -297,15 +321,14 @@ func TestFSRemoves(t *testing.T) {
		t.Fatalf("Unable to create directory, %s", err)
	}

	var buf = make([]byte, 4096)
	var reader = bytes.NewReader([]byte("Hello, world"))
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}
	// Seek back.
	reader.Seek(0, 0)

	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}
	// Seek back.
@@ -417,9 +440,8 @@ func TestFSRemoveMeta(t *testing.T) {

	filePath := pathJoin(fsPath, "success-vol", "success-file")

	var buf = make([]byte, 4096)
	var reader = bytes.NewReader([]byte("Hello, world"))
	if _, err = fsCreateFile(filePath, reader, buf, reader.Size()); err != nil {
	if _, err = fsCreateFile(filePath, reader, nil, 0); err != nil {
		t.Fatalf("Unable to create file, %s", err)
	}

@@ -24,15 +24,31 @@ import (
	pathutil "path"
	"sort"
	"strings"
	"time"

	"github.com/minio/minio/pkg/lock"
	"github.com/minio/minio/pkg/mimedb"
	"github.com/tidwall/gjson"
)

// FS format, and object metadata.
const (
	fsMetaJSONFile   = "fs.json"
	fsFormatJSONFile = "format.json"
	// fs.json object metadata.
	fsMetaJSONFile = "fs.json"
)

// FS metadata constants.
const (
	// FS backend meta 1.0.0 version.
	fsMetaVersion100 = "1.0.0"

	// FS backend meta 1.0.1 version.
	fsMetaVersion = "1.0.1"

	// FS backend meta format.
	fsMetaFormat = "fs"

	// Add more constants here.
)

// A fsMetaV1 represents a metadata header mapping keys to sets of values.
@@ -47,6 +63,19 @@ type fsMetaV1 struct {
	Parts []objectPartInfo `json:"parts,omitempty"`
}

// IsValid - tells if the format is sane by validating the version
// string and format style.
func (m fsMetaV1) IsValid() bool {
	return isFSMetaValid(m.Version, m.Format)
}

// Verifies if the backend format metadata is sane by validating
// the version string and format style.
func isFSMetaValid(version, format string) bool {
	return ((version == fsMetaVersion || version == fsMetaVersion100) &&
		format == fsMetaFormat)
}

// Converts metadata to object info.
func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo {
	if len(m.Meta) == 0 {
@@ -75,17 +104,15 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
|
||||
objInfo.IsDir = fi.IsDir()
|
||||
}
|
||||
|
||||
objInfo.MD5Sum = m.Meta["md5Sum"]
|
||||
// Extract etag from metadata.
|
||||
objInfo.ETag = extractETag(m.Meta)
|
||||
objInfo.ContentType = m.Meta["content-type"]
|
||||
objInfo.ContentEncoding = m.Meta["content-encoding"]
|
||||
|
||||
// md5Sum has already been extracted into objInfo.MD5Sum. We
|
||||
// need to remove it from m.Meta to avoid it from appearing as
|
||||
// part of response headers. e.g, X-Minio-* or X-Amz-*.
|
||||
delete(m.Meta, "md5Sum")
|
||||
|
||||
// Save all the other userdefined API.
|
||||
objInfo.UserDefined = m.Meta
|
||||
// etag/md5Sum has already been extracted. We need to
|
||||
// remove to avoid it from appearing as part of
|
||||
// response headers. e.g, X-Minio-* or X-Amz-*.
|
||||
objInfo.UserDefined = cleanMetaETag(m.Meta)
|
||||
|
||||
// Success..
|
||||
return objInfo
|
||||
@@ -204,6 +231,12 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
|
||||
// obtain format.
|
||||
m.Format = parseFSFormat(fsMetaBuf)
|
||||
|
||||
// Verify if the format is valid, return corrupted format
|
||||
// for unrecognized formats.
|
||||
if !isFSMetaValid(m.Version, m.Format) {
|
||||
return 0, traceError(errCorruptedFormat)
|
||||
}
|
||||
|
||||
// obtain metadata.
|
||||
m.Meta = parseFSMetaMap(fsMetaBuf)
|
||||
|
||||
@@ -217,17 +250,6 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
|
||||
return int64(len(fsMetaBuf)), nil
|
||||
}
|
||||
|
||||
// FS metadata constants.
|
||||
const (
|
||||
// FS backend meta version.
|
||||
fsMetaVersion = "1.0.0"
|
||||
|
||||
// FS backend meta format.
|
||||
fsMetaFormat = "fs"
|
||||
|
||||
// Add more constants here.
|
||||
)
|
||||
|
||||
// newFSMetaV1 - initializes new fsMetaV1.
|
||||
func newFSMetaV1() (fsMeta fsMetaV1) {
|
||||
fsMeta = fsMetaV1{}
|
||||
@@ -237,60 +259,97 @@ func newFSMetaV1() (fsMeta fsMetaV1) {
|
||||
return fsMeta
|
||||
}
|
||||
|
||||
// newFSFormatV1 - initializes new formatConfigV1 with FS format info.
|
||||
func newFSFormatV1() (format *formatConfigV1) {
|
||||
return &formatConfigV1{
|
||||
Version: "1",
|
||||
Format: "fs",
|
||||
FS: &fsFormat{
|
||||
Version: "1",
|
||||
},
|
||||
}
|
||||
}
|
||||
// Check if disk has already a valid format, holds a read lock and
|
||||
// upon success returns it to the caller to be closed.
|
||||
func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
|
||||
fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
|
||||
|
||||
// loads format.json from minioMetaBucket if it exists.
|
||||
func loadFormatFS(fsPath string) (*formatConfigV1, error) {
|
||||
rlk, err := lock.RLockedOpenFile(pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile))
|
||||
rlk, err := lock.RLockedOpenFile(preparePath(fsFormatPath))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, errUnformattedDisk
|
||||
// If format.json not found then
|
||||
// its an unformatted disk.
|
||||
return nil, traceError(errUnformattedDisk)
|
||||
}
|
||||
return nil, err
|
||||
return nil, traceError(err)
|
||||
}
|
||||
defer rlk.Close()
|
||||
|
||||
formatBytes, err := ioutil.ReadAll(rlk)
|
||||
if err != nil {
|
||||
var format = &formatConfigV1{}
|
||||
if err = format.LoadFormat(rlk.LockedFile); err != nil {
|
||||
rlk.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
format := &formatConfigV1{}
|
||||
if err = json.Unmarshal(formatBytes, format); err != nil {
|
||||
// Check format FS.
|
||||
if err = format.CheckFS(); err != nil {
|
||||
rlk.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return format, nil
|
||||
// Always return read lock here and should be closed by the caller.
|
||||
return rlk, traceError(err)
|
||||
}
|
||||
|
||||
// writes FS format (format.json) into minioMetaBucket.
|
||||
func saveFormatFS(formatPath string, fsFormat *formatConfigV1) error {
|
||||
metadataBytes, err := json.Marshal(fsFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Creates a new format.json if unformatted.
|
||||
func createFormatFS(fsPath string) error {
|
||||
fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
|
||||
|
||||
// fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio.sys) directory.
|
||||
lk, err := lock.LockedOpenFile(preparePath(formatPath), os.O_CREATE|os.O_WRONLY, 0600)
|
||||
// Attempt a write lock on formatConfigFile `format.json`
|
||||
// file stored in minioMetaBucket(.minio.sys) directory.
|
||||
lk, err := lock.TryLockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
return traceError(err)
|
||||
}
|
||||
// Close the locked file upon return.
|
||||
defer lk.Close()
|
||||
|
||||
_, err = lk.Write(metadataBytes)
|
||||
// Success.
|
||||
// Load format on disk, checks if we are unformatted
|
||||
// writes the new format.json
|
||||
var format = &formatConfigV1{}
|
||||
err = format.LoadFormat(lk)
|
||||
if errorCause(err) == errUnformattedDisk {
|
||||
_, err = newFSFormat().WriteTo(lk)
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
|
||||
// This loop validates format.json by holding a read lock and
|
||||
// proceeds if disk unformatted to hold non-blocking WriteLock
|
||||
// If for some reason non-blocking WriteLock fails and the error
|
||||
// is lock.ErrAlreadyLocked i.e some other process is holding a
|
||||
// lock we retry in the loop again.
|
||||
for {
|
||||
// Validate the `format.json` for expected values.
|
||||
rlk, err = checkLockedValidFormatFS(fsPath)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Holding a read lock ensures that any write lock operation
|
||||
// is blocked if attempted in-turn avoiding corruption on
|
||||
// the backend disk.
|
||||
return rlk, nil
|
||||
case errorCause(err) == errUnformattedDisk:
|
||||
if err = createFormatFS(fsPath); err != nil {
|
||||
// Existing write locks detected.
|
||||
if errorCause(err) == lock.ErrAlreadyLocked {
|
||||
// Lock already present, sleep and attempt again.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
// Unexpected error, return.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Loop will continue to attempt a read-lock on `format.json`.
|
||||
default:
|
||||
// Unhandled error return.
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
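The retry loop in initFormatFS is the interesting part of this hunk: readers validate format.json under a shared lock, and only a failed read (unformatted disk) escalates to a non-blocking write lock, retrying while another process holds it. A stripped-down sketch of that shape, with the pkg/lock calls replaced by hypothetical stand-in callbacks so it stays self-contained:

package main

import (
	"errors"
	"time"
)

// Stand-ins for errUnformattedDisk and lock.ErrAlreadyLocked.
var (
	errUnformatted   = errors.New("unformatted disk")
	errAlreadyLocked = errors.New("already locked")
)

// initFormat mirrors the loop above: validate under a read lock; if
// unformatted, try a non-blocking write lock to create the file; if
// somebody else holds it, back off and retry.
func initFormat(readLockValidate func() error, tryCreate func() error) error {
	for {
		err := readLockValidate()
		switch {
		case err == nil:
			// Read lock is then held for the server's lifetime by the caller.
			return nil
		case errors.Is(err, errUnformatted):
			if cerr := tryCreate(); cerr != nil {
				if errors.Is(cerr, errAlreadyLocked) {
					time.Sleep(100 * time.Millisecond)
					continue
				}
				return cerr
			}
			// Created: loop again to take the read lock.
		default:
			return err
		}
	}
}

func main() {
	formatted := false
	err := initFormat(
		func() error {
			if !formatted {
				return errUnformatted
			}
			return nil
		},
		func() error { formatted = true; return nil },
	)
	if err != nil {
		panic(err)
	}
}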
// Return if the part info in uploadedParts and completeParts are same.
func isPartsSame(uploadedParts []objectPartInfo, completeParts []completePart) bool {
if len(uploadedParts) != len(completeParts) {

@@ -48,7 +48,7 @@ func TestReadFSMetadata(t *testing.T) {
bucketName := "bucket"
objectName := "object"

if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
sha256sum := ""
@@ -58,7 +58,7 @@ func TestReadFSMetadata(t *testing.T) {
}

// Construct the full path of fs.json
fsPath := pathJoin("buckets", bucketName, objectName, "fs.json")
fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json")
fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)

rlk, err := fs.rwPool.Open(fsPath)
@@ -85,7 +85,7 @@ func TestWriteFSMetadata(t *testing.T) {
bucketName := "bucket"
objectName := "object"

if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
sha256sum := ""
@@ -95,7 +95,7 @@ func TestWriteFSMetadata(t *testing.T) {
}

// Construct the full path of fs.json
fsPath := pathJoin("buckets", bucketName, objectName, "fs.json")
fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json")
fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)

rlk, err := fs.rwPool.Open(fsPath)
@@ -110,10 +110,10 @@ func TestWriteFSMetadata(t *testing.T) {
if err != nil {
t.Fatal("Unexpected error ", err)
}
if fsMeta.Version != "1.0.0" {
if fsMeta.Version != fsMetaVersion {
t.Fatalf("Unexpected version %s", fsMeta.Version)
}
if fsMeta.Format != "fs" {
if fsMeta.Format != fsMetaFormat {
t.Fatalf("Unexpected format %s", fsMeta.Format)
}
}

@@ -706,6 +706,11 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
return ObjectInfo{}, err
}

// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}

if _, err := fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
}
@@ -866,7 +871,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if len(fsMeta.Meta) == 0 {
fsMeta.Meta = make(map[string]string)
}
fsMeta.Meta["md5Sum"] = s3MD5
fsMeta.Meta["etag"] = s3MD5

// Write all the set metadata.
if _, err = fsMeta.WriteTo(metaFile); err != nil {

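For context on s3MD5 above: the patch only renames the key it is stored under, from "md5Sum" to "etag". The value itself is assumed here to follow the usual S3 multipart convention, the MD5 of the concatenated binary part MD5s suffixed with the part count; s3MultipartETag below is a hypothetical helper illustrating that convention, not the patch's own code:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// s3MultipartETag computes an S3-style multipart ETag from the hex
// MD5s of the individual parts.
func s3MultipartETag(partMD5Hexes []string) (string, error) {
	var all []byte
	for _, h := range partMD5Hexes {
		b, err := hex.DecodeString(h)
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partMD5Hexes)), nil
}

func main() {
	etag, _ := s3MultipartETag([]string{
		"5d41402abc4b2a76b9719d911017c592", // md5("hello")
		"7d793037a0760186574b0282f2f435e7", // md5("world")
	})
	fmt.Println(etag) // <md5 of both digests concatenated>-2
}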
@@ -33,7 +33,7 @@ func TestFSWriteUploadJSON(t *testing.T) {
bucketName := "bucket"
objectName := "object"

obj.MakeBucket(bucketName)
obj.MakeBucketWithLocation(bucketName, "")
_, err := obj.NewMultipartUpload(bucketName, objectName, nil)
if err != nil {
t.Fatal("Unexpected err: ", err)
@@ -60,7 +60,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
bucketName := "bucket"
objectName := "object"

if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}

@@ -91,7 +91,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
data := []byte("12345")
dataLen := int64(len(data))

if err = obj.MakeBucket(bucketName); err != nil {
if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}

@@ -122,7 +122,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
objectName := "object"
data := []byte("12345")

if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}

@@ -161,7 +161,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
objectName := "object"
data := []byte("12345")

if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}


cmd/fs-v1.go
@@ -24,6 +24,7 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"syscall"
@@ -41,6 +42,9 @@ type fsObjects struct {
// temporary transactions.
fsUUID string

// This value shouldn't be touched, once initialized.
fsFormatRlk *lock.RLockedFile // Is a read lock on `format.json`.

// FS rw pool.
rwPool *fsIOPool

@@ -108,24 +112,10 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
}

// Load `format.json`.
format, err := loadFormatFS(fsPath)
if err != nil && err != errUnformattedDisk {
return nil, fmt.Errorf("Unable to load 'format.json', %s", err)
}

// If the `format.json` doesn't exist create one.
if err == errUnformattedDisk {
fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)
// Initialize format.json, if already exists overwrite it.
if serr := saveFormatFS(fsFormatPath, newFSFormatV1()); serr != nil {
return nil, fmt.Errorf("Unable to initialize 'format.json', %s", serr)
}
}

// Validate if we have the same format.
if err == nil && format.Format != "fs" {
return nil, fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format)
// Initialize `format.json`, this function also returns.
rlk, err := initFormatFS(fsPath)
if err != nil {
return nil, err
}

// Initialize fs objects.
@@ -141,6 +131,12 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
},
}

// Once the filesystem has initialized hold the read lock for
// the life time of the server. This is done to ensure that under
// shared backend mode for FS, remote servers do not migrate
// or cause changes on backend format.
fs.fsFormatRlk = rlk

// Initialize and load bucket policies.
err = initBucketPolicies(fs)
if err != nil {
@@ -204,7 +200,7 @@ func (fs fsObjects) statBucketDir(bucket string) (os.FileInfo, error) {

// MakeBucket - create a new bucket, returns if it
// already exists.
func (fs fsObjects) MakeBucket(bucket string) error {
func (fs fsObjects) MakeBucketWithLocation(bucket, location string) error {
bucketDir, err := fs.getBucketDir(bucket)
if err != nil {
return toObjectErr(err, bucket)
@@ -471,12 +467,6 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {

// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// This is a special case with object whose name ends with
// a slash separator, we always return object not found here.
if hasSuffix(object, slashSeparator) {
return ObjectInfo{}, toObjectErr(traceError(errFileNotFound), bucket, object)
}

if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
}
@@ -488,6 +478,25 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
return fs.getObjectInfo(bucket, object)
}

// This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects ""a/b/c""
// "a/b" and "a" do not exist.
func (fs fsObjects) parentDirIsObject(bucket, parent string) bool {
var isParentDirObject func(string) bool
isParentDirObject = func(p string) bool {
if p == "." {
return false
}
if _, err := fsStatFile(pathJoin(fs.fsPath, bucket, p)); err == nil {
// If there is already a file at prefix "p" return error.
return true
}
// Check if there is a file as one of the parent paths.
return isParentDirObject(path.Dir(p))
}
return isParentDirObject(parent)
}

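The recursion above walks path.Dir until it hits "."; an equivalent iterative form is easy to test in isolation. In this sketch the fsStatFile call is replaced by a map lookup, an assumption purely for the example:

package main

import (
	"fmt"
	"path"
)

// parentDirIsObject reports whether any parent prefix of the given
// path is already occupied by a file (simulated here by a set).
func parentDirIsObject(existingFiles map[string]bool, parent string) bool {
	for p := parent; p != "." && p != "/"; p = path.Dir(p) {
		if existingFiles[p] {
			return true // a file already occupies a parent prefix
		}
	}
	return false
}

func main() {
	files := map[string]bool{"a/b": true}
	fmt.Println(parentDirIsObject(files, path.Dir("a/b/c/d"))) // true: "a/b" is a file
	fmt.Println(parentDirIsObject(files, path.Dir("x/y/z")))   // false: no parent exists
}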
// PutObject - creates an object upon reading from the input stream
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
@@ -495,18 +504,29 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, retErr error) {
var err error

// This is a special case with size as '0' and object ends with
// a slash separator, we treat it like a valid operation and
// return success.
// Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
}

// This is a special case with size as '0' and object ends
// with a slash separator, we treat it like a valid operation
// and return success.
if isObjectDir(object, size) {
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
return dirObjectInfo(bucket, object, size, metadata), nil
}

if err = checkPutObjectArgs(bucket, object, fs); err != nil {
return ObjectInfo{}, err
}

if _, err = fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}

// No metadata is set, allocate a new one.
@@ -592,12 +612,12 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.

newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// Update the md5sum if not set with the newly calculated one.
if len(metadata["md5Sum"]) == 0 {
metadata["md5Sum"] = newMD5Hex
if len(metadata["etag"]) == 0 {
metadata["etag"] = newMD5Hex
}

// md5Hex representation.
md5Hex := metadata["md5Sum"]
md5Hex := metadata["etag"]
if md5Hex != "" {
if newMD5Hex != md5Hex {
// Returns md5 mismatch.
@@ -729,8 +749,12 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
}
}

fsMetaMap := parseFSMetaMap(fsMetaBuf)
return fsMetaMap["md5Sum"], nil
// Check if FS metadata is valid, if not return error.
if !isFSMetaValid(parseFSVersion(fsMetaBuf), parseFSFormat(fsMetaBuf)) {
return "", toObjectErr(traceError(errCorruptedFormat), bucket, entry)
}

return extractETag(parseFSMetaMap(fsMetaBuf)), nil
}

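extractETag itself isn't shown in this diff; given the Meta["etag"] = s3MD5 write earlier and the legacy md5Sum key, it presumably prefers the new key and falls back to the old one. A hypothetical sketch of that fallback:

package main

import "fmt"

// Hypothetical shape of extractETag: prefer the "etag" key written by
// newer releases, fall back to the legacy "md5Sum" key.
func extractETagSketch(meta map[string]string) string {
	if etag, ok := meta["etag"]; ok {
		return etag
	}
	return meta["md5Sum"]
}

func main() {
	fmt.Println(extractETagSketch(map[string]string{"etag": "abc"}))   // abc
	fmt.Println(extractETagSketch(map[string]string{"md5Sum": "def"})) // def: legacy fs.json
}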
// ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
@@ -781,8 +805,8 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// Protect reading `fs.json`.
objectLock := globalNSMutex.NewNSLock(bucket, entry)
objectLock.RLock()
var md5Sum string
md5Sum, err = fs.getObjectETag(bucket, entry)
var etag string
etag, err = fs.getObjectETag(bucket, entry)
objectLock.RUnlock()
if err != nil {
return ObjectInfo{}, err
@@ -802,7 +826,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
Size: fi.Size(),
ModTime: fi.ModTime(),
IsDir: fi.IsDir(),
MD5Sum: md5Sum,
ETag: etag,
}, nil
}


@@ -63,7 +63,7 @@ func TestFSShutdown(t *testing.T) {
obj := initFSObjects(disk, t)
fs := obj.(*fsObjects)
objectContent := "12345"
obj.MakeBucket(bucketName)
obj.MakeBucketWithLocation(bucketName, "")
sha256sum := ""
obj.PutObject(bucketName, objectName, int64(len(objectContent)), bytes.NewReader([]byte(objectContent)), nil, sha256sum)
return fs, disk
@@ -85,47 +85,6 @@ func TestFSShutdown(t *testing.T) {
}
}

// TestFSLoadFormatFS - test loadFormatFS with healthy and faulty disks
func TestFSLoadFormatFS(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)

// Assign a new UUID.
uuid := mustGetUUID()

// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}

fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
if err := saveFormatFS(preparePath(fsFormatPath), newFSFormatV1()); err != nil {
t.Fatal("Should not fail here", err)
}
_, err := loadFormatFS(disk)
if err != nil {
t.Fatal("Should not fail here", err)
}
// Loading corrupted format file
file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
t.Fatal("Should not fail here", err)
}
file.Write([]byte{'b'})
file.Close()
_, err = loadFormatFS(disk)
if err == nil {
t.Fatal("Should return an error here")
}
// Loading format file from disk not found.
removeAll(disk)
_, err = loadFormatFS(disk)
if err != nil && err != errUnformattedDisk {
t.Fatal("Should return unformatted disk, but got", err)
}
}

// TestFSGetBucketInfo - test GetBucketInfo with healthy and faulty disks
func TestFSGetBucketInfo(t *testing.T) {
// Prepare for testing
@@ -136,7 +95,7 @@ func TestFSGetBucketInfo(t *testing.T) {
fs := obj.(*fsObjects)
bucketName := "bucket"

obj.MakeBucket(bucketName)
obj.MakeBucketWithLocation(bucketName, "")

// Test with valid parameters
info, err := fs.GetBucketInfo(bucketName)
@@ -161,6 +120,74 @@ func TestFSGetBucketInfo(t *testing.T) {
}
}

func TestFSPutObject(t *testing.T) {
// Prepare for tests
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)

obj := initFSObjects(disk, t)
bucketName := "bucket"
objectName := "1/2/3/4/object"

if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal(err)
}

sha256sum := ""

// With a regular object.
_, err := obj.PutObject(bucketName+"non-existent", objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
if _, ok := errorCause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}

// With a directory object.
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", int64(0), bytes.NewReader([]byte("")), nil, sha256sum)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
if _, ok := errorCause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}

_, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(bucketName, objectName+"/1", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else {
if nerr.Bucket != "bucket" {
t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
}
if nerr.Object != "1/2/3/4/object/1" {
t.Fatalf("Expected '1/2/3/4/object/1', got %s", nerr.Object)
}
}

_, err = obj.PutObject(bucketName, objectName+"/1/", 0, bytes.NewReader([]byte("")), nil, sha256sum)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else {
if nerr.Bucket != "bucket" {
t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
}
if nerr.Object != "1/2/3/4/object/1/" {
t.Fatalf("Expected '1/2/3/4/object/1/', got %s", nerr.Object)
}
}
}

// TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks
func TestFSDeleteObject(t *testing.T) {
// Prepare for tests
@@ -172,7 +199,7 @@ func TestFSDeleteObject(t *testing.T) {
bucketName := "bucket"
objectName := "object"

obj.MakeBucket(bucketName)
obj.MakeBucketWithLocation(bucketName, "")
sha256sum := ""
obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)

@@ -217,7 +244,7 @@ func TestFSDeleteBucket(t *testing.T) {
fs := obj.(*fsObjects)
bucketName := "bucket"

err := obj.MakeBucket(bucketName)
err := obj.MakeBucketWithLocation(bucketName, "")
if err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -235,7 +262,7 @@ func TestFSDeleteBucket(t *testing.T) {
t.Fatal("Unexpected error: ", err)
}

obj.MakeBucket(bucketName)
obj.MakeBucketWithLocation(bucketName, "")

// Delete bucket should get error disk not found.
removeAll(disk)
@@ -256,7 +283,7 @@ func TestFSListBuckets(t *testing.T) {
fs := obj.(*fsObjects)

bucketName := "bucket"
if err := obj.MakeBucket(bucketName); err != nil {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}


@@ -30,7 +30,7 @@ import (
)

// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
if err != nil {
return bucketInfo, azureToObjectError(traceError(err))
@@ -40,7 +40,7 @@ func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, e
if err != nil {
return bucketInfo, azureToObjectError(traceError(err), bucket)
}
defer resp.Body.Close()
resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
@@ -57,9 +57,16 @@ func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, e
return bucketInfo, nil
}

// AnonPutObject - Send PUT request without authentication.
// This is needed when clients send PUT requests on objects that can be uploaded without auth.
func (a *azureObjects) AnonPutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
// azure doesn't support anonymous put
return ObjectInfo{}, traceError(NotImplemented{})
}

// AnonGetObject - Send GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
u := a.client.GetBlobURL(bucket, object)
req, err := http.NewRequest("GET", u, nil)
if err != nil {
@@ -88,12 +95,12 @@ func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, le

// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(a.client.GetBlobURL(bucket, object))
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
defer resp.Body.Close()
resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
@@ -120,7 +127,7 @@ func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectIn
objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
}
objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
objInfo.MD5Sum = resp.Header.Get("Etag")
objInfo.ETag = resp.Header.Get("Etag")
objInfo.ModTime = t
objInfo.Name = object
objInfo.Size = contentLength
@@ -128,7 +135,7 @@ func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectIn
}

// AnonListObjects - Use Azure equivalent ListBlobs.
func (a AzureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
params := storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
@@ -175,7 +182,7 @@ func (a AzureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
MD5Sum: object.Properties.Etag,
ETag: object.Properties.Etag,
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})

@@ -17,27 +17,27 @@
package cmd

// HealBucket - Not relevant.
func (a AzureObjects) HealBucket(bucket string) error {
func (a *azureObjects) HealBucket(bucket string) error {
return traceError(NotImplemented{})
}

// ListBucketsHeal - Not relevant.
func (a AzureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
func (a *azureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
return nil, traceError(NotImplemented{})
}

// HealObject - Not relevant.
func (a AzureObjects) HealObject(bucket, object string) (int, int, error) {
func (a *azureObjects) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}

// ListObjectsHeal - Not relevant.
func (a AzureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
}

// ListUploadsHeal - Not relevant.
func (a AzureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
func (a *azureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

@@ -17,6 +17,7 @@
package cmd

import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -36,6 +37,36 @@ import (

const globalAzureAPIVersion = "2016-05-31"

// Canonicalize the metadata headers, without this azure-sdk calculates
// incorrect signature. This attempt to canonicalize is to convert
// any HTTP header which is of form say `accept-encoding` should be
// converted to `Accept-Encoding` in its canonical form.
// Also replaces X-Amz-Meta prefix with X-Ms-Meta as Azure expects user
// defined metadata to have X-Ms-Meta prefix.
func s3ToAzureHeaders(headers map[string]string) (newHeaders map[string]string) {
newHeaders = make(map[string]string)
for k, v := range headers {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(k, "X-Amz-Meta") {
k = strings.Replace(k, "X-Amz-Meta", "X-Ms-Meta", -1)
}
newHeaders[k] = v
}
return newHeaders
}

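Since s3ToAzureHeaders is pure string manipulation over the map, it can be exercised standalone; the function body below restates the one from the hunk so the example runs on its own:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Same conversion as the patch: canonicalize header keys, then swap
// the S3 user-metadata prefix for Azure's.
func s3ToAzureHeaders(headers map[string]string) map[string]string {
	newHeaders := make(map[string]string)
	for k, v := range headers {
		k = http.CanonicalHeaderKey(k)
		if strings.HasPrefix(k, "X-Amz-Meta") {
			k = strings.Replace(k, "X-Amz-Meta", "X-Ms-Meta", -1)
		}
		newHeaders[k] = v
	}
	return newHeaders
}

func main() {
	in := map[string]string{
		"accept-encoding":       "gzip",
		"x-amz-meta-first-name": "myname",
	}
	fmt.Println(s3ToAzureHeaders(in))
	// map[Accept-Encoding:gzip X-Ms-Meta-First-Name:myname]
}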
// Prefix user metadata with "X-Amz-Meta-".
// client.GetBlobMetadata() already strips "X-Ms-Meta-"
func azureToS3Metadata(meta map[string]string) (newMeta map[string]string) {
newMeta = make(map[string]string)

for k, v := range meta {
k = "X-Amz-Meta-" + k
newMeta[k] = v
}
return newMeta
}

// To store metadata during NewMultipartUpload which will be used after
// CompleteMultipartUpload to call SetBlobMetadata.
type azureMultipartMetaInfo struct {
@@ -64,8 +95,8 @@ func (a *azureMultipartMetaInfo) del(key string) {
delete(a.meta, key)
}

// AzureObjects - Implements Object layer for Azure blob storage.
type AzureObjects struct {
// azureObjects - Implements Object layer for Azure blob storage.
type azureObjects struct {
client storage.BlobStorageClient // Azure sdk client
metaInfo azureMultipartMetaInfo
}
@@ -122,16 +153,16 @@ func azureToObjectError(err error, params ...string) error {
return e
}

// Inits azure blob storage client and returns AzureObjects.
// Inits azure blob storage client and returns azureObjects.
func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) {
if endPoint == "" {
endPoint = storage.DefaultBaseURL
}
c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure)
if err != nil {
return AzureObjects{}, err
return &azureObjects{}, err
}
return &AzureObjects{
return &azureObjects{
client: c.GetBlobService(),
metaInfo: azureMultipartMetaInfo{
meta: make(map[string]map[string]string),
@@ -142,30 +173,24 @@ func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLa

// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a AzureObjects) Shutdown() error {
func (a *azureObjects) Shutdown() error {
// TODO
return nil
}

// StorageInfo - Not relevant to Azure backend.
func (a AzureObjects) StorageInfo() StorageInfo {
func (a *azureObjects) StorageInfo() StorageInfo {
return StorageInfo{}
}

// MakeBucket - Create a new container on azure backend.
func (a AzureObjects) MakeBucket(bucket string) error {
// will never be called, only satisfy ObjectLayer interface
return traceError(NotImplemented{})
}

// MakeBucketWithLocation - Create a new container on azure backend.
func (a AzureObjects) MakeBucketWithLocation(bucket, location string) error {
func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
return azureToObjectError(traceError(err), bucket)
}

// GetBucketInfo - Get bucket metadata..
func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// Azure does not have an equivalent call, hence use ListContainers.
resp, err := a.client.ListContainers(storage.ListContainersParameters{
Prefix: bucket,
@@ -188,7 +213,7 @@ func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
}

// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil {
return nil, azureToObjectError(traceError(err))
@@ -207,13 +232,13 @@ func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
}

// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a AzureObjects) DeleteBucket(bucket string) error {
func (a *azureObjects) DeleteBucket(bucket string) error {
return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
}

// ListObjects - lists all blobs on azure within a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
@@ -235,7 +260,7 @@ func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxK
Name: object.Name,
ModTime: t,
Size: object.Properties.ContentLength,
MD5Sum: canonicalizeETag(object.Properties.Etag),
ETag: canonicalizeETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
@@ -250,7 +275,7 @@ func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxK
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
byteRange := fmt.Sprintf("%d-", startOffset)
if length > 0 && startOffset > 0 {
byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
@@ -273,7 +298,14 @@ func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length

// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
// uses Azure equivalent GetBlobProperties.
func (a AzureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
blobMeta, err := a.client.GetBlobMetadata(bucket, object)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}

meta := azureToS3Metadata(blobMeta)

prop, err := a.client.GetBlobProperties(bucket, object)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
@@ -282,55 +314,69 @@ func (a AzureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
if err != nil {
return objInfo, traceError(err)
}

if prop.ContentEncoding != "" {
meta["Content-Encoding"] = prop.ContentEncoding
}
meta["Content-Type"] = prop.ContentType

objInfo = ObjectInfo{
Bucket: bucket,
UserDefined: make(map[string]string),
MD5Sum: canonicalizeETag(prop.Etag),
UserDefined: meta,
ETag: canonicalizeETag(prop.Etag),
ModTime: t,
Name: object,
Size: prop.ContentLength,
}
if prop.ContentEncoding != "" {
objInfo.UserDefined["Content-Encoding"] = prop.ContentEncoding
}
objInfo.UserDefined["Content-Type"] = prop.ContentType
return objInfo, nil
}

// Canonicalize the metadata headers, without this azure-sdk calculates
// incorrect signature. This attempt to canonicalize is to convert
// any HTTP header which is of form say `accept-encoding` should be
// converted to `Accept-Encoding` in its canonical form.
func canonicalMetadata(metadata map[string]string) (canonical map[string]string) {
canonical = make(map[string]string)
for k, v := range metadata {
canonical[http.CanonicalHeaderKey(k)] = v
}
return canonical
return objInfo, nil
}

// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
func (a *azureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
var sha256Writer hash.Hash
var md5sumWriter hash.Hash

var writers []io.Writer

md5sum := metadata["etag"]
delete(metadata, "etag")

teeReader := data

if sha256sum != "" {
sha256Writer = sha256.New()
teeReader = io.TeeReader(data, sha256Writer)
writers = append(writers, sha256Writer)
}

delete(metadata, "md5Sum")
if md5sum != "" {
md5sumWriter = md5.New()
writers = append(writers, md5sumWriter)
}

err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, canonicalMetadata(metadata))
if len(writers) > 0 {
teeReader = io.TeeReader(data, io.MultiWriter(writers...))
}

err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, s3ToAzureHeaders(metadata))
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}

if md5sum != "" {
newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
if newMD5sum != md5sum {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, azureToObjectError(traceError(BadDigest{md5sum, newMD5sum}))
}
}

if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, traceError(SHA256Mismatch{})
return ObjectInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
}
}

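The md5/sha256 verification above hashes the stream exactly once while it is being uploaded, via io.TeeReader feeding an io.MultiWriter. The same pattern in a minimal standalone form (io.Discard stands in for the Azure upload call):

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

func main() {
	data := bytes.NewReader([]byte("hello, world"))

	// Collect one writer per digest the caller asked us to verify.
	var writers []io.Writer
	md5w := md5.New()
	shaw := sha256.New()
	writers = append(writers, md5w, shaw)

	var reader io.Reader = data
	if len(writers) > 0 {
		reader = io.TeeReader(data, io.MultiWriter(writers...))
	}

	// Stand-in for CreateBlockBlobFromReader: consume the stream once.
	if _, err := io.Copy(io.Discard, reader); err != nil {
		panic(err)
	}

	fmt.Println("md5:   ", hex.EncodeToString(md5w.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(shaw.Sum(nil)))
}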
@@ -339,7 +385,7 @@ func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reade

// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject))
if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
@@ -349,7 +395,7 @@ func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject st

// DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent DeleteBlob API.
func (a AzureObjects) DeleteObject(bucket, object string) error {
func (a *azureObjects) DeleteObject(bucket, object string) error {
err := a.client.DeleteBlob(bucket, object, nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
@@ -361,7 +407,7 @@ func (a AzureObjects) DeleteObject(bucket, object string) error {
// FIXME: Full ListMultipartUploads is not supported yet. It is supported just enough to help our client libs to
// support re-uploads. a.client.ListBlobs() can be made to return entries which include uncommitted blobs using
// which we need to filter out the committed blobs to get the list of uncommitted blobs.
func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
result.MaxUploads = maxUploads
result.Prefix = prefix
result.Delimiter = delimiter
@@ -377,7 +423,7 @@ func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMa
}

// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
// Azure doesn't return a unique upload ID and we use object name in place of it. Azure allows multiple uploads to
// co-exist as long as the user keeps the blocks uploaded (in block blobs) unique amongst concurrent upload attempts.
// Each concurrent client, keeps its own blockID list which it can commit.
@@ -386,14 +432,14 @@ func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[str
// Store an empty map as a placeholder else ListObjectParts/PutObjectPart will not work properly.
metadata = make(map[string]string)
} else {
metadata = canonicalMetadata(metadata)
metadata = s3ToAzureHeaders(metadata)
}
a.metaInfo.set(uploadID, metadata)
return uploadID, nil
}

// CopyObjectPart - Not implemented.
func (a AzureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
func (a *azureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
return info, traceError(NotImplemented{})
}

@@ -421,39 +467,66 @@ func azureParseBlockID(blockID string) (int, string, error) {
}

// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
if meta := a.metaInfo.get(uploadID); meta == nil {
return info, traceError(InvalidUploadID{})
}
var sha256Writer hash.Hash
var md5sumWriter hash.Hash
var etag string

var writers []io.Writer

if sha256sum != "" {
sha256Writer = sha256.New()
writers = append(writers, sha256Writer)
}

teeReader := io.TeeReader(data, sha256Writer)
if md5Hex != "" {
md5sumWriter = md5.New()
writers = append(writers, md5sumWriter)
etag = md5Hex
} else {
// Generate random ETag.
etag = getMD5Hash([]byte(mustGetUUID()))
}

id := azureGetBlockID(partID, md5Hex)
teeReader := data

if len(writers) > 0 {
teeReader = io.TeeReader(data, io.MultiWriter(writers...))
}

id := azureGetBlockID(partID, etag)
err = a.client.PutBlockWithLength(bucket, object, id, uint64(size), teeReader, nil)
if err != nil {
return info, azureToObjectError(traceError(err), bucket, object)
}

if md5Hex != "" {
newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
if newMD5sum != md5Hex {
a.client.DeleteBlob(bucket, object, nil)
return PartInfo{}, azureToObjectError(traceError(BadDigest{md5Hex, newMD5sum}))
}
}

if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, traceError(SHA256Mismatch{})
return PartInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
}
}

info.PartNumber = partID
info.ETag = md5Hex
info.ETag = etag
info.LastModified = UTCNow()
info.Size = size
return info, nil
}

// ListObjectParts - Use Azure equivalent GetBlockList.
func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
result.Bucket = bucket
result.Object = object
result.UploadID = uploadID
@@ -502,13 +575,13 @@ func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumbe
// AbortMultipartUpload - Not Implemented.
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a AzureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
a.metaInfo.del(uploadID)
return nil
}

// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a AzureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
meta := a.metaInfo.get(uploadID)
if meta == nil {
return objInfo, traceError(InvalidUploadID{uploadID})
@@ -536,6 +609,10 @@ func (a AzureObjects) CompleteMultipartUpload(bucket, object, uploadID string, u
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
err = a.client.SetBlobMetadata(bucket, object, nil, meta)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
}
a.metaInfo.del(uploadID)
return a.GetObjectInfo(bucket, object)
@@ -598,7 +675,7 @@ func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []BucketAccessPolicy

for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
@@ -626,7 +703,7 @@ func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketA
}

// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
perm, err := a.client.GetContainerPermissions(bucket, 0, "")
if err != nil {
@@ -644,7 +721,7 @@ func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolic
}

// DeleteBucketPolicies - Set the container ACL to "private"
func (a AzureObjects) DeleteBucketPolicies(bucket string) error {
func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypePrivate,
AccessPolicies: nil,

@@ -25,18 +25,33 @@ import (
)

// Test canonical metadata.
func TestCanonicalMetadata(t *testing.T) {
metadata := map[string]string{
func TestS3ToAzureHeaders(t *testing.T) {
headers := map[string]string{
"accept-encoding": "gzip",
"content-encoding": "gzip",
}
expectedCanonicalM := map[string]string{
expectedHeaders := map[string]string{
"Accept-Encoding": "gzip",
"Content-Encoding": "gzip",
}
actualCanonicalM := canonicalMetadata(metadata)
if !reflect.DeepEqual(actualCanonicalM, expectedCanonicalM) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedCanonicalM, actualCanonicalM)
actualHeaders := s3ToAzureHeaders(headers)
if !reflect.DeepEqual(actualHeaders, expectedHeaders) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, actualHeaders)
}
}

func TestAzureToS3Metadata(t *testing.T) {
// Just one testcase. Adding more test cases does not add value to the testcase
// as azureToS3Metadata() just adds a prefix.
metadata := map[string]string{
"First-Name": "myname",
}
expectedMeta := map[string]string{
"X-Amz-Meta-First-Name": "myname",
}
actualMeta := azureToS3Metadata(metadata)
if !reflect.DeepEqual(actualMeta, expectedMeta) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, actualMeta)
}
}


@@ -20,7 +20,9 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"

"encoding/hex"
"encoding/json"
"encoding/xml"

@@ -52,17 +54,23 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeAnonymous:
// No verification needed for anonymous requests.
default:
// For all unknown auth types return error.
writeErrorResponse(w, ErrAccessDenied, r.URL)
return
}

getObjectInfo := objectAPI.GetObjectInfo
@@ -151,6 +159,138 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re
}
}

// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}

// X-Amz-Copy-Source shouldn't be set for this call.
if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
return
}

var object, bucket string
vars := router.Vars(r)
bucket = vars["bucket"]
object = vars["object"]

// Get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil {
errorIf(err, "Unable to validate content-md5 format.")
writeErrorResponse(w, ErrInvalidDigest, r.URL)
return
}

/// if Content-Length is unknown/missing, deny the request
size := r.ContentLength
reqAuthType := getRequestAuthType(r)
if reqAuthType == authTypeStreamingSigned {
sizeStr := r.Header.Get("x-amz-decoded-content-length")
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
errorIf(err, "Unable to parse `x-amz-decoded-content-length` into its integer value", sizeStr)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
if size == -1 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
return
}

/// maximum Upload size for objects in a single operation
|
||||
if isMaxObjectSize(size) {
|
||||
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Extract metadata to be saved from incoming HTTP header.
|
||||
metadata := extractMetadataFromHeader(r.Header)
|
||||
if reqAuthType == authTypeStreamingSigned {
|
||||
if contentEncoding, ok := metadata["content-encoding"]; ok {
|
||||
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
|
||||
if contentEncoding != "" {
|
||||
// Make sure to trim and save the content-encoding
|
||||
// parameter for a streaming signature which is set
|
||||
// to a custom value for example: "aws-chunked,gzip".
|
||||
metadata["content-encoding"] = contentEncoding
|
||||
} else {
|
||||
// Trimmed content encoding is empty when the header
|
||||
// value is set to "aws-chunked" only.
|
||||
|
||||
// Make sure to delete the content-encoding parameter
|
||||
// for a streaming signature which is set to value
|
||||
// for example: "aws-chunked"
|
||||
delete(metadata, "content-encoding")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure we hex encode md5sum here.
|
||||
metadata["etag"] = hex.EncodeToString(md5Bytes)
|
||||
|
||||
sha256sum := ""
|
||||
|
||||
// Lock the object.
|
||||
objectLock := globalNSMutex.NewNSLock(bucket, object)
|
||||
objectLock.Lock()
|
||||
defer objectLock.Unlock()
|
||||
|
||||
var objInfo ObjectInfo
|
||||
switch reqAuthType {
|
||||
case authTypeAnonymous:
|
||||
// Create anonymous object.
|
||||
objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, sha256sum)
|
||||
case authTypeStreamingSigned:
|
||||
// Initialize stream signature verifier.
|
||||
reader, s3Error := newSignV4ChunkedReader(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum)
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
s3Error := isReqAuthenticatedV2(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
|
||||
case authTypePresigned, authTypeSigned:
|
||||
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
if !skipContentSha256Cksum(r) {
|
||||
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
|
||||
}
|
||||
// Create object.
|
||||
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
|
||||
default:
|
||||
// For all unknown auth types return error.
|
||||
writeErrorResponse(w, ErrAccessDenied, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
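
// The content-encoding handling above depends on trimAwsChunkedContentEncoding
// dropping the "aws-chunked" token that streaming-signature clients prepend,
// e.g. "aws-chunked,gzip" -> "gzip" and "aws-chunked" -> "". A minimal sketch
// of that behavior (an assumption based on the comments above, not the real
// helper's body):
//
//	import "strings"
//
//	func trimAwsChunkedSketch(contentEnc string) string {
//		var kept []string
//		for _, enc := range strings.Split(contentEnc, ",") {
//			if e := strings.TrimSpace(enc); e != "" && e != "aws-chunked" {
//				kept = append(kept, e)
//			}
//		}
//		return strings.Join(kept, ",")
//	}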

// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
@@ -173,17 +313,23 @@ func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.R
        // Signature V2 validation.
        s3Error := isReqAuthenticatedV2(r)
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeSigned, authTypePresigned:
        s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeAnonymous:
        // No verification needed for anonymous requests.
    default:
        // For all unknown auth types return error.
        writeErrorResponse(w, ErrAccessDenied, r.URL)
        return
    }

    getObjectInfo := objectAPI.GetObjectInfo
@@ -581,17 +727,23 @@ func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *htt
        // Signature V2 validation.
        s3Error := isReqAuthenticatedV2(r)
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeSigned, authTypePresigned:
        s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeAnonymous:
        // No verification needed for anonymous requests.
    default:
        // For all unknown auth types return error.
        writeErrorResponse(w, ErrAccessDenied, r.URL)
        return
    }

    // Extract all the listObjectsV1 query params to their native values.
@@ -645,17 +797,23 @@ func (api gatewayAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.R
        // Signature V2 validation.
        s3Error := isReqAuthenticatedV2(r)
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeSigned, authTypePresigned:
        s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeAnonymous:
        // No verification needed for anonymous requests.
    default:
        // For all unknown auth types return error.
        writeErrorResponse(w, ErrAccessDenied, r.URL)
        return
    }

    getBucketInfo := objectAPI.GetBucketInfo
@@ -691,7 +849,7 @@ func (api gatewayAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r
        // Signature V2 validation.
        s3Error := isReqAuthenticatedV2(r)
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
@@ -702,10 +860,16 @@ func (api gatewayAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r
            s3Error = isReqAuthenticated(r, serverConfig.GetRegion())
        }
        if s3Error != ErrNone {
            errorIf(errSignatureMismatch, dumpRequest(r))
            errorIf(errSignatureMismatch, "%s", dumpRequest(r))
            writeErrorResponse(w, s3Error, r.URL)
            return
        }
    case authTypeAnonymous:
        // No verification needed for anonymous requests.
    default:
        // For all unknown auth types return error.
        writeErrorResponse(w, ErrAccessDenied, r.URL)
        return
    }

    getBucketInfo := objectAPI.GetBucketInfo

@@ -21,52 +21,49 @@ import (
    "fmt"
    "net/url"
    "os"
    "runtime"
    "strings"

    "github.com/gorilla/mux"
    "github.com/minio/cli"
)

var gatewayTemplate = `NAME:
const azureGatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} BACKEND [ENDPOINT]
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
BACKEND:
  azure: Microsoft Azure Blob Storage. Default ENDPOINT is https://core.windows.net
  s3: Amazon Simple Storage Service (S3). Default ENDPOINT is https://s3.amazonaws.com
ENDPOINT:
  Azure server endpoint. Default ENDPOINT is https://core.windows.net

ENVIRONMENT VARIABLES:
  ACCESS:
     MINIO_ACCESS_KEY: Username or access key of your storage backend.
     MINIO_SECRET_KEY: Password or secret key of your storage backend.
     MINIO_ACCESS_KEY: Username or access key of Azure storage.
     MINIO_SECRET_KEY: Password or secret key of Azure storage.

  BROWSER:
     MINIO_BROWSER: To disable web browser access, set this value to "off".

EXAMPLES:
  1. Start minio gateway server for Azure Blob Storage backend.
      $ export MINIO_ACCESS_KEY=azureaccountname
      $ export MINIO_SECRET_KEY=azureaccountkey
      $ {{.HelpName}} azure

  2. Start minio gateway server for AWS S3 backend.
      $ export MINIO_ACCESS_KEY=accesskey
      $ export MINIO_SECRET_KEY=secretkey
      $ {{.HelpName}} s3

  3. Start minio gateway server for S3 backend on custom endpoint.
      $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
      $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
      $ {{.HelpName}} s3 https://play.minio.io:9000
      $ {{.HelpName}}
  2. Start minio gateway server for Azure Blob Storage backend on custom endpoint.
      $ export MINIO_ACCESS_KEY=azureaccountname
      $ export MINIO_SECRET_KEY=azureaccountkey
      $ {{.HelpName}} https://azure.example.com
`

var gatewayCmd = cli.Command{
    Name:               "gateway",
    Usage:              "Start object storage gateway.",
    Action:             gatewayMain,
    CustomHelpTemplate: gatewayTemplate,
var azureBackendCmd = cli.Command{
    Name:               "azure",
    Usage:              "Microsoft Azure Blob Storage.",
    Action:             azureGatewayMain,
    CustomHelpTemplate: azureGatewayTemplate,
    Flags: append(serverFlags,
        cli.BoolFlag{
            Name: "quiet",
@@ -76,6 +73,59 @@ var gatewayCmd = cli.Command{
    HideHelpCommand: true,
}

const s3GatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
ENDPOINT:
  S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com

ENVIRONMENT VARIABLES:
  ACCESS:
     MINIO_ACCESS_KEY: Username or access key of S3 storage.
     MINIO_SECRET_KEY: Password or secret key of S3 storage.

  BROWSER:
     MINIO_BROWSER: To disable web browser access, set this value to "off".

EXAMPLES:
  1. Start minio gateway server for AWS S3 backend.
      $ export MINIO_ACCESS_KEY=accesskey
      $ export MINIO_SECRET_KEY=secretkey
      $ {{.HelpName}}

  2. Start minio gateway server for S3 backend on custom endpoint.
      $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
      $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
      $ {{.HelpName}} https://play.minio.io:9000
`

var s3BackendCmd = cli.Command{
    Name:               "s3",
    Usage:              "Amazon Simple Storage Service (S3).",
    Action:             s3GatewayMain,
    CustomHelpTemplate: s3GatewayTemplate,
    Flags: append(serverFlags,
        cli.BoolFlag{
            Name:  "quiet",
            Usage: "Disable startup banner.",
        },
    ),
    HideHelpCommand: true,
}

var gatewayCmd = cli.Command{
    Name:            "gateway",
    Usage:           "Start object storage gateway.",
    HideHelpCommand: true,
    Subcommands:     []cli.Command{azureBackendCmd, s3BackendCmd},
}
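
// With this restructuring the backend is chosen by a subcommand instead of a
// positional BACKEND argument, and the only positional argument left is the
// optional endpoint. Illustrative invocations, matching the help templates
// above (endpoints are examples only):
//
//	minio gateway azure                            // default https://core.windows.net
//	minio gateway azure https://azure.example.com  // custom Azure endpoint
//	minio gateway s3 https://play.minio.io:9000    // custom S3 endpoint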

// Represents the type of the gateway backend.
type gatewayBackend string

@@ -96,12 +146,28 @@ func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
    return accessKey, secretKey
}

// Set browser setting from environment variables
func mustSetBrowserSettingFromEnv() {
    if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
        browserFlag, err := ParseBrowserFlag(browser)
        if err != nil {
            fatalIf(errors.New("invalid value"), "Unknown value ‘%s’ in MINIO_BROWSER environment variable.", browser)
        }

        // browser Envs are set globally, this does not represent
        // if browser is turned off or on.
        globalIsEnvBrowser = true
        globalIsBrowserEnabled = bool(browserFlag)
    }
}

// Initialize gateway layer depending on the backend type.
// Supported backend types are
//
// - Azure Blob Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
func newGatewayLayer(backendType gatewayBackend, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {

    switch gatewayBackend(backendType) {
    case azureBackend:
        return newAzureLayer(endpoint, accessKey, secretKey, secure)
@@ -162,20 +228,64 @@ func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error)
    }
}

// Handler for 'minio gateway'.
func gatewayMain(ctx *cli.Context) {
    if !ctx.Args().Present() || ctx.Args().First() == "help" {
        cli.ShowCommandHelpAndExit(ctx, "gateway", 1)
// Validate gateway arguments.
func validateGatewayArguments(serverAddr, endpointAddr string) error {
    if err := CheckLocalServerAddr(serverAddr); err != nil {
        return err
    }

    if runtime.GOOS == "darwin" {
        _, port := mustSplitHostPort(serverAddr)
        // On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
        // to IPv6 address i.e. minio will start listening on IPv6 address whereas another
        // (non-)minio process is listening on IPv4 of given port.
        // To avoid this error situation we check for port availability only for macOS.
        if err := checkPortAvailability(port); err != nil {
            return err
        }
    }

    if endpointAddr != "" {
        // Reject the endpoint if it points to the gateway handler itself.
        sameTarget, err := sameLocalAddrs(endpointAddr, serverAddr)
        if err != nil {
            return err
        }
        if sameTarget {
            return errors.New("endpoint points to the local gateway")
        }
    }
    return nil
}

// Handler for 'minio gateway azure' command line.
func azureGatewayMain(ctx *cli.Context) {
    if ctx.Args().Present() && ctx.Args().First() == "help" {
        cli.ShowCommandHelpAndExit(ctx, "azure", 1)
    }

    gatewayMain(ctx, azureBackend)
}

// Handler for 'minio gateway s3' command line.
func s3GatewayMain(ctx *cli.Context) {
    if ctx.Args().Present() && ctx.Args().First() == "help" {
        cli.ShowCommandHelpAndExit(ctx, "s3", 1)
    }

    gatewayMain(ctx, s3Backend)
}

// Handler for 'minio gateway'.
func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
    // Fetch access and secret key from env.
    accessKey, secretKey := mustGetGatewayCredsFromEnv()

    // Fetch browser env setting
    mustSetBrowserSettingFromEnv()

    // Initialize new gateway config.
    //
    // TODO: add support for custom region when we add
    // support for S3 backend storage, currently this can
    // default to "us-east-1"

    newGatewayConfig(accessKey, secretKey, globalMinioDefaultRegion)

    // Get quiet flag from command line argument.
@@ -184,12 +294,14 @@ func gatewayMain(ctx *cli.Context) {
        log.EnableQuiet()
    }

    // First argument is selected backend type.
    backendType := ctx.Args().First()
    serverAddr := ctx.String("address")
    endpointAddr := ctx.Args().Get(0)
    err := validateGatewayArguments(serverAddr, endpointAddr)
    fatalIf(err, "Invalid argument")

    // Second argument is endpoint. If no endpoint is specified then the
    // gateway implementation should use a default setting.
    endPoint, secure, err := parseGatewayEndpoint(ctx.Args().Get(1))
    endPoint, secure, err := parseGatewayEndpoint(endpointAddr)
    fatalIf(err, "Unable to parse endpoint")

    // Create certs path for SSL configuration.
@@ -201,6 +313,15 @@ func gatewayMain(ctx *cli.Context) {
    initNSLock(false) // Enable local namespace lock.

    router := mux.NewRouter().SkipClean(true)

    // credentials Envs are set globally.
    globalIsEnvCreds = true

    // Register web router when it is enabled.
    if globalIsBrowserEnabled {
        aerr := registerWebRouter(router)
        fatalIf(aerr, "Unable to configure web browser")
    }
    registerGatewayAPIRouter(router, newObject)

    var handlerFns = []HandlerFunc{
@@ -211,6 +332,13 @@ func gatewayMain(ctx *cli.Context) {
        // Adds 'crossdomain.xml' policy handler to serve legacy flash clients.
        setCrossDomainPolicy,
        // Validates all incoming requests to have a valid date header.
        // Redirect some pre-defined browser request paths to a static location prefix.
        setBrowserRedirectHandler,
        // Validates if incoming request is for restricted buckets.
        setPrivateBucketHandler,
        // Adds cache control for all browser requests.
        setBrowserCacheControlHandler,
        // Validates all incoming requests to have a valid date header.
        setTimeValidityHandler,
        // CORS setting for all browser API requests.
        setCorsHandler,
@@ -221,9 +349,11 @@ func gatewayMain(ctx *cli.Context) {
        // routes them accordingly. Client receives a HTTP error for
        // invalid/unsupported signatures.
        setAuthHandler,
        // Add new handlers here.

    }

    apiServer := NewServerMux(ctx.String("address"), registerHandlers(router, handlerFns...))
    apiServer := NewServerMux(serverAddr, registerHandlers(router, handlerFns...))

    _, _, globalIsSSL, err = getSSLConfig()
    fatalIf(err, "Invalid SSL key file")

@@ -16,7 +16,11 @@

package cmd

import "testing"
import (
    "os"
    "strings"
    "testing"
)

// Test parseGatewayEndpoint
func TestParseGatewayEndpoint(t *testing.T) {
@@ -48,3 +52,57 @@ func TestParseGatewayEndpoint(t *testing.T) {
        }
    }
}
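
// parseGatewayEndpoint (exercised by the test above) reduces a user-supplied
// endpoint to a host plus a TLS flag. A rough sketch of what such parsing
// involves, assuming the scheme decides `secure` and a missing scheme defaults
// to plain HTTP (hypothetical code, not the gateway's exact logic):
//
//	import (
//		"fmt"
//		"net/url"
//		"strings"
//	)
//
//	func parseEndpointSketch(arg string) (endPoint string, secure bool, err error) {
//		if !strings.Contains(arg, "://") {
//			arg = "http://" + arg
//		}
//		u, err := url.Parse(arg)
//		if err != nil {
//			return "", false, err
//		}
//		switch u.Scheme {
//		case "http":
//			return u.Host, false, nil
//		case "https":
//			return u.Host, true, nil
//		default:
//			return "", false, fmt.Errorf("unsupported scheme %q", u.Scheme)
//		}
//	}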

func TestSetBrowserFromEnv(t *testing.T) {
    browser := os.Getenv("MINIO_BROWSER")

    os.Setenv("MINIO_BROWSER", "on")
    mustSetBrowserSettingFromEnv()
    if globalIsBrowserEnabled != true {
        t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, false)
    }

    os.Setenv("MINIO_BROWSER", "off")
    mustSetBrowserSettingFromEnv()
    if globalIsBrowserEnabled != false {
        t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true)
    }
    os.Setenv("MINIO_BROWSER", "")
    mustSetBrowserSettingFromEnv()
    if globalIsBrowserEnabled != false {
        t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true)
    }
    os.Setenv("MINIO_BROWSER", browser)
}

// Test validateGatewayArguments
func TestValidateGatewayArguments(t *testing.T) {
    nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
        return !strings.HasPrefix(ip, "127.")
    }, "")
    if len(nonLoopBackIPs) == 0 {
        t.Fatalf("No non-loop back IP address found for this host")
    }
    nonLoopBackIP := nonLoopBackIPs.ToSlice()[0]

    testCases := []struct {
        serverAddr   string
        endpointAddr string
        valid        bool
    }{
        {":9000", "http://localhost:9001", true},
        {":9000", "http://google.com", true},
        {"123.123.123.123:9000", "http://localhost:9000", false},
        {":9000", "http://localhost:9000", false},
        {":9000", nonLoopBackIP + ":9000", false},
    }
    for i, test := range testCases {
        err := validateGatewayArguments(test.serverAddr, test.endpointAddr)
        if test.valid && err != nil {
            t.Errorf("Test %d expected not to return error but got %s", i+1, err)
        }
        if !test.valid && err == nil {
            t.Errorf("Test %d expected to fail but it did not", i+1)
        }
    }
}

@@ -27,10 +27,11 @@ import (
type GatewayLayer interface {
    ObjectLayer

    MakeBucketWithLocation(bucket, location string) error

    AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
    AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)

    AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error)

    SetBucketPolicies(string, policy.BucketAccessPolicy) error
    GetBucketPolicies(string) (policy.BucketAccessPolicy, error)
    DeleteBucketPolicies(string) error

@@ -17,13 +17,44 @@

package cmd

import (
    "encoding/hex"
    "io"

    minio "github.com/minio/minio-go"
)

// AnonPutObject creates a new object anonymously with the incoming data.
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
    var sha256sumBytes []byte

    var err error
    if sha256sum != "" {
        sha256sumBytes, err = hex.DecodeString(sha256sum)
        if err != nil {
            return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
        }
    }

    var md5sumBytes []byte
    md5sum := metadata["etag"]
    if md5sum != "" {
        md5sumBytes, err = hex.DecodeString(md5sum)
        if err != nil {
            return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
        }
        delete(metadata, "etag")
    }

    oi, err := l.anonClient.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
    if err != nil {
        return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
    }

    return fromMinioClientObjectInfo(bucket, oi), nil
}

// AnonGetObject - Get object anonymously
func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
    r := minio.NewGetReqHeaders()
    if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
        return s3ToObjectError(traceError(err), bucket, key)
@@ -43,7 +74,7 @@ func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64,
}

// AnonGetObjectInfo - Get object info anonymously
func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
    r := minio.NewHeadReqHeaders()
    oi, err := l.anonClient.StatObject(bucket, object, r)
    if err != nil {
@@ -54,7 +85,7 @@ func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo,
}

// AnonListObjects - List objects anonymously
func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
    result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
    if err != nil {
        return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -64,7 +95,7 @@ func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string,
}

// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Gateway) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
    if exists, err := l.anonClient.BucketExists(bucket); err != nil {
        return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
    } else if !exists {

@@ -17,26 +17,26 @@
package cmd

// HealBucket - Not relevant.
func (l *s3Gateway) HealBucket(bucket string) error {
func (l *s3Objects) HealBucket(bucket string) error {
    return traceError(NotImplemented{})
}

// ListBucketsHeal - Not relevant.
func (l *s3Gateway) ListBucketsHeal() (buckets []BucketInfo, err error) {
func (l *s3Objects) ListBucketsHeal() (buckets []BucketInfo, err error) {
    return []BucketInfo{}, traceError(NotImplemented{})
}

// HealObject - Not relevant.
func (l *s3Gateway) HealObject(bucket string, object string) (int, int, error) {
func (l *s3Objects) HealObject(bucket string, object string) (int, int, error) {
    return 0, 0, traceError(NotImplemented{})
}

// ListObjectsHeal - Not relevant.
func (l *s3Gateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
    return ListObjectsInfo{}, traceError(NotImplemented{})
}

// ListUploadsHeal - Not relevant.
func (l *s3Gateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
    return ListMultipartsInfo{}, traceError(NotImplemented{})
}

@@ -17,12 +17,11 @@
package cmd

import (
    "encoding/hex"
    "io"
    "net/http"
    "path"

    "encoding/hex"

    minio "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/policy"
)
@@ -91,8 +90,8 @@ func s3ToObjectError(err error, params ...string) error {
    return e
}

// s3Gateway implements gateway for Minio and S3 compatible object storage servers.
type s3Gateway struct {
// s3Objects implements gateway for Minio and S3 compatible object storage servers.
type s3Objects struct {
    Client     *minio.Core
    anonClient *minio.Core
}
@@ -115,7 +114,7 @@ func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (Ga
        return nil, err
    }

    return &s3Gateway{
    return &s3Objects{
        Client:     client,
        anonClient: anonClient,
    }, nil
@@ -123,24 +122,18 @@ func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (Ga

// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *s3Gateway) Shutdown() error {
func (l *s3Objects) Shutdown() error {
    // TODO
    return nil
}

// StorageInfo is not relevant to S3 backend.
func (l *s3Gateway) StorageInfo() StorageInfo {
func (l *s3Objects) StorageInfo() StorageInfo {
    return StorageInfo{}
}

// MakeBucket creates a new container on S3 backend.
func (l *s3Gateway) MakeBucket(bucket string) error {
    // will never be called, only satisfy ObjectLayer interface
    return traceError(NotImplemented{})
}

// MakeBucket creates a new container on S3 backend.
func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
    err := l.Client.MakeBucket(bucket, location)
    if err != nil {
        return s3ToObjectError(traceError(err), bucket)
@@ -149,7 +142,7 @@ func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
}

// GetBucketInfo gets bucket metadata.
func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
    buckets, err := l.Client.ListBuckets()
    if err != nil {
        return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -170,7 +163,7 @@ func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
}

// ListBuckets lists all S3 buckets
func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
    buckets, err := l.Client.ListBuckets()
    if err != nil {
        return nil, err
@@ -188,7 +181,7 @@ func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
}

// DeleteBucket deletes a bucket on S3
func (l *s3Gateway) DeleteBucket(bucket string) error {
func (l *s3Objects) DeleteBucket(bucket string) error {
    err := l.Client.RemoveBucket(bucket)
    if err != nil {
        return s3ToObjectError(traceError(err), bucket)
@@ -197,7 +190,7 @@ func (l *s3Gateway) DeleteBucket(bucket string) error {
}

// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
    result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
    if err != nil {
        return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -207,7 +200,7 @@ func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, del
}

// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
    result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
    if err != nil {
        return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
@@ -266,10 +259,17 @@ func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResul
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *s3Gateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
    r := minio.NewGetReqHeaders()
    if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
        return s3ToObjectError(traceError(err), bucket, key)

    if length < 0 && length != -1 {
        return s3ToObjectError(traceError(errInvalidArgument), bucket, key)
    }

    if startOffset >= 0 && length >= 0 {
        if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
            return s3ToObjectError(traceError(err), bucket, key)
        }
    }
    object, _, err := l.Client.GetObject(bucket, key, r)
    if err != nil {
@@ -278,10 +278,9 @@ func (l *s3Gateway) GetObject(bucket string, key string, startOffset int64, leng

    defer object.Close()

    if _, err := io.CopyN(writer, object, length); err != nil {
    if _, err := io.Copy(writer, object); err != nil {
        return s3ToObjectError(traceError(err), bucket, key)
    }

    return nil
}
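
// The rewritten GetObject treats length == -1 as "read to the end of the
// object": the Range header is only set when both startOffset and length are
// non-negative, and io.Copy (unlike the earlier io.CopyN) does not need to
// know the length up front. The range math itself is inclusive on both ends:
//
//	// startOffset=100, length=50 requests bytes 100..149,
//	// i.e. r.SetRange(startOffset, startOffset+length-1).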

@@ -295,7 +294,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
        Name:            oi.Key,
        ModTime:         oi.LastModified,
        Size:            oi.Size,
        MD5Sum:          oi.ETag,
        ETag:            oi.ETag,
        UserDefined:     userDefined,
        ContentType:     oi.ContentType,
        ContentEncoding: oi.Metadata.Get("Content-Encoding"),
@@ -303,7 +302,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
}

// GetObjectInfo reads object info and replies back ObjectInfo
func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
    r := minio.NewHeadReqHeaders()
    oi, err := l.Client.StatObject(bucket, object, r)
    if err != nil {
@@ -314,7 +313,7 @@ func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectI
}

// PutObject creates a new object with the incoming data.
func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
    var sha256sumBytes []byte

    var err error
@@ -326,13 +325,13 @@ func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.
    }

    var md5sumBytes []byte
    md5sum := metadata["md5Sum"]
    md5sum := metadata["etag"]
    if md5sum != "" {
        md5sumBytes, err = hex.DecodeString(md5sum)
        if err != nil {
            return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
        }
        delete(metadata, "md5Sum")
        delete(metadata, "etag")
    }

    oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
@@ -344,7 +343,7 @@ func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.
}

// CopyObject copies a blob from source container to destination container.
func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
    err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
    if err != nil {
        return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
@@ -359,7 +358,7 @@ func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket st
}

// DeleteObject deletes a blob in bucket
func (l *s3Gateway) DeleteObject(bucket string, object string) error {
func (l *s3Objects) DeleteObject(bucket string, object string) error {
    err := l.Client.RemoveObject(bucket, object)
    if err != nil {
        return s3ToObjectError(traceError(err), bucket, object)
@@ -407,7 +406,7 @@ func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
}

// ListMultipartUploads lists all multipart uploads.
func (l *s3Gateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
    result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
    if err != nil {
        return ListMultipartsInfo{}, err
@@ -435,12 +434,12 @@ func toMinioClientMetadata(metadata map[string]string) map[string][]string {
}

// NewMultipartUpload uploads an object in multiple parts.
func (l *s3Gateway) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
    return l.Client.NewMultipartUpload(bucket, object, toMinioClientMetadata(metadata))
}

// CopyObjectPart copies a part of an object to another bucket and object.
func (l *s3Gateway) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
func (l *s3Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
    // FIXME: implement CopyObjectPart
    return PartInfo{}, traceError(NotImplemented{})
}
@@ -449,14 +448,14 @@ func (l *s3Gateway) CopyObjectPart(srcBucket string, srcObject string, destBucke
func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
    return PartInfo{
        Size:         op.Size,
        ETag:         op.ETag,
        ETag:         canonicalizeETag(op.ETag),
        LastModified: op.LastModified,
        PartNumber:   op.PartNumber,
    }
}
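
// canonicalizeETag, newly applied above, normalizes ETags coming back from the
// backend, which S3-compatible servers may or may not wrap in quotes. A
// minimal sketch of that normalization (an assumption about its intent, not
// the function's actual body):
//
//	import "strings"
//
//	func canonicalizeETagSketch(etag string) string {
//		// `"abc123"` and `abc123` both canonicalize to `abc123`.
//		return strings.Trim(etag, "\"")
//	}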

// PutObjectPart puts a part of object in bucket
func (l *s3Gateway) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
    md5HexBytes, err := hex.DecodeString(md5Hex)
    if err != nil {
        return PartInfo{}, err
@@ -501,7 +500,7 @@ func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInf
}

// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
    result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
    if err != nil {
        return ListPartsInfo{}, err
@@ -511,7 +510,7 @@ func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID strin
}

// AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Gateway) AbortMultipartUpload(bucket string, object string, uploadID string) error {
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
    return l.Client.AbortMultipartUpload(bucket, object, uploadID)
}

@@ -533,7 +532,7 @@ func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
}

// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
    err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
    if err != nil {
        return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
@@ -543,7 +542,7 @@ func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, upload
}

// SetBucketPolicies sets policy on bucket
func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
    if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
        return s3ToObjectError(traceError(err), bucket, "")
    }
@@ -552,7 +551,7 @@ func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
}

// GetBucketPolicies will get policy on bucket
func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
    policyInfo, err := l.Client.GetBucketPolicy(bucket)
    if err != nil {
        return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "")
@@ -561,10 +560,9 @@ func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
}

// DeleteBucketPolicies deletes all policies on bucket
func (l *s3Gateway) DeleteBucketPolicies(bucket string) error {
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
    if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
        return s3ToObjectError(traceError(err), bucket, "")
    }

    return nil
}

@@ -23,7 +23,7 @@ import (
)

// Prints the formatted startup message.
func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, backendType string) {
func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey string, backendType gatewayBackend) {
    // Prints credential.
    printGatewayCommonMsg(apiEndPoints, accessKey, secretKey)


@@ -29,7 +29,7 @@ import (
const (
    globalMinioCertExpireWarnDays = time.Hour * 24 * 30 // 30 days.

    globalMinioDefaultRegion = "us-east-1"
    globalMinioDefaultRegion = ""
    globalMinioDefaultOwnerID      = "minio"
    globalMinioDefaultStorageClass = "STANDARD"
    globalWindowsOSName            = "windows"
@@ -64,6 +64,7 @@ var (

    // This flag is set to 'true' by default
    globalIsBrowserEnabled = true

    // This flag is set to 'true' when MINIO_BROWSER env is set.
    globalIsEnvBrowser = false

@@ -72,6 +73,7 @@ var (

    // This flag is set to 'true' when MINIO_REGION env is set.
    globalIsEnvRegion = false

    // This flag is set to 'us-east-1' by default
    globalServerRegion = globalMinioDefaultRegion

@@ -128,3 +130,23 @@ var (
    colorBold = color.New(color.Bold).SprintFunc()
    colorBlue = color.New(color.FgBlue).SprintfFunc()
)

// Returns minio global information, as a key value map. The returned
// list of global values is not an exhaustive list. Feel free to add
// new relevant fields.
func getGlobalInfo() (globalInfo map[string]interface{}) {
    globalInfo = map[string]interface{}{
        "isDistXL":         globalIsDistXL,
        "isXL":             globalIsXL,
        "isBrowserEnabled": globalIsBrowserEnabled,
        "isEnvBrowser":     globalIsEnvBrowser,
        "isEnvCreds":       globalIsEnvCreds,
        "isEnvRegion":      globalIsEnvRegion,
        "isSSL":            globalIsSSL,
        "serverRegion":     globalServerRegion,
        "serverUserAgent":  globalServerUserAgent,
        // Add more relevant global settings here.
    }

    return globalInfo
}
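
// Since getGlobalInfo returns a plain map[string]interface{}, callers can hand
// it straight to encoding/json, for instance in an admin/info style response
// (illustrative usage only, not a handler introduced by this change):
//
//	buf, err := json.Marshal(getGlobalInfo())
//	if err != nil {
//		return err
//	}
//	w.Write(buf) // e.g. {"isBrowserEnabled":true,"isDistXL":false,...}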

@@ -38,7 +38,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
    } // else for both err as nil or io.EOF
    location = locationConstraint.Location
    if location == "" {
        location = globalMinioDefaultRegion
        location = serverConfig.GetRegion()
    }
    return location, ErrNone
}
@@ -46,7 +46,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
// Validates input location is same as configured region
// of Minio server.
func isValidLocation(location string) bool {
    return serverConfig.GetRegion() == location
    return serverConfig.GetRegion() == "" || serverConfig.GetRegion() == location
}
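
// With an empty configured region, the check above becomes a wildcard: any
// client-supplied LocationConstraint is accepted. Concretely (illustrative):
//
//	// configured region ""          -> isValidLocation("us-east-1") == true
//	// configured region ""          -> isValidLocation("eu-west-1") == true
//	// configured region "us-east-1" -> isValidLocation("eu-west-1") == false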

// Supported headers that need to be extracted.
@@ -134,7 +134,7 @@ func getRedirectPostRawQuery(objInfo ObjectInfo) string {
    redirectValues := make(url.Values)
    redirectValues.Set("bucket", objInfo.Bucket)
    redirectValues.Set("key", objInfo.Name)
    redirectValues.Set("etag", "\""+objInfo.MD5Sum+"\"")
    redirectValues.Set("etag", "\""+objInfo.ETag+"\"")
    return redirectValues.Encode()
}


@@ -44,7 +44,7 @@ func testAuthenticate(authType string, t *testing.T) {
        // Secret key too small.
        {"myuser", "pass", errInvalidSecretKeyLength},
        // Secret key too long.
        {"myuser", "pass1234567890123456789012345678901234567", errInvalidSecretKeyLength},
        {"myuser", "pass1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890", errInvalidSecretKeyLength},
        // Authentication error.
        {"myuser", "mypassword", errInvalidAccessKeyID},
        // Authentication error.

@@ -22,7 +22,7 @@ import (
)

// Similar to removeEntry but removes an entry only if the lock entry exists in map.
func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
    // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
    if lri, ok := l.lockMap[nlrip.name]; ok {
        if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) {
@@ -38,7 +38,7 @@ func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {

// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
    // Find correct entry to remove based on uid.
    for index, entry := range *lri {
        if entry.uid == uid {

@@ -38,9 +38,9 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
    nlrip := nameLockRequesterInfoPair{name: "name", lri: lri}

    // first test by simulating item has already been deleted
    locker.removeEntryIfExists(nlrip)
    locker.ll.removeEntryIfExists(nlrip)
    {
        gotLri, _ := locker.lockMap["name"]
        gotLri, _ := locker.ll.lockMap["name"]
        expectedLri := []lockRequesterInfo(nil)
        if !reflect.DeepEqual(expectedLri, gotLri) {
            t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -48,10 +48,10 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
    }

    // then test normal deletion
    locker.lockMap["name"] = []lockRequesterInfo{lri} // add item
    locker.removeEntryIfExists(nlrip)
    locker.ll.lockMap["name"] = []lockRequesterInfo{lri} // add item
    locker.ll.removeEntryIfExists(nlrip)
    {
        gotLri, _ := locker.lockMap["name"]
        gotLri, _ := locker.ll.lockMap["name"]
        expectedLri := []lockRequesterInfo(nil)
        if !reflect.DeepEqual(expectedLri, gotLri) {
            t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -81,32 +81,32 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
        timeLastCheck: UTCNow(),
    }

    locker.lockMap["name"] = []lockRequesterInfo{
    locker.ll.lockMap["name"] = []lockRequesterInfo{
        lockRequesterInfo1,
        lockRequesterInfo2,
    }

    lri, _ := locker.lockMap["name"]
    lri, _ := locker.ll.lockMap["name"]

    // test unknown uid
    if locker.removeEntry("name", "unknown-uid", &lri) {
    if locker.ll.removeEntry("name", "unknown-uid", &lri) {
        t.Errorf("Expected %#v, got %#v", false, true)
    }

    if !locker.removeEntry("name", "0123-4567", &lri) {
    if !locker.ll.removeEntry("name", "0123-4567", &lri) {
        t.Errorf("Expected %#v, got %#v", true, false)
    } else {
        gotLri, _ := locker.lockMap["name"]
        gotLri, _ := locker.ll.lockMap["name"]
        expectedLri := []lockRequesterInfo{lockRequesterInfo2}
        if !reflect.DeepEqual(expectedLri, gotLri) {
            t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
        }
    }

    if !locker.removeEntry("name", "89ab-cdef", &lri) {
    if !locker.ll.removeEntry("name", "89ab-cdef", &lri) {
        t.Errorf("Expected %#v, got %#v", true, false)
    } else {
        gotLri, _ := locker.lockMap["name"]
        gotLri, _ := locker.ll.lockMap["name"]
        expectedLri := []lockRequesterInfo(nil)
        if !reflect.DeepEqual(expectedLri, gotLri) {
            t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)

@@ -60,9 +60,7 @@ func isWriteLock(lri []lockRequesterInfo) bool {
// lockServer is the type for RPC handlers
type lockServer struct {
    AuthRPCServer
    serviceEndpoint string
    mutex           sync.Mutex
    lockMap         map[string][]lockRequesterInfo
    ll localLocker
}

// Start lock maintenance from all lock servers.
@@ -91,30 +89,11 @@ func startLockMaintainence(lockServers []*lockServer) {

// Register distributed NS lock handlers.
func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error {
    // Initialize a new set of lock servers.
    lockServers := newLockServers(endpoints)

    // Start lock maintenance from all lock servers.
    startLockMaintainence(lockServers)
    startLockMaintainence(globalLockServers)

    // Register initialized lock servers to their respective rpc endpoints.
    return registerStorageLockers(mux, lockServers)
}

// Create one lock server for every local storage rpc server.
func newLockServers(endpoints EndpointList) (lockServers []*lockServer) {
    for _, endpoint := range endpoints {
        // Initialize new lock server for each local node.
        if endpoint.IsLocal {
            lockServers = append(lockServers, &lockServer{
                serviceEndpoint: endpoint.Path,
                mutex:           sync.Mutex{},
                lockMap:         make(map[string][]lockRequesterInfo),
            })
        }
    }

    return lockServers
    return registerStorageLockers(mux, globalLockServers)
}

// registerStorageLockers - register locker rpc handlers for net/rpc library clients
@@ -125,129 +104,178 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
            return traceError(err)
        }
        lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
        lockRouter.Path(path.Join(lockServicePath, lockServer.serviceEndpoint)).Handler(lockRPCServer)
        lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer)
    }
    return nil
}

/// Distributed lock handlers
// localLocker implements Dsync.NetLocker
type localLocker struct {
    mutex           sync.Mutex
    serviceEndpoint string
    serverAddr      string
    lockMap         map[string][]lockRequesterInfo
}

// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
func (l *localLocker) ServerAddr() string {
    return l.serverAddr
}

func (l *localLocker) ServiceEndpoint() string {
    return l.serviceEndpoint
}

func (l *localLocker) Lock(args dsync.LockArgs) (reply bool, err error) {
    l.mutex.Lock()
    defer l.mutex.Unlock()
    if err := args.IsAuthenticated(); err != nil {
        return err
    }
    _, *reply = l.lockMap[args.LockArgs.Resource]
    if !*reply { // No locks held on the given name, so claim write lock
        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{
    _, isLockTaken := l.lockMap[args.Resource]
    if !isLockTaken { // No locks held on the given name, so claim write lock
        l.lockMap[args.Resource] = []lockRequesterInfo{
            {
                writer:          true,
                node:            args.LockArgs.ServerAddr,
                serviceEndpoint: args.LockArgs.ServiceEndpoint,
                uid:             args.LockArgs.UID,
                node:            args.ServerAddr,
                serviceEndpoint: args.ServiceEndpoint,
                uid:             args.UID,
                timestamp:       UTCNow(),
                timeLastCheck:   UTCNow(),
            },
        }
    }
    *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
    return nil
    // return reply=true if lock was granted.
    return !isLockTaken, nil
}

// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
func (l *localLocker) Unlock(args dsync.LockArgs) (reply bool, err error) {
    l.mutex.Lock()
    defer l.mutex.Unlock()
    if err := args.IsAuthenticated(); err != nil {
        return err
    }
    var lri []lockRequesterInfo
    if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
        return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
    if lri, reply = l.lockMap[args.Resource]; !reply {
        // No lock is held on the given name
        return reply, fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resource)
    }
    if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
        return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.LockArgs.Resource, len(lri))
    if reply = isWriteLock(lri); !reply {
        // Unless it is a write lock
        return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resource, len(lri))
    }
    if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
        return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.LockArgs.UID)
    if !l.removeEntry(args.Resource, args.UID, &lri) {
        return false, fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
    }
    return nil
    return true, nil

}

// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
func (l *localLocker) RLock(args dsync.LockArgs) (reply bool, err error) {
    l.mutex.Lock()
    defer l.mutex.Unlock()
    if err := args.IsAuthenticated(); err != nil {
        return err
    }
    lrInfo := lockRequesterInfo{
        writer:          false,
        node:            args.LockArgs.ServerAddr,
        serviceEndpoint: args.LockArgs.ServiceEndpoint,
        uid:             args.LockArgs.UID,
        node:            args.ServerAddr,
        serviceEndpoint: args.ServiceEndpoint,
        uid:             args.UID,
        timestamp:       UTCNow(),
        timeLastCheck:   UTCNow(),
    }
    if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
        if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
            l.lockMap[args.LockArgs.Resource] = append(l.lockMap[args.LockArgs.Resource], lrInfo)
    if lri, ok := l.lockMap[args.Resource]; ok {
        if reply = !isWriteLock(lri); reply {
            // Unless there is a write lock
            l.lockMap[args.Resource] = append(l.lockMap[args.Resource], lrInfo)
        }
    } else { // No locks held on the given name, so claim (first) read lock
        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{lrInfo}
        *reply = true
    } else {
        // No locks held on the given name, so claim (first) read lock
        l.lockMap[args.Resource] = []lockRequesterInfo{lrInfo}
        reply = true
    }
    return nil
    return reply, nil
}

func (l *localLocker) RUnlock(args dsync.LockArgs) (reply bool, err error) {
    l.mutex.Lock()
    defer l.mutex.Unlock()
    var lri []lockRequesterInfo
    if lri, reply = l.lockMap[args.Resource]; !reply {
        // No lock is held on the given name
        return reply, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resource)
    }
    if reply = !isWriteLock(lri); !reply {
        // A write-lock is held, cannot release a read lock
        return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resource)
    }
    if !l.removeEntry(args.Resource, args.UID, &lri) {
        return false, fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
    }
    return reply, nil
}

func (l *localLocker) ForceUnlock(args dsync.LockArgs) (reply bool, err error) {
    l.mutex.Lock()
    defer l.mutex.Unlock()
    if len(args.UID) != 0 {
        return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
    }
    if _, ok := l.lockMap[args.Resource]; ok {
        // Only clear lock when it is taken
        // Remove the lock (irrespective of write or read lock)
        delete(l.lockMap, args.Resource)
    }
    return true, nil
}
|
||||
|
||||
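Editor's note: the new localLocker above carries the lock table that previously lived directly on lockServer; because it satisfies dsync's NetLocker contract, a local endpoint can be handed the struct itself instead of an RPC client. A minimal sketch of the interface shape implied by the method set above — the real definition lives in github.com/minio/dsync, and the name netLocker here is an assumption for illustration:

// Sketch only, inferred from the methods localLocker implements above.
type netLocker interface {
	Lock(args dsync.LockArgs) (bool, error)
	Unlock(args dsync.LockArgs) (bool, error)
	RLock(args dsync.LockArgs) (bool, error)
	RUnlock(args dsync.LockArgs) (bool, error)
	ForceUnlock(args dsync.LockArgs) (bool, error)
	ServerAddr() string      // address of the server hosting the locks
	ServiceEndpoint() string // RPC path serving the lock service
}
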
/// Distributed lock handlers

// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) (err error) {
if err = args.IsAuthenticated(); err != nil {
return err
}
*reply, err = l.ll.Lock(args.LockArgs)
return err
}

// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) (err error) {
if err = args.IsAuthenticated(); err != nil {
return err
}
*reply, err = l.ll.Unlock(args.LockArgs)
return err
}

// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) (err error) {
if err = args.IsAuthenticated(); err != nil {
return err
}
*reply, err = l.ll.RLock(args.LockArgs)
return err
}

// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := args.IsAuthenticated(); err != nil {
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) (err error) {
if err = args.IsAuthenticated(); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
}
if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.LockArgs.Resource)
}
if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.LockArgs.UID)
}
return nil
*reply, err = l.ll.RUnlock(args.LockArgs)
return err
}

// ForceUnlock - rpc handler for force unlock operation.
func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := args.IsAuthenticated(); err != nil {
func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) (err error) {
if err = args.IsAuthenticated(); err != nil {
return err
}
if len(args.LockArgs.UID) != 0 {
return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.LockArgs.UID)
}
if _, ok := l.lockMap[args.LockArgs.Resource]; ok { // Only clear lock when set
delete(l.lockMap, args.LockArgs.Resource) // Remove the lock (irrespective of write or read lock)
}
*reply = true
return nil
*reply, err = l.ll.ForceUnlock(args.LockArgs)
return err
}

// Expired - rpc handler for expired lock status.
func (l *lockServer) Expired(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := args.IsAuthenticated(); err != nil {
return err
}
l.ll.mutex.Lock()
defer l.ll.mutex.Unlock()
// Lock found, proceed to verify if it belongs to the given uid.
if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
if lri, ok := l.ll.lockMap[args.LockArgs.Resource]; ok {
// Check whether uid is still active
for _, entry := range lri {
if entry.uid == args.LockArgs.UID {
@@ -277,10 +305,10 @@ type nameLockRequesterInfoPair struct {
//
// We will ignore the error, and we will retry later to resolve this lock
func (l *lockServer) lockMaintenance(interval time.Duration) {
l.mutex.Lock()
l.ll.mutex.Lock()
// Get list of long lived locks to check for staleness.
nlripLongLived := getLongLivedLocks(l.lockMap, interval)
l.mutex.Unlock()
nlripLongLived := getLongLivedLocks(l.ll.lockMap, interval)
l.ll.mutex.Unlock()

serverCred := serverConfig.GetCredential()
// Validate if long lived locks are indeed clean.
@@ -308,9 +336,9 @@ func (l *lockServer) lockMaintenance(interval time.Duration) {
if expired {
// The lock is no longer active at the server that originated the lock
// So remove the lock from the map.
l.mutex.Lock()
l.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
l.mutex.Unlock()
l.ll.mutex.Lock()
l.ll.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
l.ll.mutex.Unlock()
}
}
}
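Editor's note: the maintenance loop above deliberately holds the localLocker mutex only while snapshotting candidates, then probes origin servers without blocking new lock traffic. A hedged sketch of that snapshot step, using only names visible in the diff:

// Copy the long-lived candidates under the lock, release it, then
// verify each candidate over the network at leisure.
func snapshotLongLived(ll *localLocker, interval time.Duration) []nameLockRequesterInfoPair {
	ll.mutex.Lock()
	defer ll.mutex.Unlock()
	return getLongLivedLocks(ll.lockMap, interval)
}
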

@@ -49,10 +49,12 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) {
}

locker := &lockServer{
AuthRPCServer: AuthRPCServer{},
serviceEndpoint: "rpc-path",
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
AuthRPCServer: AuthRPCServer{},
ll: localLocker{
mutex: sync.Mutex{},
serviceEndpoint: "rpc-path",
lockMap: make(map[string][]lockRequesterInfo),
},
}
creds := serverConfig.GetCredential()
loginArgs := LoginRPCArgs{
@@ -93,7 +95,7 @@ func TestLockRpcServerLock(t *testing.T) {
if !result {
t.Errorf("Expected %#v, got %#v", true, result)
} else {
gotLri, _ := locker.lockMap["name"]
gotLri, _ := locker.ll.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: true,
@@ -163,7 +165,7 @@ func TestLockRpcServerUnlock(t *testing.T) {
if !result {
t.Errorf("Expected %#v, got %#v", true, result)
} else {
gotLri, _ := locker.lockMap["name"]
gotLri, _ := locker.ll.lockMap["name"]
expectedLri := []lockRequesterInfo(nil)
if !testLockEquality(expectedLri, gotLri) {
t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -194,7 +196,7 @@ func TestLockRpcServerRLock(t *testing.T) {
if !result {
t.Errorf("Expected %#v, got %#v", true, result)
} else {
gotLri, _ := locker.lockMap["name"]
gotLri, _ := locker.ll.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: false,
@@ -281,7 +283,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
if !result {
t.Errorf("Expected %#v, got %#v", true, result)
} else {
gotLri, _ := locker.lockMap["name"]
gotLri, _ := locker.ll.lockMap["name"]
expectedLri := []lockRequesterInfo{
{
writer: false,
@@ -305,7 +307,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
if !result {
t.Errorf("Expected %#v, got %#v", true, result)
} else {
gotLri, _ := locker.lockMap["name"]
gotLri, _ := locker.ll.lockMap["name"]
expectedLri := []lockRequesterInfo(nil)
if !testLockEquality(expectedLri, gotLri) {
t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -427,6 +429,12 @@ func TestLockServers(t *testing.T) {
return
}

rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
defer removeAll(rootPath)

currentIsDistXL := globalIsDistXL
defer func() {
globalIsDistXL = currentIsDistXL
@@ -471,9 +479,13 @@ func TestLockServers(t *testing.T) {
// Validates lock server initialization.
for i, testCase := range testCases {
globalIsDistXL = testCase.isDistXL
lockServers := newLockServers(testCase.endpoints)
if len(lockServers) != testCase.totalLockServers {
t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(lockServers))
globalLockServers = nil
_, _ = newDsyncNodes(testCase.endpoints)
if err != nil {
t.Fatalf("Got unexpected error initializing lock servers: %v", err)
}
if len(globalLockServers) != testCase.totalLockServers {
t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(globalLockServers))
}
}
}

10 cmd/main.go
@@ -18,6 +18,7 @@ package cmd

import (
"os"
"path/filepath"
"sort"

"github.com/minio/cli"
@@ -59,7 +60,7 @@ VERSION:
` + Version +
`{{ "\n"}}`

func newApp() *cli.App {
func newApp(name string) *cli.App {
// Collection of minio commands currently supported are.
commands := []cli.Command{}

@@ -108,7 +109,7 @@ func newApp() *cli.App {
}

app := cli.NewApp()
app.Name = "Minio"
app.Name = name
app.Author = "Minio.io"
app.Version = Version
app.Usage = "Cloud Storage Server."
@@ -137,10 +138,11 @@ func newApp() *cli.App {

// Main - main for minio server.
func Main(args []string) {
app := newApp()
// Set the minio app name.
appName := filepath.Base(args[0])

// Run the app - exit on error.
if err := app.Run(args); err != nil {
if err := newApp(appName).Run(args); err != nil {
os.Exit(1)
}
}
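Editor's note: the practical effect of the newApp(name) change is that the CLI's displayed name tracks the binary name rather than a hard-coded "Minio". A minimal sketch, using only the names visible in the diff:

// If the server binary is renamed or symlinked, help output follows suit.
appName := filepath.Base(args[0]) // "minio" for /usr/local/bin/minio
if err := newApp(appName).Run(args); err != nil {
	os.Exit(1)
}
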

@@ -27,6 +27,9 @@ import (
// Global name space lock.
var globalNSMutex *nsLockMap

// Global lock servers
var globalLockServers []*lockServer

// RWLocker - locker interface extends sync.Locker
// to introduce RLock, RUnlock.
type RWLocker interface {
@@ -36,27 +39,45 @@ type RWLocker interface {
}

// Initialize distributed locking only in case of distributed setup.
// Returns if the setup is distributed or not on success.
func initDsyncNodes() error {
// Returns lock clients and the node index for the current server.
func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int) {
cred := serverConfig.GetCredential()
// Initialize rpc lock client information only if this instance is a distributed setup.
clnts := make([]dsync.NetLocker, len(globalEndpoints))
myNode := -1
for index, endpoint := range globalEndpoints {
clnts[index] = newLockRPCClient(authConfig{
accessKey: cred.AccessKey,
secretKey: cred.SecretKey,
serverAddr: endpoint.Host,
secureConn: globalIsSSL,
serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
serviceName: lockServiceName,
})
if endpoint.IsLocal && myNode == -1 {
clnts = make([]dsync.NetLocker, len(endpoints))
myNode = -1
for index, endpoint := range endpoints {
if !endpoint.IsLocal {
// For a remote endpoint, set up a lock RPC client.
clnts[index] = newLockRPCClient(authConfig{
accessKey: cred.AccessKey,
secretKey: cred.SecretKey,
serverAddr: endpoint.Host,
secureConn: globalIsSSL,
serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
serviceName: lockServiceName,
})
continue
}

// Local endpoint
if myNode == -1 {
myNode = index
}
// For a local endpoint, set up a local lock server to
// avoid network requests.
localLockServer := lockServer{
AuthRPCServer: AuthRPCServer{},
ll: localLocker{
mutex: sync.Mutex{},
serviceEndpoint: endpoint.Path,
serverAddr: endpoint.Host,
lockMap: make(map[string][]lockRequesterInfo),
},
}
globalLockServers = append(globalLockServers, &localLockServer)
clnts[index] = &(localLockServer.ll)
}

return dsync.Init(clnts, myNode)
return clnts, myNode
}
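Editor's note: a hedged sketch of how a caller might consume newDsyncNodes at startup; the exact call site is not part of this diff, and the fatal-error handling is an assumption. Local endpoints receive a pointer to the in-process localLocker while remote ones get an RPC client, so single-node lock paths never touch the network:

// Assumed wiring, for illustration only.
clnts, myNode := newDsyncNodes(globalEndpoints)
if err := dsync.Init(clnts, myNode); err != nil {
	// dsync.Init's error return is visible in the removed code above;
	// fatalIf here is a hypothetical error helper.
	fatalIf(err, "Unable to initialize distributed locking")
}
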

// initNSLock - initialize name space lock map.
@@ -240,14 +261,12 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
if _, found := n.lockMap[param]; found {
// Remove lock from the map.
delete(n.lockMap, param)

// delete the lock state entry for given
// <volume, path> pair.
err := n.deleteLockInfoEntryForVolumePath(param)
if err != nil {
errorIf(err, "Failed to delete lock info entry")
}
}

// delete the lock state entry for given
// <volume, path> pair. Ignore error as there
// is no way to report it back
n.deleteLockInfoEntryForVolumePath(param)
}

// lockInstance - frontend/top-level interface for namespace locks.

@@ -122,6 +122,15 @@ func (d *naughtyDisk) ReadFile(volume string, path string, offset int64, buf []b
return d.disk.ReadFile(volume, path, offset, buf)
}

func (d *naughtyDisk) ReadFileWithVerify(volume, path string, offset int64,
buf []byte, algo HashAlgo, expectedHash string) (n int64, err error) {

if err := d.calcError(); err != nil {
return 0, err
}
return d.disk.ReadFileWithVerify(volume, path, offset, buf, algo, expectedHash)
}

func (d *naughtyDisk) PrepareFile(volume, path string, length int64) error {
if err := d.calcError(); err != nil {
return err

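Editor's note: the naughtyDisk fault-injection wrapper gains a passthrough for the new ReadFileWithVerify API, which reads and hash-checks in a single call. A hedged usage sketch; the variable names, blockSize, and the HashAlgo value are assumptions for illustration:

buf := make([]byte, blockSize) // blockSize assumed
n, err := disk.ReadFileWithVerify(volume, path, 0, buf, algo, expectedHash)
if err != nil {
	// A verification mismatch surfaces as an error, same as an I/O failure.
	return err
}
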
128 cmd/net.go
@@ -17,11 +17,14 @@
package cmd

import (
"errors"
"fmt"
"net"
"net/url"
"os"
"sort"
"strconv"
"strings"
"syscall"

"github.com/minio/minio-go/pkg/set"
@@ -186,6 +189,121 @@ func checkPortAvailability(port string) (err error) {
return nil
}

// extractHostPort - extracts host/port from many address formats
// such as, ":9000", "localhost:9000", "http://localhost:9000/"
func extractHostPort(hostAddr string) (string, string, error) {
var addr, scheme string

if hostAddr == "" {
return "", "", errors.New("unable to process empty address")
}

// Parse address to extract host and scheme field
u, err := url.Parse(hostAddr)
if err != nil {
// Ignore scheme not present error
if !strings.Contains(err.Error(), "missing protocol scheme") {
return "", "", err
}
} else {
addr = u.Host
scheme = u.Scheme
}

// Use the given parameter again if url.Parse()
// didn't return any useful result.
if addr == "" {
addr = hostAddr
scheme = "http"
}

// At this point, addr can be one of the following forms:
// ":9000"
// "localhost:9000"
// "localhost" <- in this case, we check for scheme

host, port, err := net.SplitHostPort(addr)
if err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return "", "", err
}

host = addr

switch scheme {
case "https":
port = "443"
case "http":
port = "80"
default:
return "", "", errors.New("unable to guess port from scheme")
}
}

return host, port, nil
}
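Editor's note: a behavior sketch for extractHostPort, taken directly from the TestExtractHostPort table further down:

host, port, err := extractHostPort("http://8.8.8.8:9000/")
// host == "8.8.8.8", port == "9000", err == nil
host, port, err = extractHostPort("localhost")
// host == "localhost", port == "80" (no port given, scheme defaults to http)
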

// isLocalHost - checks if the given parameter
// corresponds to one of the local IPs of the
// current machine
func isLocalHost(host string) (bool, error) {
hostIPs, err := getHostIP4(host)
if err != nil {
return false, err
}

// If intersection of two IP sets is not empty, then the host is local host.
isLocal := !localIP4.Intersection(hostIPs).IsEmpty()
return isLocal, nil
}

// sameLocalAddrs - returns true if two addresses, even with different
// formats, point to the same machine, e.g:
// ':9000' and 'http://localhost:9000/' will return true
func sameLocalAddrs(addr1, addr2 string) (bool, error) {

// Extract host & port from given parameters
host1, port1, err := extractHostPort(addr1)
if err != nil {
return false, err
}
host2, port2, err := extractHostPort(addr2)
if err != nil {
return false, err
}

var addr1Local, addr2Local bool

if host1 == "" {
// An empty host means it is localhost
addr1Local = true
} else {
// Host not empty, check if it is local
if addr1Local, err = isLocalHost(host1); err != nil {
return false, err
}
}

if host2 == "" {
// An empty host means it is localhost
addr2Local = true
} else {
// Host not empty, check if it is local
if addr2Local, err = isLocalHost(host2); err != nil {
return false, err
}
}

// If both addresses point to the same machine, check if
// they have the same port
if addr1Local && addr2Local {
if port1 == port2 {
return true, nil
}
}
return false, nil
}
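Editor's note: a behavior sketch for sameLocalAddrs, mirroring the TestSameLocalAddrs cases below:

same, err := sameLocalAddrs("localhost:9000", "http://localhost:9000")
// same == true: both hosts resolve locally and the ports match
same, err = sameLocalAddrs("8.8.8.8:9000", "http://localhost:9000")
// same == false: only one side is a local address
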

// CheckLocalServerAddr - checks if serverAddr is valid and local host.
func CheckLocalServerAddr(serverAddr string) error {
host, port, err := net.SplitHostPort(serverAddr)
@@ -201,13 +319,15 @@ func CheckLocalServerAddr(serverAddr string) error {
return fmt.Errorf("port number must be between 1 to 65535")
}

if host != "" {
hostIPs, err := getHostIP4(host)
// 0.0.0.0 is a wildcard address and refers to local network
// addresses. I.e, 0.0.0.0:9000 like ":9000" refers to port
// 9000 on localhost.
if host != "" && host != net.IPv4zero.String() {
isLocalHost, err := isLocalHost(host)
if err != nil {
return err
}

if localIP4.Intersection(hostIPs).IsEmpty() {
if !isLocalHost {
return fmt.Errorf("host in server address should be this server")
}
}

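Editor's note: the practical effect of the wildcard special case above, matching the new test entry below:

err := CheckLocalServerAddr("0.0.0.0:9000")     // nil: the wildcard binds local interfaces
err = CheckLocalServerAddr("example.org:54321") // error: host must be this server
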
@@ -17,6 +17,7 @@
package cmd

import (
"errors"
"fmt"
"net"
"reflect"
@@ -220,6 +221,7 @@ func TestCheckLocalServerAddr(t *testing.T) {
}{
{":54321", nil},
{"localhost:54321", nil},
{"0.0.0.0:9000", nil},
{"", fmt.Errorf("missing port in address")},
{"localhost", fmt.Errorf("missing port in address localhost")},
{"example.org:54321", fmt.Errorf("host in server address should be this server")},
@@ -240,3 +242,77 @@ func TestCheckLocalServerAddr(t *testing.T) {
}
}
}

func TestExtractHostPort(t *testing.T) {
testCases := []struct {
addr string
host string
port string
expectedErr error
}{
{"", "", "", errors.New("unable to process empty address")},
{"localhost", "localhost", "80", nil},
{"localhost:9000", "localhost", "9000", nil},
{"http://:9000/", "", "9000", nil},
{"http://8.8.8.8:9000/", "8.8.8.8", "9000", nil},
{"https://facebook.com:9000/", "facebook.com", "9000", nil},
}

for i, testCase := range testCases {
host, port, err := extractHostPort(testCase.addr)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("Test %d: should succeed but failed with err: %v", i+1, err)
}
if host != testCase.host {
t.Fatalf("Test %d: expected: %v, found: %v", i+1, testCase.host, host)
}
if port != testCase.port {
t.Fatalf("Test %d: expected: %v, found: %v", i+1, testCase.port, port)
}

}
if testCase.expectedErr != nil {
if err == nil {
t.Fatalf("Test %d: should fail but succeeded.", i+1)
}
if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("Test %d: failed with different error, expected: '%v', found: '%v'.", i+1, testCase.expectedErr, err)
}
}
}
}

func TestSameLocalAddrs(t *testing.T) {
testCases := []struct {
addr1 string
addr2 string
sameAddr bool
expectedErr error
}{
{"", "", false, errors.New("unable to process empty address")},
{":9000", ":9000", true, nil},
{"localhost:9000", ":9000", true, nil},
{"localhost:9000", "http://localhost:9000", true, nil},
{"8.8.8.8:9000", "http://localhost:9000", false, nil},
}

for i, testCase := range testCases {
sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2)
if testCase.expectedErr != nil && err == nil {
t.Fatalf("Test %d: should fail but succeeded", i+1)
}
if testCase.expectedErr == nil && err != nil {
t.Fatalf("Test %d: should succeed but failed with %v", i+1, err)
}
if err == nil {
if sameAddr != testCase.sameAddr {
t.Fatalf("Test %d: expected: %v, found: %v", i+1, testCase.sameAddr, sameAddr)
}
} else {
if err.Error() != testCase.expectedErr.Error() {
t.Fatalf("Test %d: failed with different error, expected: '%v', found: '%v'.", i+1, testCase.expectedErr, err)
}
}
}
}

@@ -32,8 +32,8 @@ const (
// Buckets meta prefix.
bucketMetaPrefix = "buckets"

// Md5Sum of empty string.
emptyStrMd5Sum = "d41d8cd98f00b204e9800998ecf8427e"
// ETag (hex encoded md5sum) of empty string.
emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)

// Global object layer mutex, used for safely updating object layer.
@@ -65,10 +65,10 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string
// This is a special case with size as '0' and object ends with
// a slash separator, we treat it like a valid operation and
// return success.
md5Sum := metadata["md5Sum"]
delete(metadata, "md5Sum")
if md5Sum == "" {
md5Sum = emptyStrMd5Sum
etag := metadata["etag"]
delete(metadata, "etag")
if etag == "" {
etag = emptyETag
}

return ObjectInfo{
@@ -78,7 +78,7 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string
ContentType: "application/octet-stream",
IsDir: true,
Size: size,
MD5Sum: md5Sum,
ETag: etag,
UserDefined: metadata,
}
}

@@ -101,8 +101,8 @@ type ObjectInfo struct {
// IsDir indicates if the object is prefix.
IsDir bool

// Hex encoded md5 checksum of the object.
MD5Sum string
// Hex encoded unique entity tag of the object.
ETag string

// A standard MIME type describing the format of the object.
ContentType string

@@ -39,7 +39,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := getRandomBucketName()
objectName := "test-object"
// create bucket.
err := obj.MakeBucket(bucketName)
err := obj.MakeBucketWithLocation(bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -192,7 +192,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// Setup for the tests.
bucketName := getRandomBucketName()
// create bucket.
err := obj.MakeBucket(bucketName)
err := obj.MakeBucketWithLocation(bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -303,7 +303,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
bucketName := getRandomBucketName()
objectName := "test-object"
// create bucket.
err := obj.MakeBucket(bucketName)
err := obj.MakeBucketWithLocation(bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())

@@ -29,7 +29,7 @@ func TestGetObjectInfo(t *testing.T) {
// Testing GetObjectInfo().
func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
// This bucket is used for testing getObjectInfo operations.
err := obj.MakeBucket("test-getobjectinfo")
err := obj.MakeBucketWithLocation("test-getobjectinfo", "")
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@@ -69,8 +69,8 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-getobjectinfo", "Antartica", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Antartica"}, false},
{"test-getobjectinfo", "Asia/myfile", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia/myfile"}, false},
// Test case with existing bucket but object name set to a directory (Test number 12).
{"test-getobjectinfo", "Asia", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia"}, false},
// Test case with existing bucket but object name with a trailing slash (Test number 13).
{"test-getobjectinfo", "Asia/", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia/"}, false},
// Valid case with existing object (Test number 14).
{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
}
for i, testCase := range testCases {

@@ -36,6 +36,10 @@ func checkBucketAndObjectNames(bucket, object string) error {
}
// Verify if object is valid.
if !IsValidObjectName(object) {
// Objects with "/" are invalid, verify to return a different error.
if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) {
return traceError(ObjectNotFound{Bucket: bucket, Object: object})
}
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
return nil

@@ -25,7 +25,7 @@ type ObjectLayer interface {
StorageInfo() StorageInfo

// Bucket operations.
MakeBucket(bucket string) error
MakeBucketWithLocation(bucket string, location string) error
GetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
ListBuckets() (buckets []BucketInfo, err error)
DeleteBucket(bucket string) error

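Editor's note: migration sketch for ObjectLayer callers — every MakeBucket call in the tests below becomes the two-argument form, with "" selecting the default location. The non-default region value here is a hypothetical example:

err := obj.MakeBucketWithLocation("my-bucket", "")         // was: obj.MakeBucket("my-bucket")
err = obj.MakeBucketWithLocation("eu-bucket", "eu-west-1") // hypothetical explicit location
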
@@ -41,7 +41,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
"empty-bucket",
}
for _, bucket := range testBuckets {
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@@ -549,7 +549,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
if testCase.result.Objects[j].Name != result.Objects[j].Name {
t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, result.Objects[j].Name)
}
if result.Objects[j].MD5Sum == "" {
if result.Objects[j].ETag == "" {
t.Errorf("Test %d: %s: Expected etag to be not empty, but found empty instead", i+1, instanceType)
}

@@ -599,7 +599,7 @@ func BenchmarkListObjects(b *testing.B) {

bucket := "ls-benchmark-bucket"
// Create a bucket.
err = obj.MakeBucket(bucket)
err = obj.MakeBucketWithLocation(bucket, "")
if err != nil {
b.Fatal(err)
}

@@ -52,7 +52,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
}

// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucket(bucket)
err = obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -91,7 +91,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
object := "minio-object"

// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -137,7 +137,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
object := "minio-object"

// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -173,7 +173,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucketNames[0])
err := obj.MakeBucketWithLocation(bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -253,7 +253,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
object := "minio-object"

// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -265,7 +265,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Creating a dummy bucket for tests.
err = obj.MakeBucket("unused-bucket")
err = obj.MakeBucketWithLocation("unused-bucket", "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -386,7 +386,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucketNames[0])
err := obj.MakeBucketWithLocation(bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -404,7 +404,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0].
// uploadIds [1-3].
// Bucket to test for multiple upload IDs for a given object.
err = obj.MakeBucket(bucketNames[1])
err = obj.MakeBucketWithLocation(bucketNames[1], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -425,7 +425,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// bucketnames[2].
// objectNames[0-2].
// uploadIds [4-9].
err = obj.MakeBucket(bucketNames[2])
err = obj.MakeBucketWithLocation(bucketNames[2], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -1288,7 +1288,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucketNames[0])
err := obj.MakeBucketWithLocation(bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -1531,7 +1531,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucketNames[0])
err := obj.MakeBucketWithLocation(bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -1769,7 +1769,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucket(bucketNames[0])
err = obj.MakeBucketWithLocation(bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@@ -1782,8 +1782,8 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
}

uploadIDs = append(uploadIDs, uploadID)
// Parts with size greater than 5 MB.
// Generating a 6MB byte array.
// Parts with size greater than 5 MiB.
// Generating a 6 MiB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart)
// Create multipart parts.
@@ -1930,7 +1930,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
if actualErr == nil && testCase.shouldPass {

// Asserting IsTruncated.
if actualResult.MD5Sum != testCase.expectedS3MD5 {
if actualResult.ETag != testCase.expectedS3MD5 {
t.Errorf("Test %d: %s: Expected the result to be \"%v\", but found it to be \"%v\"", i+1, instanceType, testCase.expectedS3MD5, actualResult)
}
}

@@ -29,7 +29,7 @@ import (
)

func md5Header(data []byte) map[string]string {
return map[string]string{"md5Sum": getMD5Hash([]byte(data))}
return map[string]string{"etag": getMD5Hash([]byte(data))}
}

// Wrapper for calling PutObject tests for both XL multiple disks and single node setup.
@@ -44,14 +44,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
object := "minio-object"

// Create bucket.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

// Creating a dummy bucket for tests.
err = obj.MakeBucket("unused-bucket")
err = obj.MakeBucketWithLocation("unused-bucket", "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -94,29 +94,29 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl

// Test case - 7.
// Input to replicate Md5 mismatch.
{bucket, object, []byte(""), map[string]string{"md5Sum": "a35"}, "", 0, "",
{bucket, object, []byte(""), map[string]string{"etag": "a35"}, "", 0, "",
BadDigest{ExpectedMD5: "a35", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}},

// Test case - 8.
// With incorrect sha256.
{bucket, object, []byte("abcd"), map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, "incorrect-sha256", int64(len("abcd")), "", SHA256Mismatch{}},
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "incorrect-sha256", int64(len("abcd")), "", SHA256Mismatch{}},

// Test case - 9.
// Input with size more than the size of actual data inside the reader.
{bucket, object, []byte("abcd"), map[string]string{"md5Sum": "a35"}, "", int64(len("abcd") + 1), "",
{bucket, object, []byte("abcd"), map[string]string{"etag": "a35"}, "", int64(len("abcd") + 1), "",
IncompleteBody{}},

// Test case - 10.
// Input with size less than the size of actual data inside the reader.
{bucket, object, []byte("abcd"), map[string]string{"md5Sum": "a35"}, "", int64(len("abcd") - 1), "",
{bucket, object, []byte("abcd"), map[string]string{"etag": "a35"}, "", int64(len("abcd") - 1), "",
BadDigest{ExpectedMD5: "a35", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}},

// Test case - 11-14.
// Validating for success cases.
{bucket, object, []byte("abcd"), map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, "", int64(len("abcd")), "", nil},
{bucket, object, []byte("efgh"), map[string]string{"md5Sum": "1f7690ebdd9b4caf8fab49ca1757bf27"}, "", int64(len("efgh")), "", nil},
{bucket, object, []byte("ijkl"), map[string]string{"md5Sum": "09a0877d04abf8759f99adec02baf579"}, "", int64(len("ijkl")), "", nil},
{bucket, object, []byte("mnop"), map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"}, "", int64(len("mnop")), "", nil},
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "", int64(len("abcd")), "", nil},
{bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, "", int64(len("efgh")), "", nil},
{bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, "", int64(len("ijkl")), "", nil},
{bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, "", int64(len("mnop")), "", nil},

// Test case 15-17.
// With no metadata
@@ -169,8 +169,8 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
// Test passes as expected, but the output values are verified for correctness here.
if actualErr == nil {
// Asserting whether the md5 output is correct.
if expectedMD5, ok := testCase.inputMeta["md5Sum"]; ok && expectedMD5 != objInfo.MD5Sum {
t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.MD5Sum)
if expectedMD5, ok := testCase.inputMeta["etag"]; ok && expectedMD5 != objInfo.ETag {
t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.ETag)
}
}
}
@@ -189,14 +189,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
object := "minio-object"

// Create bucket.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

// Creating a dummy bucket for tests.
err = obj.MakeBucket("unused-bucket")
err = obj.MakeBucketWithLocation("unused-bucket", "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -220,10 +220,10 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
expectedError error
}{
// Validating for success cases.
{bucket, object, []byte("abcd"), map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil},
{bucket, object, []byte("efgh"), map[string]string{"md5Sum": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil},
{bucket, object, []byte("ijkl"), map[string]string{"md5Sum": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil},
{bucket, object, []byte("mnop"), map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil},
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil},
{bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil},
{bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil},
{bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil},
}

sha256sum := ""
@@ -246,8 +246,8 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
// Test passes as expected, but the output values are verified for correctness here.
if actualErr == nil && testCase.shouldPass {
// Asserting whether the md5 output is correct.
if testCase.inputMeta["md5Sum"] != objInfo.MD5Sum {
t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.MD5Sum)
if testCase.inputMeta["etag"] != objInfo.ETag {
t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.ETag)
}
}
}
@@ -271,7 +271,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
bucket,
object,
[]byte("mnop"),
map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"},
map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"},
int64(len("mnop")),
false,
"",
@@ -303,7 +303,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
object := "minio-object"

// Create bucket.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@@ -338,7 +338,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
object := "minio-object"

// Create bucket.
err := obj.MakeBucket(bucket)
err := obj.MakeBucketWithLocation(bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())

@@ -119,10 +119,7 @@ func IsValidObjectName(object string) bool {
if len(object) == 0 {
return false
}
if hasSuffix(object, slashSeparator) {
return false
}
if hasPrefix(object, slashSeparator) {
if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) {
return false
}
return IsValidObjectPrefix(object)
@@ -190,6 +187,35 @@ func getCompleteMultipartMD5(parts []completePart) (string, error) {
return s3MD5, nil
}

// Clean meta etag keys 'md5Sum', 'etag'.
func cleanMetaETag(metadata map[string]string) map[string]string {
return cleanMetadata(metadata, "md5Sum", "etag")
}

// Clean metadata takes keys to be filtered
// and returns a new map with the keys filtered.
func cleanMetadata(metadata map[string]string, keyNames ...string) map[string]string {
var newMeta = make(map[string]string)
for k, v := range metadata {
if contains(keyNames, k) {
continue
}
newMeta[k] = v
}
return newMeta
}

// Extracts etag value from the metadata.
func extractETag(metadata map[string]string) string {
// md5Sum tag is kept for backward compatibility.
etag, ok := metadata["md5Sum"]
if !ok {
etag = metadata["etag"]
}
// Success.
return etag
}

// Prefix matcher string matches prefix in a platform specific way.
// For example on windows since its case insensitive we are supposed
// to do case insensitive checks.

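Editor's note: a behavior sketch for the metadata helpers above — extractETag prefers the legacy "md5Sum" key before falling back to "etag", and cleanMetaETag strips both:

meta := map[string]string{"md5Sum": "abc123", "content-type": "text/plain"}
etag := extractETag(meta) // "abc123": legacy key wins for backward compatibility
meta = cleanMetaETag(meta) // only {"content-type": "text/plain"} remains
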
@@ -59,8 +59,8 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf
// set object-related metadata headers
w.Header().Set("Last-Modified", objInfo.ModTime.UTC().Format(http.TimeFormat))

if objInfo.MD5Sum != "" {
w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
if objInfo.ETag != "" {
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
}
}
// x-amz-copy-source-if-modified-since: Return the object only if it has been modified
@@ -95,7 +95,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf
// same as the one specified; otherwise return a 412 (precondition failed).
ifMatchETagHeader := r.Header.Get("x-amz-copy-source-if-match")
if ifMatchETagHeader != "" {
if objInfo.MD5Sum != "" && !isETagEqual(objInfo.MD5Sum, ifMatchETagHeader) {
if objInfo.ETag != "" && !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
// If the object ETag does not match with the specified ETag.
writeHeaders()
writeErrorResponse(w, ErrPreconditionFailed, r.URL)
@@ -107,7 +107,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf
// one specified otherwise, return a 304 (not modified).
ifNoneMatchETagHeader := r.Header.Get("x-amz-copy-source-if-none-match")
if ifNoneMatchETagHeader != "" {
if objInfo.MD5Sum != "" && isETagEqual(objInfo.MD5Sum, ifNoneMatchETagHeader) {
if objInfo.ETag != "" && isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
// If the object ETag matches with the specified ETag.
writeHeaders()
writeErrorResponse(w, ErrPreconditionFailed, r.URL)
@@ -144,8 +144,8 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn
// set object-related metadata headers
w.Header().Set("Last-Modified", objInfo.ModTime.UTC().Format(http.TimeFormat))

if objInfo.MD5Sum != "" {
w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
if objInfo.ETag != "" {
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
}
}
// If-Modified-Since : Return the object only if it has been modified since the specified time,
@@ -180,7 +180,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn
// otherwise return a 412 (precondition failed).
ifMatchETagHeader := r.Header.Get("If-Match")
if ifMatchETagHeader != "" {
if !isETagEqual(objInfo.MD5Sum, ifMatchETagHeader) {
if !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
// If the object ETag does not match with the specified ETag.
writeHeaders()
writeErrorResponse(w, ErrPreconditionFailed, r.URL)
@@ -192,7 +192,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn
// one specified otherwise, return a 304 (not modified).
ifNoneMatchETagHeader := r.Header.Get("If-None-Match")
if ifNoneMatchETagHeader != "" {
if isETagEqual(objInfo.MD5Sum, ifNoneMatchETagHeader) {
if isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
// If the object ETag matches with the specified ETag.
writeHeaders()
w.WriteHeader(http.StatusNotModified)

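Editor's note: a hedged client-side sketch of how the precondition paths above are exercised; the bucket/object names and endpoint are placeholders:

req, _ := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/object", nil)
req.Header.Set("If-Match", `"d41d8cd98f00b204e9800998ecf8427e"`) // must equal the stored ETag
resp, _ := http.DefaultClient.Do(req)
// resp is 412 Precondition Failed when the ETag differs; with
// If-None-Match, a matching ETag yields 304 Not Modified instead.
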
@@ -360,10 +360,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

defaultMeta := objInfo.UserDefined

// Make sure to remove saved md5sum, object might have been uploaded
// as multipart which doesn't have a standard md5sum, we just let
// CopyObject calculate a new one.
delete(defaultMeta, "md5Sum")
// Make sure to remove saved etag, CopyObject calculates a new one.
delete(defaultMeta, "etag")

newMetadata := getCpObjMetadataFromHeader(r.Header, defaultMeta)
// Check if x-amz-metadata-directive was not set to REPLACE and source,
@@ -383,8 +381,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}

md5Sum := objInfo.MD5Sum
response := generateCopyObjectResponse(md5Sum, objInfo.ModTime)
response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
encodedSuccessResponse := encodeResponse(response)

// Write success response.
@@ -482,7 +479,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}

// Make sure we hex encode md5sum here.
metadata["md5Sum"] = hex.EncodeToString(md5Bytes)
metadata["etag"] = hex.EncodeToString(md5Bytes)

sha256sum := ""

@@ -510,7 +507,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
@@ -518,14 +515,14 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
@@ -540,7 +537,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
writeSuccessResponseHeadersOnly(w)

// Get host and port from Request.RemoteAddr.
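Editor's note: why the repeated errorIf change above matters — dumpRequest(r) was previously passed in the format-string position, so any '%' in the dumped request body or headers would be misread as a formatting verb. The fix is the standard printf-style idiom:

errorIf(errSignatureMismatch, "%s", dumpRequest(r)) // safe: dump is data, not format
// errorIf(errSignatureMismatch, dumpRequest(r))    // old: '%' in the dump breaks formatting
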
@@ -791,7 +788,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
// Initialize stream signature verifier.
|
||||
reader, s3Error := newSignV4ChunkedReader(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, dumpRequest(r))
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
@@ -799,14 +796,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
s3Error := isReqAuthenticatedV2(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, dumpRequest(r))
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
|
||||
case authTypePresigned, authTypeSigned:
|
||||
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, dumpRequest(r))
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
@@ -965,7 +962,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
||||
// Get object location.
|
||||
location := getLocation(r)
|
||||
// Generate complete multipart response.
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.MD5Sum)
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag)
|
||||
encodedSuccessResponse := encodeResponse(response)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to parse CompleteMultipartUpload response")
|
||||
@@ -974,7 +971,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
||||
}
|
||||
|
||||
// Set etag.
|
||||
w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
|
||||
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, encodedSuccessResponse)
|
||||
|
||||
@@ -2126,8 +2126,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		uploadIDs = append(uploadIDs, uploadID)
 	}

-	// Parts with size greater than 5 MB.
-	// Generating a 6MB byte array.
+	// Parts with size greater than 5 MiB.
+	// Generating a 6 MiB byte array.
 	validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
 	validPartMD5 := getMD5Hash(validPart)
 	// Create multipart parts.
@@ -2147,11 +2147,11 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		{bucketName, objectName, uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh"))},
 		{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
 		{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
-		// Part with size larger than 5Mb.
+		// Part with size larger than 5 MiB.
 		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
 		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},

-		// Part with size larger than 5Mb.
+		// Part with size larger than 5 MiB.
 		// Parts uploaded for anonymous/unsigned API handler test.
 		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
 		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
@@ -2192,7 +2192,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		},
 		// inputParts - 3.
 		// Case with valid parts,but parts are unsorted.
-		// Part size greater than 5MB.
+		// Part size greater than 5 MiB.
 		{
 			[]completePart{
 				{ETag: validPartMD5, PartNumber: 6},
@@ -2201,7 +2201,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		},
 		// inputParts - 4.
 		// Case with valid part.
-		// Part size greater than 5MB.
+		// Part size greater than 5 MiB.
 		{
 			[]completePart{
 				{ETag: validPartMD5, PartNumber: 5},
@@ -2211,7 +2211,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s

 		// inputParts - 5.
 		// Used for the case of testing for anonymous API request.
-		// Part size greater than 5MB.
+		// Part size greater than 5 MiB.
 		{
 			[]completePart{
 				{ETag: validPartMD5, PartNumber: 1},
@@ -2481,8 +2481,8 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 		uploadIDs = append(uploadIDs, uploadID)
 	}

-	// Parts with size greater than 5 MB.
-	// Generating a 6MB byte array.
+	// Parts with size greater than 5 MiB.
+	// Generating a 6 MiB byte array.
 	validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
 	validPartMD5 := getMD5Hash(validPart)
 	// Create multipart parts.
@@ -2502,11 +2502,11 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 		{bucketName, objectName, uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh"))},
 		{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
 		{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
-		// Part with size larger than 5Mb.
+		// Part with size larger than 5 MiB.
 		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
 		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},

-		// Part with size larger than 5Mb.
+		// Part with size larger than 5 MiB.
 		// Parts uploaded for anonymous/unsigned API handler test.
 		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
 		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
@@ -79,7 +79,7 @@ func (s *ObjectLayerAPISuite) TestMakeBucket(c *C) {

 // Tests validate bucket creation.
 func testMakeBucket(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket-unknown")
+	err := obj.MakeBucketWithLocation("bucket-unknown", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
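Throughout these tests, `MakeBucket(bucket)` is replaced by `MakeBucketWithLocation(bucket, location)`; passing an empty location, as every call here does, appears to fall back to the server's default region. A hedged sketch of the calling convention (bucket names and the explicit region are illustrative, not from the diff):

    // makeBuckets sketches the extended ObjectLayer call: an empty
    // location uses the server default, a region name pins the bucket's
    // location constraint.
    func makeBuckets(obj ObjectLayer) error {
    	if err := obj.MakeBucketWithLocation("mybucket", ""); err != nil { // default region
    		return err
    	}
    	return obj.MakeBucketWithLocation("eu-bucket", "eu-west-1") // explicit region
    }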
@@ -92,7 +92,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(c *C) {

 // Tests validate creation of part files during Multipart operation.
 func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -104,14 +104,14 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
 	data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
 	completedParts := completeMultipartUpload{}
 	for i := 1; i <= 10; i++ {
-		expectedMD5Sumhex := getMD5Hash(data)
+		expectedETaghex := getMD5Hash(data)

 		var calcPartInfo PartInfo
-		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedMD5Sumhex, "")
+		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedETaghex, "")
 		if err != nil {
 			c.Errorf("%s: <ERROR> %s", instanceType, err)
 		}
-		if calcPartInfo.ETag != expectedMD5Sumhex {
+		if calcPartInfo.ETag != expectedETaghex {
 			c.Errorf("MD5 Mismatch")
 		}
 		completedParts.Parts = append(completedParts.Parts, completePart{
@@ -123,7 +123,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	if objInfo.MD5Sum != "7d364cb728ce42a74a96d22949beefb2-10" {
+	if objInfo.ETag != "7d364cb728ce42a74a96d22949beefb2-10" {
 		c.Errorf("Md5 mismtch")
 	}
 }
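The expected final value `7d364cb728ce42a74a96d22949beefb2-10` follows the S3 multipart ETag convention: the hex MD5 of the concatenated raw part MD5s, suffixed with `-<part count>`. A sketch of that computation (the helper name is mine; the printed value should match the test's expectation under this convention):

    package main

    import (
    	"bytes"
    	"crypto/md5"
    	"fmt"
    )

    // multipartETag computes an S3-style multipart ETag: hex MD5 of the
    // concatenated raw 16-byte part digests, then "-<number of parts>".
    func multipartETag(parts [][]byte) string {
    	h := md5.New()
    	for _, p := range parts {
    		sum := md5.Sum(p) // digest of the individual part
    		h.Write(sum[:])   // feed the raw digest, not its hex form
    	}
    	return fmt.Sprintf("%x-%d", h.Sum(nil), len(parts))
    }

    func main() {
    	part := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) // 5 MiB
    	parts := make([][]byte, 10)
    	for i := range parts {
    		parts[i] = part
    	}
    	fmt.Println(multipartETag(parts))
    }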
@@ -135,7 +135,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(c *C) {

 // Tests validate abortion of Multipart operation.
 func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -153,18 +153,18 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHan
 			randomString = randomString + strconv.Itoa(num)
 		}

-		expectedMD5Sumhex := getMD5Hash([]byte(randomString))
+		expectedETaghex := getMD5Hash([]byte(randomString))

-		metadata["md5"] = expectedMD5Sumhex
+		metadata["md5"] = expectedETaghex
 		var calcPartInfo PartInfo
-		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex, "")
+		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedETaghex, "")
 		if err != nil {
 			c.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
-		if calcPartInfo.ETag != expectedMD5Sumhex {
+		if calcPartInfo.ETag != expectedETaghex {
 			c.Errorf("Md5 Mismatch")
 		}
-		parts[i] = expectedMD5Sumhex
+		parts[i] = expectedETaghex
 	}
 	err = obj.AbortMultipartUpload("bucket", "key", uploadID)
 	if err != nil {
@@ -180,7 +180,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(c *C) {
 // Tests validate object creation.
 func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	objects := make(map[string][]byte)
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -191,18 +191,18 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrH
 			randomString = randomString + strconv.Itoa(num)
 		}

-		expectedMD5Sumhex := getMD5Hash([]byte(randomString))
+		expectedETaghex := getMD5Hash([]byte(randomString))

 		key := "obj" + strconv.Itoa(i)
 		objects[key] = []byte(randomString)
 		metadata := make(map[string]string)
-		metadata["md5Sum"] = expectedMD5Sumhex
+		metadata["etag"] = expectedETaghex
 		var objInfo ObjectInfo
 		objInfo, err = obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata, "")
 		if err != nil {
 			c.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
-		if objInfo.MD5Sum != expectedMD5Sumhex {
+		if objInfo.ETag != expectedETaghex {
 			c.Errorf("Md5 Mismatch")
 		}
 	}
@@ -235,7 +235,7 @@ func (s *ObjectLayerAPISuite) TestPaging(c *C) {

 // Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
 func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	obj.MakeBucket("bucket")
+	obj.MakeBucketWithLocation("bucket", "")
 	result, err := obj.ListObjects("bucket", "", "", "", 0)
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
@@ -438,7 +438,7 @@ func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(c *C) {

 // Tests validate overwriting of an existing object.
 func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -488,11 +488,11 @@ func (s *ObjectLayerAPISuite) TestBucketRecreateFails(c *C) {

 // Tests validate that recreation of the bucket fails.
 func testBucketRecreateFails(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("string")
+	err := obj.MakeBucketWithLocation("string", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	err = obj.MakeBucket("string")
+	err = obj.MakeBucketWithLocation("string", "")
 	if err == nil {
 		c.Fatalf("%s: Expected error but found nil.", instanceType)
 	}
@@ -513,7 +513,7 @@ func testPutObject(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	length := int64(len(content))
 	readerEOF := newTestReaderEOF(content)
 	readerNoEOF := newTestReaderNoEOF(content)
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -552,7 +552,7 @@ func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(c *C) {

 // Tests validate PutObject with subdirectory prefix.
 func testPutObjectInSubdir(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -593,7 +593,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	}

 	// add one and test exists.
-	err = obj.MakeBucket("bucket1")
+	err = obj.MakeBucketWithLocation("bucket1", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -607,7 +607,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	}

 	// add two and test exists.
-	err = obj.MakeBucket("bucket2")
+	err = obj.MakeBucketWithLocation("bucket2", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -621,7 +621,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	}

 	// add three and test exists + prefix.
-	err = obj.MakeBucket("bucket22")
+	err = obj.MakeBucketWithLocation("bucket22", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -645,11 +645,11 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, c TestErrHandler
 	// if implementation contains a map, order of map keys will vary.
 	// this ensures they return in the same order each time.
 	// add one and test exists.
-	err := obj.MakeBucket("bucket1")
+	err := obj.MakeBucketWithLocation("bucket1", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	err = obj.MakeBucket("bucket2")
+	err = obj.MakeBucketWithLocation("bucket2", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -698,7 +698,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(c *C) {

 // Tests validate that GetObject fails on a non-existent bucket as expected.
 func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -736,7 +736,7 @@ func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(c *C) {
 // Tests validate that GetObject on an existing directory fails as expected.
 func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	bucketName := "bucket"
-	err := obj.MakeBucket(bucketName)
+	err := obj.MakeBucketWithLocation(bucketName, "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -749,23 +749,26 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

-	for i, objName := range []string{"dir1", "dir1/", "dir1/dir3", "dir1/dir3/"} {
-		_, err = obj.GetObjectInfo(bucketName, objName)
-		if isErrObjectNotFound(err) {
-			err = errorCause(err)
-			err1 := err.(ObjectNotFound)
-			if err1.Bucket != bucketName {
-				c.Errorf("Test %d, %s: Expected the bucket name in the error message to be `%s`, but instead found `%s`",
-					i+1, instanceType, bucketName, err1.Bucket)
-			}
-			if err1.Object != objName {
-				c.Errorf("Test %d, %s: Expected the object name in the error message to be `%s`, but instead found `%s`",
-					i+1, instanceType, objName, err1.Object)
-			}
-		} else {
-			if err.Error() != "ObjectNotFound" {
-				c.Errorf("Test %d, %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType,
-					"ObjectNotFound", err.Error())
+	testCases := []struct {
+		dir string
+		err error
+	}{
+		{
+			dir: "dir1/",
+			err: ObjectNotFound{Bucket: bucketName, Object: "dir1/"},
+		},
+		{
+			dir: "dir1/dir3/",
+			err: ObjectNotFound{Bucket: bucketName, Object: "dir1/dir3/"},
+		},
+	}

+	for i, testCase := range testCases {
+		_, expectedErr := obj.GetObjectInfo(bucketName, testCase.dir)
+		if expectedErr != nil {
+			expectedErr = errorCause(expectedErr)
+			if expectedErr.Error() != testCase.err.Error() {
+				c.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, expectedErr)
 			}
 		}
 	}
@@ -778,7 +781,7 @@ func (s *ObjectLayerAPISuite) TestContentType(c *C) {

 // Test content-type.
 func testContentType(obj ObjectLayer, instanceType string, c TestErrHandler) {
-	err := obj.MakeBucket("bucket")
+	err := obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
112 cmd/posix.go
@@ -18,6 +18,8 @@ package cmd

 import (
 	"bytes"
+	"encoding/hex"
+	"hash"
 	"io"
 	"io/ioutil"
 	"os"
@@ -249,7 +251,7 @@ func (s *posix) MakeVol(volume string) (err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
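This and the following hunks replace plain reads of `s.ioErrCount` with `atomic.LoadInt32`. Since the counter is incremented with `atomic.AddInt32` from a deferred error handler, mixing in an unsynchronized read would be a data race. A minimal, self-contained sketch of the pattern (the type and threshold here are illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    const maxAllowedIOError = 10

    type disk struct {
    	ioErrCount int32
    }

    // op simulates a storage call that records I/O errors atomically and
    // refuses to proceed once the disk is considered faulty.
    func (d *disk) op(failed bool) error {
    	if atomic.LoadInt32(&d.ioErrCount) > maxAllowedIOError { // race-free read
    		return fmt.Errorf("faulty disk")
    	}
    	if failed {
    		atomic.AddInt32(&d.ioErrCount, 1) // race-free increment
    	}
    	return nil
    }

    func main() {
    	d := &disk{}
    	var wg sync.WaitGroup
    	for i := 0; i < 100; i++ {
    		wg.Add(1)
    		go func() { defer wg.Done(); d.op(true) }()
    	}
    	wg.Wait()
    	fmt.Println("recorded errors:", atomic.LoadInt32(&d.ioErrCount))
    }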
@@ -283,7 +285,7 @@ func (s *posix) ListVols() (volsInfo []VolInfo, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return nil, errFaultyDisk
 	}
@@ -347,7 +349,7 @@ func (s *posix) StatVol(volume string) (volInfo VolInfo, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return VolInfo{}, errFaultyDisk
 	}
@@ -386,7 +388,7 @@ func (s *posix) DeleteVol(volume string) (err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
@@ -420,7 +422,7 @@ func (s *posix) ListDir(volume, dirPath string) (entries []string, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return nil, errFaultyDisk
 	}
@@ -457,7 +459,7 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return nil, errFaultyDisk
 	}
@@ -512,18 +514,37 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) {
 // number of bytes copied. The error is EOF only if no bytes were
 // read. On return, n == len(buf) if and only if err == nil. n == 0
 // for io.EOF.
 //
 // If an EOF happens after reading some but not all the bytes,
 // ReadFull returns ErrUnexpectedEOF.
-// Additionally ReadFile also starts reading from an offset.
-// ReadFile symantics are same as io.ReadFull
-func (s *posix) ReadFile(volume string, path string, offset int64, buf []byte) (n int64, err error) {
+//
+// Additionally ReadFile also starts reading from an offset. ReadFile
+// semantics are same as io.ReadFull.
+func (s *posix) ReadFile(volume, path string, offset int64, buf []byte) (n int64, err error) {
+	return s.ReadFileWithVerify(volume, path, offset, buf, "", "")
+}
+
+// ReadFileWithVerify is the same as ReadFile but with hashsum
+// verification: the operation will fail if the hash verification
+// fails.
+//
+// The `expectedHash` is the expected hex-encoded hash string for
+// verification. With an empty expected hash string, hash verification
+// is skipped. An empty HashAlgo defaults to `blake2b`.
+//
+// The function takes care to minimize the number of disk read
+// operations.
+func (s *posix) ReadFileWithVerify(volume, path string, offset int64, buf []byte,
+	algo HashAlgo, expectedHash string) (n int64, err error) {
+
 	defer func() {
 		if err == syscall.EIO {
 			atomic.AddInt32(&s.ioErrCount, 1)
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return 0, errFaultyDisk
 	}
@@ -571,19 +592,66 @@ func (s *posix) ReadFile(volume string, path string, offset int64, buf []byte) (
 		return 0, err
 	}

-	// Verify if its not a regular file, since subsequent Seek is undefined.
+	// Verify it is a regular file, otherwise subsequent Seek is
+	// undefined.
 	if !st.Mode().IsRegular() {
 		return 0, errIsNotRegular
 	}

-	// Seek to requested offset.
-	_, err = file.Seek(offset, os.SEEK_SET)
-	if err != nil {
+	// If expected hash string is empty hash verification is
+	// skipped.
+	needToHash := expectedHash != ""
+	var hasher hash.Hash
+
+	if needToHash {
+		// If the hashing algo is invalid, return an error.
+		if !isValidHashAlgo(algo) {
+			return 0, errBitrotHashAlgoInvalid
+		}
+
+		// Compute hash of object from start to the byte at
+		// (offset - 1), and as a result of this read, seek to
+		// `offset`.
+		hasher = newHash(algo)
+		if offset > 0 {
+			_, err = io.CopyN(hasher, file, offset)
+			if err != nil {
+				return 0, err
+			}
+		}
+	} else {
+		// Seek to requested offset.
+		_, err = file.Seek(offset, os.SEEK_SET)
+		if err != nil {
+			return 0, err
+		}
+	}

-	// Read until buffer is full.
-	m, err := io.ReadFull(file, buf)
-	if err == io.EOF {
-		return 0, err
-	}

+	// Read full until buffer.
+	m, err := io.ReadFull(file, buf)
+	if needToHash {
+		// Continue computing hash with buf.
+		_, err = hasher.Write(buf)
+		if err != nil {
+			return 0, err
+		}
+
+		// Continue computing hash until end of file.
+		_, err = io.Copy(hasher, file)
+		if err != nil {
+			return 0, err
+		}
+
+		// Verify the computed hash.
+		computedHash := hex.EncodeToString(hasher.Sum(nil))
+		if computedHash != expectedHash {
+			return 0, hashMismatchError{expectedHash, computedHash}
+		}
+	}

 	// Success.
 	return int64(m), err
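Taken together, `ReadFile` is now a thin wrapper over `ReadFileWithVerify` with verification disabled. A hedged sketch of how a caller might use the new API (the `StorageAPI` parameter, volume, object, and helper name are illustrative, not part of this diff):

    // readVerified reads size bytes at offset from an object while
    // verifying the file's full-content hash along the way.
    func readVerified(disk StorageAPI, volume, object string, offset int64, size int, expectedHex string) ([]byte, error) {
    	buf := make([]byte, size)
    	// Empty algo and expected hash would skip verification; passing
    	// HashSha256 plus a hex digest makes the posix layer hash the
    	// whole file and compare before reporting success.
    	if _, err := disk.ReadFileWithVerify(volume, object, offset, buf, HashSha256, expectedHex); err != nil {
    		return nil, err // a hashMismatchError signals possible bitrot
    	}
    	return buf, nil
    }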
@@ -596,7 +664,7 @@ func (s *posix) createFile(volume, path string) (f *os.File, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return nil, errFaultyDisk
 	}
@@ -669,7 +737,7 @@ func (s *posix) PrepareFile(volume, path string, fileSize int64) (err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
@@ -716,7 +784,7 @@ func (s *posix) AppendFile(volume, path string, buf []byte) (err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
@@ -747,7 +815,7 @@ func (s *posix) StatFile(volume, path string) (file FileInfo, err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return FileInfo{}, errFaultyDisk
 	}
@@ -843,7 +911,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) {
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
@@ -883,7 +951,7 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
 		}
 	}()

-	if s.ioErrCount > maxAllowedIOError {
+	if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
 		return errFaultyDisk
 	}
@@ -18,6 +18,8 @@ package cmd

 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
 	"io"
 	"io/ioutil"
 	"os"
@@ -26,6 +28,8 @@ import (
 	"strings"
 	"syscall"
 	"testing"
+
+	"golang.org/x/crypto/blake2b"
 )

 // creates a temp dir and sets up posix layer.
@@ -1017,6 +1021,115 @@ func TestPosixReadFile(t *testing.T) {
 	}
 }

+// TestPosixReadFileWithVerify - tests the posix level
+// ReadFileWithVerify API. Only tests hashing related
+// functionality. Other functionality is tested with
+// TestPosixReadFile.
+func TestPosixReadFileWithVerify(t *testing.T) {
+	// create posix test setup
+	posixStorage, path, err := newPosixTestSetup()
+	if err != nil {
+		t.Fatalf("Unable to create posix test setup, %s", err)
+	}
+	defer removeAll(path)
+
+	volume := "success-vol"
+	// Setup test environment.
+	if err = posixStorage.MakeVol(volume); err != nil {
+		t.Fatalf("Unable to create volume, %s", err)
+	}
+
+	blakeHash := func(s string) string {
+		k := blake2b.Sum512([]byte(s))
+		return hex.EncodeToString(k[:])
+	}
+
+	sha256Hash := func(s string) string {
+		k := sha256.Sum256([]byte(s))
+		return hex.EncodeToString(k[:])
+	}
+
+	testCases := []struct {
+		fileName     string
+		offset       int64
+		bufSize      int
+		algo         HashAlgo
+		expectedHash string
+
+		expectedBuf []byte
+		expectedErr error
+	}{
+		// Hash verification is skipped with empty expected
+		// hash - 1
+		{
+			"myobject", 0, 5, HashBlake2b, "",
+			[]byte("Hello"), nil,
+		},
+		// Hash verification failure case - 2
+		{
+			"myobject", 0, 5, HashBlake2b, "a",
+			[]byte(""),
+			hashMismatchError{"a", blakeHash("Hello, world!")},
+		},
+		// Hash verification success with full content requested - 3
+		{
+			"myobject", 0, 13, HashBlake2b, blakeHash("Hello, world!"),
+			[]byte("Hello, world!"), nil,
+		},
+		// Hash verification success with full content and Sha256 - 4
+		{
+			"myobject", 0, 13, HashSha256, sha256Hash("Hello, world!"),
+			[]byte("Hello, world!"), nil,
+		},
+		// Hash verification success with partial content requested - 5
+		{
+			"myobject", 7, 4, HashBlake2b, blakeHash("Hello, world!"),
+			[]byte("worl"), nil,
+		},
+		// Hash verification success with partial content and Sha256 - 6
+		{
+			"myobject", 7, 4, HashSha256, sha256Hash("Hello, world!"),
+			[]byte("worl"), nil,
+		},
+		// Empty hash-algo returns error - 7
+		{
+			"myobject", 7, 4, "", blakeHash("Hello, world!"),
+			[]byte("worl"), errBitrotHashAlgoInvalid,
+		},
+		// Empty content hash verification with empty
+		// hash-algo algo returns error - 8
+		{
+			"myobject", 7, 0, "", blakeHash("Hello, world!"),
+			[]byte(""), errBitrotHashAlgoInvalid,
+		},
+	}
+
+	// Create file used in testcases
+	err = posixStorage.AppendFile(volume, "myobject", []byte("Hello, world!"))
+	if err != nil {
+		t.Fatalf("Failure in test setup: %v\n", err)
+	}
+
+	// Validate each test case.
+	for i, testCase := range testCases {
+		var n int64
+		// Common read buffer.
+		var buf = make([]byte, testCase.bufSize)
+		n, err = posixStorage.ReadFileWithVerify(volume, testCase.fileName, testCase.offset, buf, testCase.algo, testCase.expectedHash)
+
+		switch {
+		case err == nil && testCase.expectedErr != nil:
+			t.Errorf("Test %d: Expected error %v but got none.", i+1, testCase.expectedErr)
+		case err == nil && n != int64(testCase.bufSize):
+			t.Errorf("Test %d: %d bytes were expected, but %d were written", i+1, testCase.bufSize, n)
+		case err == nil && !bytes.Equal(testCase.expectedBuf, buf):
+			t.Errorf("Test %d: Expected bytes: %v, but got: %v", i+1, testCase.expectedBuf, buf)
+		case err != nil && err != testCase.expectedErr:
+			t.Errorf("Test %d: Expected error: %v, but got: %v", i+1, testCase.expectedErr, err)
+		}
+	}
+}
+
 // TestPosix posix.AppendFile()
 func TestPosixAppendFile(t *testing.T) {
 	// create posix test setup
@@ -143,7 +143,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
 	// objectNames[0].
 	// uploadIds [0].
 	// Create bucket before initiating NewMultipartUpload.
-	err = obj.MakeBucket(bucketName)
+	err = obj.MakeBucketWithLocation(bucketName, "")
 	if err != nil {
 		// Failed to create newbucket, abort.
 		t.Fatalf("%s : %s", instanceType, err.Error())
@@ -246,6 +246,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
 		}
 	}

+	region := "us-east-1"
 	// Test cases for signature-V4.
 	testCasesV4BadData := []struct {
 		objectName string
@@ -330,7 +331,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
 		testCase.policy = fmt.Sprintf(testCase.policy, testCase.dates...)

 		req, perr := newPostRequestV4Generic("", bucketName, testCase.objectName, testCase.data, testCase.accessKey,
-			testCase.secretKey, curTime, []byte(testCase.policy), nil, testCase.corruptedBase64, testCase.corruptedMultipart)
+			testCase.secretKey, region, curTime, []byte(testCase.policy), nil, testCase.corruptedBase64, testCase.corruptedMultipart)
 		if perr != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
 		}
@@ -458,7 +459,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
 	curTime := UTCNow()
 	curTimePlus5Min := curTime.Add(time.Minute * 5)

-	err = obj.MakeBucket(bucketName)
+	err = obj.MakeBucketWithLocation(bucketName, "")
 	if err != nil {
 		// Failed to create newbucket, abort.
 		t.Fatalf("%s : %s", instanceType, err.Error())
@@ -473,9 +474,10 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
 	// Generate the final policy document
 	policy = fmt.Sprintf(policy, dates...)

+	region := "us-east-1"
 	// Create a new POST request with success_action_redirect field specified
 	req, perr := newPostRequestV4Generic("", bucketName, keyName, []byte("objData"),
-		credentials.AccessKey, credentials.SecretKey, curTime,
+		credentials.AccessKey, credentials.SecretKey, region, curTime,
 		[]byte(policy), map[string]string{"success_action_redirect": redirectURL.String()}, false, false)

 	if perr != nil {
@@ -565,11 +567,11 @@ func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secret
 	return req, nil
 }

-func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, contentLengthRange bool) []byte {
+func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName string, contentLengthRange bool) []byte {
 	// Expire the request five minutes from now.
 	expirationTime := t.Add(time.Minute * 5)

-	credStr := getCredentialString(accessKey, serverConfig.GetRegion(), t)
+	credStr := getCredentialString(accessKey, region, t)
 	// Create a new post policy.
 	policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime)
 	if contentLengthRange {
@@ -578,10 +580,10 @@ func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, c
 	return policy
 }

-func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string,
+func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string,
 	t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
 	// Get the user credential.
-	credStr := getCredentialString(accessKey, serverConfig.GetRegion(), t)
+	credStr := getCredentialString(accessKey, region, t)

 	// Only need the encoding.
 	encodedPolicy := base64.StdEncoding.EncodeToString(policy)
@@ -591,7 +593,7 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []
 	}

 	// Presign with V4 signature based on the policy.
-	signature := postPresignSignatureV4(encodedPolicy, t, secretKey, serverConfig.GetRegion())
+	signature := postPresignSignatureV4(encodedPolicy, t, secretKey, region)

 	formData := map[string]string{
 		"bucket": bucketName,
@@ -645,12 +647,14 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []

 func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
 	t := UTCNow()
-	policy := buildGenericPolicy(t, accessKey, bucketName, objectName, true)
-	return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, nil, false, false)
+	region := "us-east-1"
+	policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, true)
+	return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
 }

 func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
 	t := UTCNow()
-	policy := buildGenericPolicy(t, accessKey, bucketName, objectName, false)
-	return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, nil, false, false)
+	region := "us-east-1"
+	policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, false)
+	return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
 }
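These helpers now thread an explicit `region` instead of reading `serverConfig.GetRegion()`, so a test can sign for a region independent of server state. For reference, the Signature V4 credential string these tests build follows the AWS scope layout `<accessKey>/<yyyymmdd>/<region>/s3/aws4_request`; a hedged sketch (the helper name is mine, and the real getCredentialString may differ in detail):

    package main

    import (
    	"fmt"
    	"strings"
    	"time"
    )

    // credentialString assembles a SigV4 credential scope: access key,
    // date stamp, region, service, and the fixed "aws4_request" suffix.
    func credentialString(accessKey, region string, t time.Time) string {
    	return strings.Join([]string{
    		accessKey,
    		t.Format("20060102"), // SigV4 date stamp, yyyymmdd
    		region,
    		"s3",
    		"aws4_request",
    	}, "/")
    }

    func main() {
    	fmt.Println(credentialString("AKIAIOSFODNN7EXAMPLE", "us-east-1", time.Now().UTC()))
    }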
@@ -178,6 +178,23 @@ func (f retryStorage) ReadFile(volume, path string, offset int64, buffer []byte)
 	return m, err
 }

+// ReadFileWithVerify - a retryable implementation of reading at
+// offset from a file with verification.
+func (f retryStorage) ReadFileWithVerify(volume, path string, offset int64, buffer []byte,
+	algo HashAlgo, expectedHash string) (m int64, err error) {
+
+	m, err = f.remoteStorage.ReadFileWithVerify(volume, path, offset, buffer,
+		algo, expectedHash)
+	if err == errDiskNotFound {
+		err = f.reInit()
+		if err == nil {
+			return f.remoteStorage.ReadFileWithVerify(volume, path,
+				offset, buffer, algo, expectedHash)
+		}
+	}
+	return m, err
+}
+
 // ListDir - a retryable implementation of listing directory entries.
 func (f retryStorage) ListDir(volume, path string) (entries []string, err error) {
 	entries, err = f.remoteStorage.ListDir(volume, path)
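Like the other `retryStorage` methods, the new wrapper retries exactly once: on `errDiskNotFound` it re-initializes the remote connection and repeats the call; any other error, or a failed reconnect, is returned immediately. A self-contained sketch of that retry-once shape (names are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errDiskNotFound = errors.New("disk not found")

    // retryOnce mirrors the retryStorage pattern: re-run op after a
    // successful reconnect, but only when the first failure says the
    // remote disk went away; a failed reconnect is returned as-is.
    func retryOnce(op, reconnect func() error) error {
    	err := op()
    	if err == errDiskNotFound {
    		if err = reconnect(); err == nil {
    			return op() // second and final attempt
    		}
    	}
    	return err
    }

    func main() {
    	calls := 0
    	op := func() error {
    		calls++
    		if calls == 1 {
    			return errDiskNotFound // first attempt hits a stale connection
    		}
    		return nil
    	}
    	reconnect := func() error { return nil }
    	fmt.Println(retryOnce(op, reconnect), "after", calls, "calls")
    }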
@@ -18,6 +18,8 @@ package cmd

 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
 	"reflect"
 	"testing"
 	"time"
@@ -290,6 +292,31 @@ func TestRetryStorage(t *testing.T) {
 		if n, err = disk.ReadFile("existent", "path", 7, buf2); err != nil {
 			t.Fatal(err)
 		}
 		if err != nil {
 			t.Error("Error in ReadFile", err)
 		}
 		if n != 5 {
 			t.Fatalf("Expected 5, got %d", n)
 		}
 		if !bytes.Equal(buf2, []byte("World")) {
 			t.Fatalf("Expected `World`, got %s", string(buf2))
 		}
 	}

+	sha256Hash := func(s string) string {
+		k := sha256.Sum256([]byte(s))
+		return hex.EncodeToString(k[:])
+	}
+	for _, disk := range storageDisks {
+		var buf2 = make([]byte, 5)
+		var n int64
+		if n, err = disk.ReadFileWithVerify("existent", "path", 7, buf2,
+			HashSha256, sha256Hash("Hello, World")); err != nil {
+			t.Fatal(err)
+		}
+		if err != nil {
+			t.Error("Error in ReadFileWithVerify", err)
+		}
+		if n != 5 {
+			t.Fatalf("Expected 5, got %d", n)
+		}
@@ -25,6 +25,7 @@ import (
 	"time"

 	"github.com/minio/cli"
+	"github.com/minio/dsync"
 )

 var serverFlags = []cli.Flag{
@@ -81,8 +82,8 @@ EXAMPLES:
 func checkUpdate(mode string) {
 	// Its OK to ignore any errors during getUpdateInfo() here.
 	if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil {
-		if older > time.Duration(0) {
-			log.Println(colorizeUpdateMessage(downloadURL, older))
+		if updateMsg := computeUpdateMessage(downloadURL, older); updateMsg != "" {
+			log.Println(updateMsg)
 		}
 	}
 }
@@ -107,7 +108,7 @@ func initConfig() {
 	// Config file does not exist, we create it fresh and return upon success.
 	if isFile(getConfigFile()) {
 		fatalIf(migrateConfig(), "Config migration failed.")
-		fatalIf(loadConfig(), "Unable to load minio config file")
+		fatalIf(loadConfig(), "Unable to load config version: '%s'.", v18)
 	} else {
 		fatalIf(newConfig(), "Unable to initialize minio config for the first time.")
 		log.Println("Created minio configuration file successfully at " + getConfigDir())
@@ -244,7 +245,8 @@ func serverMain(ctx *cli.Context) {

 	// Set nodes for dsync for distributed setup.
 	if globalIsDistXL {
-		fatalIf(initDsyncNodes(), "Unable to initialize distributed locking clients")
+		clnts, myNode := newDsyncNodes(globalEndpoints)
+		fatalIf(dsync.Init(clnts, myNode), "Unable to initialize distributed locking clients")
 	}

 	// Initialize name space lock.
@@ -42,19 +42,22 @@ const (
 	maxHTTPVerbLen = 7
 )

+// HTTP2 PRI method.
+var httpMethodPRI = "PRI"
+
 var defaultHTTP2Methods = []string{
-	"PRI",
+	httpMethodPRI,
 }

 var defaultHTTP1Methods = []string{
-	"OPTIONS",
-	"GET",
-	"HEAD",
-	"POST",
-	"PUT",
-	"DELETE",
-	"TRACE",
-	"CONNECT",
+	http.MethodOptions,
+	http.MethodGet,
+	http.MethodHead,
+	http.MethodPost,
+	http.MethodPut,
+	http.MethodDelete,
+	http.MethodTrace,
+	http.MethodConnect,
 }

 // ConnMux - Peeks into the incoming connection for relevant
@@ -446,17 +449,9 @@ func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) {
 	// All http requests start to be processed by httpHandler
 	httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if tlsEnabled && r.TLS == nil {
-			// TLS is enabled but Request is not TLS configured
-			u := url.URL{
-				Scheme:   httpsScheme,
-				Opaque:   r.URL.Opaque,
-				User:     r.URL.User,
-				Host:     r.Host,
-				Path:     r.URL.Path,
-				RawQuery: r.URL.RawQuery,
-				Fragment: r.URL.Fragment,
-			}
-			http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect)
+			// TLS is enabled but request is not TLS
+			// configured - return error to client.
+			writeErrorResponse(w, ErrInsecureClientRequest, &url.URL{})
 		} else {

 // Return ServiceUnavailable for clients which are sending requests
@@ -470,7 +465,7 @@ func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) {
 		}

 		// Execute registered handlers, update currentReqs to keep
-		// tracks of current requests currently processed by the server
+		// track of concurrent requests processing on the server
 		atomic.AddInt32(&m.currentReqs, 1)
 		m.handler.ServeHTTP(w, r)
 		atomic.AddInt32(&m.currentReqs, -1)
Some files were not shown because too many files have changed in this diff.