Mirror of https://github.com/minio/minio.git
Merge branch 'master' into release
```diff
@@ -14,7 +14,7 @@ env:
 
 script:
   - make
-  - make test GOFLAGS="-race"
+  - make test GOFLAGS="-timeout 20m -race -v"
   - make coverage
 
 after_success:
```
Makefile
```diff
@@ -107,6 +107,7 @@ spelling:
 
 test: build
 	@echo "Running all minio testing:"
 	@go test $(GOFLAGS) .
+	@go test $(GOFLAGS) github.com/minio/minio/cmd...
 	@go test $(GOFLAGS) github.com/minio/minio/pkg...
```
````diff
@@ -43,6 +43,8 @@ chmod 755 minio
 |GNU/Linux|64-bit Intel|https://dl.minio.io/server/minio/release/linux-amd64/minio|
 ||32-bit Intel|https://dl.minio.io/server/minio/release/linux-386/minio|
 ||32-bit ARM|https://dl.minio.io/server/minio/release/linux-arm/minio|
+||64-bit ARM|https://dl.minio.io/server/minio/release/linux-arm64/minio|
+||32-bit ARMv6|https://dl.minio.io/server/minio/release/linux-arm6vl/minio|
 
 ```sh
 chmod +x minio
 ./minio server ~/Photos
````
```diff
@@ -11,6 +11,8 @@ Minio is a very lightweight service and can easily be combined with other applications
 |GNU/Linux|64-bit Intel|https://dl.minio.io/server/minio/release/linux-amd64/minio|
 ||32-bit Intel|https://dl.minio.io/server/minio/release/linux-386/minio|
 ||32-bit ARM|https://dl.minio.io/server/minio/release/linux-arm/minio|
+||64-bit ARM|https://dl.minio.io/server/minio/release/linux-arm64/minio|
+||32-bit ARMv6|https://dl.minio.io/server/minio/release/linux-arm6vl/minio|
 |Apple OS X|64-bit Intel|https://dl.minio.io/server/minio/release/darwin-amd64/minio|
 |Microsoft Windows|64-bit|https://dl.minio.io/server/minio/release/windows-amd64/minio.exe|
 ||32-bit|https://dl.minio.io/server/minio/release/windows-386/minio.exe|
```
```diff
@@ -35,9 +35,9 @@ test_script:
   # Unit tests
   - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
   - mkdir build\coverage
-  - go test -timeout 15m -v -race github.com/minio/minio/cmd...
+  - go test -timeout 20m -v -race github.com/minio/minio/cmd...
   - go test -v -race github.com/minio/minio/pkg...
-  - go test -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
+  - go test -timeout 15m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
   - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
 
 after_test:
```
````diff
@@ -4,34 +4,69 @@
 
 ## Installation
 
-### Install yarn:
+### Install yarn
 ```sh
-$ curl -o- -L https://yarnpkg.com/install.sh | bash
-$ yarn
+curl -o- -L https://yarnpkg.com/install.sh | bash
+yarn
 ```
 
-### Install `go-bindata` and `go-bindata-assetfs`.
+### Install `go-bindata` and `go-bindata-assetfs`
 
 If you do not have a working Golang environment, please follow [Install Golang](https://docs.minio.io/docs/how-to-install-golang)
 
 ```sh
-$ go get github.com/jteeuwen/go-bindata/...
-$ go get github.com/elazarl/go-bindata-assetfs/...
+go get github.com/jteeuwen/go-bindata/...
+go get github.com/elazarl/go-bindata-assetfs/...
 ```
 
-## Generating Assets.
+## Generating Assets
 
 ### Generate ui-assets.go
 
 ```sh
-$ yarn release
+yarn release
 ```
-This generates ui-assets.go in the current direcotry. Now do `make` in the parent directory to build the minio binary with the newly generated ui-assets.go
+This generates ui-assets.go in the current direcotry. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
 
-### Run Minio Browser with live reload.
+### Run Minio Browser with live reload
 
 ```sh
-$ yarn dev
+yarn dev
 ```
 
 Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
+
+### Run Minio Browser with live reload on custom port
+
+Edit `browser/webpack.config.js`
+
+```diff
+diff --git a/browser/webpack.config.js b/browser/webpack.config.js
+index 3ccdaba..9496c56 100644
+--- a/browser/webpack.config.js
++++ b/browser/webpack.config.js
+@@ -58,6 +58,7 @@ var exports = {
+     historyApiFallback: {
+       index: '/minio/'
+     },
++    port: 8888,
+     proxy: {
+       '/minio/webrpc': {
+         target: 'http://localhost:9000',
+@@ -97,7 +98,7 @@ var exports = {
+ if (process.env.NODE_ENV === 'dev') {
+   exports.entry = [
+     'webpack/hot/dev-server',
+-    'webpack-dev-server/client?http://localhost:8080',
++    'webpack-dev-server/client?http://localhost:8888',
+     path.resolve(__dirname, 'app/index.js')
+   ]
+ }
+```
+
+```sh
+yarn dev
+```
+
+Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application
````
```diff
@@ -56,6 +56,7 @@ export const SET_POLICIES = 'SET_POLICIES'
 export const SET_SHARE_OBJECT = 'SET_SHARE_OBJECT'
 export const DELETE_CONFIRMATION = 'DELETE_CONFIRMATION'
 export const SET_PREFIX_WRITABLE = 'SET_PREFIX_WRITABLE'
+export const REMOVE_OBJECT = 'REMOVE_OBJECT'
 
 export const showDeleteConfirmation = (object) => {
   return {
@@ -206,6 +207,13 @@ export const showAlert = alert => {
   }
 }
 
+export const removeObject = object => {
+  return {
+    type: REMOVE_OBJECT,
+    object
+  }
+}
+
 export const setSidebarStatus = (status) => {
   return {
     type: SET_SIDEBAR_STATUS,
@@ -227,10 +235,12 @@ export const setVisibleBuckets = visibleBuckets => {
   }
 }
 
-export const setObjects = (objects) => {
+export const setObjects = (objects, marker, istruncated) => {
   return {
     type: SET_OBJECTS,
-    objects
+    objects,
+    marker,
+    istruncated
   }
 }
 
@@ -284,22 +294,63 @@ export const selectBucket = (newCurrentBucket, prefix) => {
   }
 }
 
+export const listObjects = () => {
+  return (dispatch, getState) => {
+    const {currentBucket, currentPath, marker, objects, istruncated, web} = getState()
+    if (!istruncated) return
+    web.ListObjects({
+      bucketName: currentBucket,
+      prefix: currentPath,
+      marker: marker
+    })
+      .then(res => {
+        let objects = res.objects
+        if (!objects)
+          objects = []
+        objects = objects.map(object => {
+          object.name = object.name.replace(`${currentPath}`, '');
+          return object
+        })
+        dispatch(setObjects(objects, res.nextmarker, res.istruncated))
+        dispatch(setPrefixWritable(res.writable))
+        dispatch(setLoadBucket(''))
+        dispatch(setLoadPath(''))
+      })
+      .catch(err => {
+        dispatch(showAlert({
+          type: 'danger',
+          message: err.message
+        }))
+        dispatch(setLoadBucket(''))
+        dispatch(setLoadPath(''))
+        // Use browserHistory.replace instead of push so that browser back button works fine.
+        browserHistory.replace(`${minioBrowserPrefix}/login`)
+      })
+  }
+}
+
 export const selectPrefix = prefix => {
   return (dispatch, getState) => {
     const {currentBucket, web} = getState()
+    dispatch(setObjects([], "", false))
     dispatch(setLoadPath(prefix))
     web.ListObjects({
       bucketName: currentBucket,
-      prefix
+      prefix,
+      marker: ""
     })
       .then(res => {
+        let objects = res.objects
+        if (!objects)
+          objects = []
+        objects = objects.map(object => {
+          object.name = object.name.replace(`${prefix}`, '');
+          return object
+        })
         dispatch(setObjects(
-          utils.sortObjectsByName(objects.map(object => {
-            object.name = object.name.replace(`${prefix}`, ''); return object
-          }))
+          objects,
+          res.nextmarker,
+          res.istruncated
         ))
         dispatch(setPrefixWritable(res.writable))
         dispatch(setSortNameOrder(false))
@@ -314,8 +365,8 @@ export const selectPrefix = prefix => {
         }))
         dispatch(setLoadBucket(''))
         dispatch(setLoadPath(''))
-        // Use browserHistory.replace instead of push so that browser back button works fine.
-        browserHistory.replace(`${minioBrowserPrefix}/login`)
+        // Use browserHistory.replace instead of push so that browser back button works fine.
+        browserHistory.replace(`${minioBrowserPrefix}/login`)
       })
   }
 }
```
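The paging contract visible above is: `web.ListObjects` takes a `marker`, and its reply carries `nextmarker` and `istruncated`; callers keep requesting pages until `istruncated` goes false. A minimal sketch of that loop in Go, with a hypothetical in-memory `listObjects` standing in for the JSON-RPC call (the helper and its canned pages are illustration only):

```go
package main

import "fmt"

// listPage mirrors the ListObjects reply fields used by the browser:
// a page of names, the next marker, and whether the listing is truncated.
type listPage struct {
	objects     []string
	nextmarker  string
	istruncated bool
}

// listObjects is a stand-in for web.ListObjects; two canned pages.
func listObjects(marker string) listPage {
	pages := map[string]listPage{
		"":      {[]string{"a.txt", "b.txt"}, "b.txt", true},
		"b.txt": {[]string{"c.txt"}, "", false},
	}
	return pages[marker]
}

func main() {
	var all []string
	marker := ""
	for {
		page := listObjects(marker)
		// Append this page, just as the SET_OBJECTS reducer appends
		// action.objects and records action.marker / action.istruncated.
		all = append(all, page.objects...)
		if !page.istruncated {
			break
		}
		marker = page.nextmarker
	}
	fmt.Println(all) // [a.txt b.txt c.txt]
}
```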
```diff
@@ -47,6 +47,7 @@ import * as mime from '../mime'
 import { minioBrowserPrefix } from '../constants'
 import CopyToClipboard from 'react-copy-to-clipboard'
 import storage from 'local-storage-fallback'
+import InfiniteScroll from 'react-infinite-scroller';
 
 export default class Browse extends React.Component {
   componentDidMount() {
@@ -110,9 +111,6 @@ export default class Browse extends React.Component {
     if (!decPathname.endsWith('/'))
       decPathname += '/'
     if (decPathname === minioBrowserPrefix + '/') {
-      dispatch(actions.setCurrentBucket(''))
-      dispatch(actions.setCurrentPath(''))
-      dispatch(actions.setObjects([]))
       return
     }
     let obj = utils.pathSlice(decPathname)
@@ -140,6 +138,11 @@ export default class Browse extends React.Component {
     this.props.dispatch(actions.setVisibleBuckets(buckets.filter(bucket => bucket.indexOf(e.target.value) > -1)))
   }
 
+  listObjects() {
+    const {dispatch} = this.props
+    dispatch(actions.listObjects())
+  }
+
   selectPrefix(e, prefix) {
     e.preventDefault()
     const {dispatch, currentPath, web, currentBucket} = this.props
@@ -231,7 +234,7 @@ export default class Browse extends React.Component {
       })
       .then(() => {
         this.hideDeleteConfirmation()
-        dispatch(actions.selectPrefix(currentPath))
+        dispatch(actions.removeObject(deleteConfirmation.object))
       })
       .catch(e => dispatch(actions.showAlert({
         type: 'danger',
@@ -296,11 +299,6 @@ export default class Browse extends React.Component {
     browserHistory.push(`${minioBrowserPrefix}/login`)
   }
 
-  landingPage(e) {
-    e.preventDefault()
-    this.props.dispatch(actions.selectBucket(this.props.buckets[0]))
-  }
-
   fullScreen(e) {
     e.preventDefault()
     let el = document.documentElement
@@ -365,7 +363,6 @@ export default class Browse extends React.Component {
     }
   }
 
-
   render() {
     const {total, free} = this.props.storageInfo
     const {showMakeBucketModal, alert, sortNameOrder, sortSizeOrder, sortDateOrder, showAbout, showBucketPolicy} = this.props
@@ -375,7 +372,7 @@ export default class Browse extends React.Component {
     const {policies, currentBucket, currentPath} = this.props
     const {deleteConfirmation} = this.props
     const {shareObject} = this.props
-    const {web, prefixWritable} = this.props
+    const {web, prefixWritable, istruncated} = this.props
 
     // Don't always show the SettingsModal. This is done here instead of in
     // SettingsModal.js so as to allow for #componentWillMount to handle
@@ -414,10 +411,10 @@ export default class Browse extends React.Component {
     let freePercent = free * 100 / total
 
     if (web.LoggedIn()) {
-      browserDropdownButton = <BrowserDropdown fullScreen={ this.fullScreen.bind(this) }
-                                showAbout={ this.showAbout.bind(this) }
-                                showSettings={ this.showSettings.bind(this) }
-                                logout={ this.logout.bind(this) } />
+      browserDropdownButton = <BrowserDropdown fullScreenFunc={ this.fullScreen.bind(this) }
+                                aboutFunc={ this.showAbout.bind(this) }
+                                settingsFunc={ this.showSettings.bind(this) }
+                                logoutFunc={ this.logout.bind(this) } />
     } else {
       loginButton = <a className='btn btn-danger' href='/minio/login'>Login</a>
     }
@@ -481,7 +478,6 @@ export default class Browse extends React.Component {
           </OverlayTrigger>
         </Dropdown.Menu>
       </Dropdown>
-
     }
 
     return (
@@ -489,8 +485,7 @@ export default class Browse extends React.Component {
         'file-explorer': true,
         'toggled': sidebarStatus
       }) }>
-        <SideBar landingPage={ this.landingPage.bind(this) }
-          searchBuckets={ this.searchBuckets.bind(this) }
+        <SideBar searchBuckets={ this.searchBuckets.bind(this) }
           selectBucket={ this.selectBucket.bind(this) }
          clickOutside={ this.hideSidebar.bind(this) }
          showPolicy={ this.showBucketPolicy.bind(this) } />
@@ -551,10 +546,18 @@ export default class Browse extends React.Component {
           </header>
         </div>
         <div className="feb-container">
-          <ObjectsList dataType={ this.dataType.bind(this) }
-            selectPrefix={ this.selectPrefix.bind(this) }
-            showDeleteConfirmation={ this.showDeleteConfirmation.bind(this) }
-            shareObject={ this.shareObject.bind(this) } />
+          <InfiniteScroll loadMore={ this.listObjects.bind(this) }
+            hasMore={ istruncated }
+            useWindow={ true }
+            initialLoad={ false }>
+            <ObjectsList dataType={ this.dataType.bind(this) }
+              selectPrefix={ this.selectPrefix.bind(this) }
+              showDeleteConfirmation={ this.showDeleteConfirmation.bind(this) }
+              shareObject={ this.shareObject.bind(this) } />
+          </InfiniteScroll>
+          <div className="text-center" style={ { display: istruncated ? 'block' : 'none' } }>
+            <span>Loading...</span>
+          </div>
         </div>
         <UploadModal />
         { createButton }
```
```diff
@@ -18,7 +18,7 @@ import React from 'react'
 import connect from 'react-redux/lib/components/connect'
 import Dropdown from 'react-bootstrap/lib/Dropdown'
 
-let BrowserDropdown = ({fullScreen, showAbout, showSettings, logout}) => {
+let BrowserDropdown = ({fullScreenFunc, aboutFunc, settingsFunc, logoutFunc}) => {
   return (
     <li>
       <Dropdown pullRight id="top-right-menu">
@@ -30,7 +30,7 @@ let BrowserDropdown = ({fullScreen, showAbout, showSettings, logout}) => {
           <a target="_blank" href="https://github.com/minio/miniobrowser">Github <i className="fa fa-github"></i></a>
         </li>
         <li>
-          <a href="" onClick={ fullScreen }>Fullscreen <i className="fa fa-expand"></i></a>
+          <a href="" onClick={ fullScreenFunc }>Fullscreen <i className="fa fa-expand"></i></a>
         </li>
         <li>
           <a target="_blank" href="https://docs.minio.io/">Documentation <i className="fa fa-book"></i></a>
@@ -39,13 +39,13 @@ let BrowserDropdown = ({fullScreen, showAbout, showSettings, logout}) => {
           <a target="_blank" href="https://slack.minio.io">Ask for help <i className="fa fa-question-circle"></i></a>
         </li>
         <li>
-          <a href="" onClick={ showAbout }>About <i className="fa fa-info-circle"></i></a>
+          <a href="" onClick={ aboutFunc }>About <i className="fa fa-info-circle"></i></a>
         </li>
         <li>
-          <a href="" onClick={ showSettings }>Settings <i className="fa fa-cog"></i></a>
+          <a href="" onClick={ settingsFunc }>Settings <i className="fa fa-cog"></i></a>
         </li>
         <li>
-          <a href="" onClick={ logout }>Sign Out <i className="fa fa-sign-out"></i></a>
+          <a href="" onClick={ logoutFunc }>Sign Out <i className="fa fa-sign-out"></i></a>
         </li>
       </Dropdown.Menu>
     </Dropdown>
```
```diff
@@ -29,7 +29,7 @@ let Path = ({currentBucket, currentPath, selectPrefix}) => {
   }
 
   return (
-    <h2><span className="main"><a onClick={ (e) => selectPrefix(e, '') } href="">{ currentBucket }</a></span>{ path }</h2>
+    <h2><span className="main"><a onClick={ (e) => selectPrefix(e, '') } href="">{ currentBucket }</a></span>{ path }</h2>
   )
 }
```
```diff
@@ -22,7 +22,7 @@ import connect from 'react-redux/lib/components/connect'
 
 import logo from '../../img/logo.svg'
 
-let SideBar = ({visibleBuckets, loadBucket, currentBucket, selectBucket, searchBuckets, landingPage, sidebarStatus, clickOutside, showPolicy}) => {
+let SideBar = ({visibleBuckets, loadBucket, currentBucket, selectBucket, searchBuckets, sidebarStatus, clickOutside, showPolicy}) => {
 
   const list = visibleBuckets.map((bucket, i) => {
     return <li className={ classNames({
@@ -44,8 +44,8 @@ let SideBar = ({visibleBuckets, loadBucket, currentBucket, selectBucket, searchB
       'toggled': sidebarStatus
     }) }>
     <div className="fes-header clearfix hidden-sm hidden-xs">
-      <a href="" onClick={ landingPage }><img src={ logo } alt="" />
-        <h2>Minio Browser</h2></a>
+      <img src={ logo } alt="" />
+      <h2>Minio Browser</h2>
    </div>
    <div className="fes-list">
      <div className="input-group ig-dark ig-left ig-search" style={ { display: web.LoggedIn() ? 'block' : 'none' } }>
```
```diff
@@ -21,6 +21,7 @@ export default (state = {
   buckets: [],
   visibleBuckets: [],
   objects: [],
+  istruncated: true,
   storageInfo: {},
   serverInfo: {},
   currentBucket: '',
@@ -76,7 +77,15 @@ export default (state = {
       newState.currentBucket = action.currentBucket
       break
     case actions.SET_OBJECTS:
-      newState.objects = action.objects
+      if (!action.objects.length) {
+        newState.objects = []
+        newState.marker = ""
+        newState.istruncated = action.istruncated
+      } else {
+        newState.objects = [...newState.objects, ...action.objects]
+        newState.marker = action.marker
+        newState.istruncated = action.istruncated
+      }
       break
     case actions.SET_CURRENT_PATH:
       newState.currentPath = action.currentPath
@@ -171,6 +180,11 @@ export default (state = {
     case actions.SET_PREFIX_WRITABLE:
       newState.prefixWritable = action.prefixWritable
       break
+    case actions.REMOVE_OBJECT:
+      let idx = newState.objects.findIndex(object => object.name === action.object)
+      if (idx == -1) break
+      newState.objects = [...newState.objects.slice(0, idx), ...newState.objects.slice(idx + 1)]
+      break
   }
   return newState
 }
```
```diff
@@ -32,6 +32,7 @@
     "copy-webpack-plugin": "^0.3.3",
     "css-loader": "^0.23.1",
     "esformatter": "^0.10.0",
+    "esformatter-jsx": "^7.4.1",
     "esformatter-jsx-ignore": "^1.0.6",
     "expect": "^1.20.2",
     "history": "^1.17.0",
@@ -77,6 +78,7 @@
     "react-custom-scrollbars": "^2.2.2",
     "react-dom": "^0.14.6",
     "react-dropzone": "^3.5.3",
+    "react-infinite-scroller": "^1.0.6",
     "react-onclickout": "2.0.4"
   }
 }
```
File diff suppressed because one or more lines are too long
```diff
@@ -694,6 +694,10 @@ babylon@^6.11.0, babylon@^6.15.0, babylon@^6.8.0:
   version "6.15.0"
   resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e"
 
+babylon@6.14.1:
+  version "6.14.1"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.14.1.tgz#956275fab72753ad9b3435d7afe58f8bf0a29815"
+
 backo2@1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
@@ -756,7 +760,7 @@ bluebird@^2.10.2, bluebird@^2.9.27:
   version "2.11.0"
   resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
 
-bluebird@^3.4.7:
+bluebird@^3.0.5, bluebird@^3.4.7:
   version "3.4.7"
   resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.4.7.tgz#f72d760be09b7f76d08ed8fae98b289a8d05fab3"
 
@@ -1086,6 +1090,13 @@ concat-map@0.0.1:
   version "0.0.1"
   resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
 
+config-chain@~1.1.5:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
 connect-history-api-fallback@^1.3.0:
   version "1.3.0"
   resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.3.0.tgz#e51d17f8f0ef0db90a64fdb47de3051556e9f169"
@@ -1462,6 +1473,15 @@ ecc-jsbn@~0.1.1:
   dependencies:
     jsbn "~0.1.0"
 
+editorconfig@^0.13.2:
+  version "0.13.2"
+  resolved "https://registry.yarnpkg.com/editorconfig/-/editorconfig-0.13.2.tgz#8e57926d9ee69ab6cb999f027c2171467acceb35"
+  dependencies:
+    bluebird "^3.0.5"
+    commander "^2.9.0"
+    lru-cache "^3.2.0"
+    sigmund "^1.0.1"
+
 ee-first@1.1.1:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
@@ -1571,6 +1591,10 @@ escape-string-regexp@^1.0.2, escape-string-regexp@1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1"
 
+esformatter-ignore@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/esformatter-ignore/-/esformatter-ignore-0.1.3.tgz#04d3b875bfa49dde004cc58df6f6bbc3c0567f1e"
+
 esformatter-jsx-ignore@^1.0.6:
   version "1.0.6"
   resolved "https://registry.yarnpkg.com/esformatter-jsx-ignore/-/esformatter-jsx-ignore-1.0.6.tgz#e594f6b77db6f85d8c1179ae6dc465756d422489"
@@ -1578,6 +1602,15 @@ esformatter-jsx-ignore@^1.0.6:
     esprima-fb "^12001.1.0-dev-harmony-fb"
     fresh-falafel "^0.2.6"
 
+esformatter-jsx@^7.4.1:
+  version "7.4.1"
+  resolved "https://registry.yarnpkg.com/esformatter-jsx/-/esformatter-jsx-7.4.1.tgz#b2209ae0908f413a747b1205727cbf4ba4249602"
+  dependencies:
+    babylon "6.14.1"
+    esformatter-ignore "^0.1.3"
+    extend "3.0.0"
+    js-beautify "1.6.4"
+
 esformatter-parser@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/esformatter-parser/-/esformatter-parser-1.0.0.tgz#0854072d0487539ed39cae38d8a5432c17ec11d3"
@@ -2200,7 +2233,7 @@ inherits@2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1"
 
-ini@~1.3.0:
+ini@^1.3.4, ini@~1.3.0:
   version "1.3.4"
   resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
 
@@ -2428,6 +2461,15 @@ js-base64@^2.1.9:
   version "2.1.9"
   resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce"
 
+js-beautify@1.6.4:
+  version "1.6.4"
+  resolved "https://registry.yarnpkg.com/js-beautify/-/js-beautify-1.6.4.tgz#a9af79699742ac9a1b6fddc1fdbc78bc4d515fc3"
+  dependencies:
+    config-chain "~1.1.5"
+    editorconfig "^0.13.2"
+    mkdirp "~0.5.0"
+    nopt "~3.0.1"
+
 js-tokens@^3.0.0:
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.0.tgz#a2f2a969caae142fb3cd56228358c89366957bd1"
@@ -2684,6 +2726,12 @@ lower-case@^1.1.1:
   version "1.1.3"
   resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.3.tgz#c92393d976793eee5ba4edb583cf8eae35bd9bfb"
 
+lru-cache@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-3.2.0.tgz#71789b3b7f5399bec8565dda38aa30d2a097efee"
+  dependencies:
+    pseudomap "^1.0.1"
+
 lru-cache@2:
   version "2.7.3"
   resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
@@ -2902,7 +2950,7 @@ node-pre-gyp@^0.6.29:
     tar "~2.2.1"
     tar-pack "~3.3.0"
 
-nopt@~3.0.6:
+nopt@~3.0.1, nopt@~3.0.6:
   version "3.0.6"
   resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
   dependencies:
@@ -3447,6 +3495,10 @@ promise@^7.0.3, promise@^7.1.1:
   dependencies:
     asap "~2.0.3"
 
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
 protochain@^1.0.5:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/protochain/-/protochain-1.0.5.tgz#991c407e99de264aadf8f81504b5e7faf7bfa260"
@@ -3462,6 +3514,10 @@ prr@~0.0.0:
   version "0.0.0"
   resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a"
 
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
 punycode@^1.2.4, punycode@^1.4.1:
   version "1.4.1"
   resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
@@ -3609,6 +3665,10 @@ react-dropzone@^3.5.3:
   dependencies:
     attr-accept "^1.0.3"
 
+react-infinite-scroller@^1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/react-infinite-scroller/-/react-infinite-scroller-1.0.6.tgz#bb406d70032d09fa9e4a3e2175d3adbf4a3f559d"
+
 react-onclickout@2.0.4:
   version "2.0.4"
   resolved "https://registry.yarnpkg.com/react-onclickout/-/react-onclickout-2.0.4.tgz#2c7539a647e1dcdcab0b28e2f4eae3c3e00f0c64"
@@ -4039,7 +4099,7 @@ shelljs@^0.7.0:
     interpret "^1.0.0"
     rechoir "^0.6.2"
 
-sigmund@~1.0.0:
+sigmund@^1.0.1, sigmund@~1.0.0:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
```
```diff
@@ -97,7 +97,7 @@ assert_is_supported_arch() {
 
 assert_is_supported_os() {
     case "${KNAME}" in
-        Linux | FreeBSD )
+        Linux | FreeBSD | OpenBSD | NetBSD | DragonFly )
            return
            ;;
        Darwin )
@@ -113,7 +113,7 @@ assert_is_supported_os() {
        *)
            echo "ERROR"
            echo "OS '${KNAME}' is not supported."
-           echo "Supported OS: [Linux, FreeBSD, Darwin]"
+           echo "Supported OS: [Linux, FreeBSD, OpenBSD, NetBSD, Darwin, DragonFly]"
            exit 1
    esac
 }
```
```diff
@@ -19,7 +19,6 @@ package cmd
 import (
 	"encoding/json"
 	"encoding/xml"
-	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/url"
@@ -36,14 +35,14 @@ type mgmtQueryKey string
 
 // Only valid query params for list/clear locks management APIs.
 const (
-	mgmtBucket    mgmtQueryKey = "bucket"
-	mgmtObject    mgmtQueryKey = "object"
-	mgmtPrefix    mgmtQueryKey = "prefix"
-	mgmtOlderThan mgmtQueryKey = "older-than"
-	mgmtDelimiter mgmtQueryKey = "delimiter"
-	mgmtMarker    mgmtQueryKey = "marker"
-	mgmtMaxKey    mgmtQueryKey = "max-key"
-	mgmtDryRun    mgmtQueryKey = "dry-run"
+	mgmtBucket       mgmtQueryKey = "bucket"
+	mgmtObject       mgmtQueryKey = "object"
+	mgmtPrefix       mgmtQueryKey = "prefix"
+	mgmtLockDuration mgmtQueryKey = "duration"
+	mgmtDelimiter    mgmtQueryKey = "delimiter"
+	mgmtMarker       mgmtQueryKey = "marker"
+	mgmtMaxKey       mgmtQueryKey = "max-key"
+	mgmtDryRun       mgmtQueryKey = "dry-run"
 )
 
 // ServerVersion - server version
@@ -54,8 +53,8 @@ type ServerVersion struct {
 
 // ServerStatus - contains the response of service status API
 type ServerStatus struct {
 	StorageInfo   StorageInfo   `json:"storageInfo"`
 	ServerVersion ServerVersion `json:"serverVersion"`
+	Uptime        time.Duration `json:"uptime"`
 }
 
 // ServiceStatusHandler - GET /?service
@@ -70,15 +69,22 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *
 		return
 	}
 
 	// Fetch storage backend information
 	storageInfo := newObjectLayerFn().StorageInfo()
 	// Fetch server version
 	serverVersion := ServerVersion{Version: Version, CommitID: CommitID}
 
+	// Fetch uptimes from all peers. This may fail to due to lack
+	// of read-quorum availability.
+	uptime, err := getPeerUptimes(globalAdminPeers)
+	if err != nil {
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		errorIf(err, "Possibly failed to get uptime from majority of servers.")
+		return
+	}
+
 	// Create API response
 	serverStatus := ServerStatus{
 		StorageInfo:   storageInfo,
 		ServerVersion: serverVersion,
+		Uptime:        uptime,
 	}
 
 	// Marshal API response
@@ -131,8 +137,8 @@ func (adminAPI adminAPIHandlers) ServiceCredentialsHandler(w http.ResponseWriter
 	}
 
 	// Avoid setting new credentials when they are already passed
-	// by the environnement
-	if globalEnvAccessKey != "" || globalEnvSecretKey != "" {
+	// by the environment.
+	if globalIsEnvCreds {
 		writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 		return
 	}
@@ -154,24 +160,25 @@ func (adminAPI adminAPIHandlers) ServiceCredentialsHandler(w http.ResponseWriter
 	}
 
 	// Check passed credentials
-	cred, err := getCredential(req.Username, req.Password)
-	switch err {
-	case errInvalidAccessKeyLength:
-		writeErrorResponse(w, ErrAdminInvalidAccessKey, r.URL)
-		return
-	case errInvalidSecretKeyLength:
-		writeErrorResponse(w, ErrAdminInvalidSecretKey, r.URL)
+	err = validateAuthKeys(req.Username, req.Password)
+	if err != nil {
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
 
+	creds := credential{
+		AccessKey: req.Username,
+		SecretKey: req.Password,
+	}
+
 	// Notify all other Minio peers to update credentials
-	updateErrs := updateCredsOnPeers(cred)
+	updateErrs := updateCredsOnPeers(creds)
 	for peer, err := range updateErrs {
 		errorIf(err, "Unable to update credentials on peer %s.", peer)
 	}
 
-	// Update local credentials
-	serverConfig.SetCredential(cred)
+	// Update local credentials in memory.
+	serverConfig.SetCredential(creds)
 	if err = serverConfig.Save(); err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		return
@@ -181,11 +188,103 @@ func (adminAPI adminAPIHandlers) ServiceCredentialsHandler(w http.ResponseWriter
 	w.WriteHeader(http.StatusOK)
 }
 
+// ServerProperties holds some server information such as, version, region
+// uptime, etc..
+type ServerProperties struct {
+	Uptime   time.Duration `json:"uptime"`
+	Version  string        `json:"version"`
+	CommitID string        `json:"commitID"`
+	Region   string        `json:"region"`
+	SQSARN   []string      `json:"sqsARN"`
+}
+
+// ServerConnStats holds transferred bytes from/to the server
+type ServerConnStats struct {
+	TotalInputBytes  uint64 `json:"transferred"`
+	TotalOutputBytes uint64 `json:"received"`
+	Throughput       uint64 `json:"throughput,omitempty"`
+}
+
+// ServerInfo holds the information that will be returned by ServerInfo API
+type ServerInfo struct {
+	StorageInfo StorageInfo      `json:"storage"`
+	ConnStats   ServerConnStats  `json:"network"`
+	Properties  ServerProperties `json:"server"`
+}
+
+// ServerInfoHandler - GET /?server-info
+// ----------
+// Get server information
+func (adminAPI adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
+	// Authenticate request
+	adminAPIErr := checkRequestAuthType(r, "", "", "")
+	if adminAPIErr != ErrNone {
+		writeErrorResponse(w, adminAPIErr, r.URL)
+		return
+	}
+
+	// Build storage info
+	objLayer := newObjectLayerFn()
+	if objLayer == nil {
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		return
+	}
+	storage := objLayer.StorageInfo()
+
+	// Build list of enabled ARNs queues
+	var arns []string
+	for queueArn := range globalEventNotifier.GetAllExternalTargets() {
+		arns = append(arns, queueArn)
+	}
+
+	// Fetch uptimes from all peers. This may fail to due to lack
+	// of read-quorum availability.
+	uptime, err := getPeerUptimes(globalAdminPeers)
+	if err != nil {
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		errorIf(err, "Unable to get uptime from majority of servers.")
+		return
+	}
+
+	// Build server properties information
+	properties := ServerProperties{
+		Version:  Version,
+		CommitID: CommitID,
+		Region:   serverConfig.GetRegion(),
+		SQSARN:   arns,
+		Uptime:   uptime,
+	}
+
+	// Build network info
+	connStats := ServerConnStats{
+		TotalInputBytes:  globalConnStats.getTotalInputBytes(),
+		TotalOutputBytes: globalConnStats.getTotalOutputBytes(),
+	}
+
+	// Build the whole returned information
+	info := ServerInfo{
+		StorageInfo: storage,
+		ConnStats:   connStats,
+		Properties:  properties,
+	}
+
+	// Marshal API response
+	jsonBytes, err := json.Marshal(info)
+	if err != nil {
+		writeErrorResponse(w, ErrInternalError, r.URL)
+		errorIf(err, "Failed to marshal storage info into json.")
+		return
+	}
+	// Reply with storage information (across nodes in a
+	// distributed setup) as json.
+	writeSuccessResponseJSON(w, jsonBytes)
+}
+
 // validateLockQueryParams - Validates query params for list/clear locks management APIs.
 func validateLockQueryParams(vars url.Values) (string, string, time.Duration, APIErrorCode) {
 	bucket := vars.Get(string(mgmtBucket))
 	prefix := vars.Get(string(mgmtPrefix))
-	relTimeStr := vars.Get(string(mgmtOlderThan))
+	durationStr := vars.Get(string(mgmtLockDuration))
 
 	// N B empty bucket name is invalid
 	if !IsValidBucketName(bucket) {
@@ -198,24 +297,24 @@ func validateLockQueryParams(vars url.Values) (string, string, time.Duration, AP
 
 	// If older-than parameter was empty then set it to 0s to list
 	// all locks older than now.
-	if relTimeStr == "" {
-		relTimeStr = "0s"
+	if durationStr == "" {
+		durationStr = "0s"
 	}
-	relTime, err := time.ParseDuration(relTimeStr)
+	duration, err := time.ParseDuration(durationStr)
 	if err != nil {
 		errorIf(err, "Failed to parse duration passed as query value.")
 		return "", "", time.Duration(0), ErrInvalidDuration
 	}
 
-	return bucket, prefix, relTime, ErrNone
+	return bucket, prefix, duration, ErrNone
 }
 
-// ListLocksHandler - GET /?lock&bucket=mybucket&prefix=myprefix&older-than=rel_time
+// ListLocksHandler - GET /?lock&bucket=mybucket&prefix=myprefix&duration=duration
 // - bucket is a mandatory query parameter
 // - prefix and older-than are optional query parameters
 // HTTP header x-minio-operation: list
 // ---------
-// Lists locks held on a given bucket, prefix and relative time.
+// Lists locks held on a given bucket, prefix and duration it was held for.
 func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Request) {
 	adminAPIErr := checkRequestAuthType(r, "", "", "")
 	if adminAPIErr != ErrNone {
@@ -224,15 +323,15 @@ func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http
 	}
 
 	vars := r.URL.Query()
-	bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars)
+	bucket, prefix, duration, adminAPIErr := validateLockQueryParams(vars)
 	if adminAPIErr != ErrNone {
 		writeErrorResponse(w, adminAPIErr, r.URL)
 		return
 	}
 
 	// Fetch lock information of locks matching bucket/prefix that
-	// are available since relTime.
-	volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime)
+	// are available for longer than duration.
+	volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, duration)
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		errorIf(err, "Failed to fetch lock information from remote nodes.")
@@ -248,16 +347,16 @@ func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http
 	}
 
 	// Reply with list of locks held on bucket, matching prefix
-	// older than relTime supplied, as json.
+	// held longer than duration supplied, as json.
 	writeSuccessResponseJSON(w, jsonBytes)
 }
 
-// ClearLocksHandler - POST /?lock&bucket=mybucket&prefix=myprefix&older-than=relTime
+// ClearLocksHandler - POST /?lock&bucket=mybucket&prefix=myprefix&duration=duration
 // - bucket is a mandatory query parameter
 // - prefix and older-than are optional query parameters
 // HTTP header x-minio-operation: clear
 // ---------
-// Clear locks held on a given bucket, prefix and relative time.
+// Clear locks held on a given bucket, prefix and duration it was held for.
 func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Request) {
 	adminAPIErr := checkRequestAuthType(r, "", "", "")
 	if adminAPIErr != ErrNone {
@@ -266,15 +365,15 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt
 	}
 
 	vars := r.URL.Query()
-	bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars)
+	bucket, prefix, duration, adminAPIErr := validateLockQueryParams(vars)
 	if adminAPIErr != ErrNone {
 		writeErrorResponse(w, adminAPIErr, r.URL)
 		return
 	}
 
 	// Fetch lock information of locks matching bucket/prefix that
-	// are available since relTime.
-	volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime)
+	// are held for longer than duration.
+	volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, duration)
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		errorIf(err, "Failed to fetch lock information from remote nodes.")
@@ -289,7 +388,7 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt
 		return
 	}
 
-	// Remove lock matching bucket/prefix older than relTime.
+	// Remove lock matching bucket/prefix held longer than duration.
 	for _, volLock := range volLocks {
 		globalNSMutex.ForceUnlock(volLock.Bucket, volLock.Object)
 	}
@@ -541,7 +640,6 @@ func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *htt
 	// Create a new set of storage instances to heal format.json.
 	bootstrapDisks, err := initStorageDisks(globalEndpoints)
 	if err != nil {
-		fmt.Println(traceError(err))
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
@@ -549,7 +647,6 @@ func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *htt
 	// Heal format.json on available storage.
 	err = healFormatXL(bootstrapDisks)
 	if err != nil {
-		fmt.Println(traceError(err))
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
@@ -557,7 +654,6 @@ func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *htt
 	// Instantiate new object layer with newly formatted storage.
 	newObjectAPI, err := newXLObjects(bootstrapDisks)
 	if err != nil {
-		fmt.Println(traceError(err))
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
```
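The struct tags on `ServerInfo`, `ServerConnStats`, and `ServerProperties` above fix the wire format of the new server-info response. A small sketch that re-declares those types locally and marshals a sample value, so the resulting JSON keys can be inspected in isolation; `StorageInfo` is simplified to placeholder fields here, which is an assumption, since its real definition lives elsewhere in the `cmd` package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copies of the response types from the diff above.
type ServerProperties struct {
	Uptime   time.Duration `json:"uptime"`
	Version  string        `json:"version"`
	CommitID string        `json:"commitID"`
	Region   string        `json:"region"`
	SQSARN   []string      `json:"sqsARN"`
}

type ServerConnStats struct {
	TotalInputBytes  uint64 `json:"transferred"`
	TotalOutputBytes uint64 `json:"received"`
	Throughput       uint64 `json:"throughput,omitempty"`
}

// StorageInfo is a stand-in with hypothetical fields for illustration.
type StorageInfo struct {
	Total int64 `json:"total"`
	Free  int64 `json:"free"`
}

type ServerInfo struct {
	StorageInfo StorageInfo      `json:"storage"`
	ConnStats   ServerConnStats  `json:"network"`
	Properties  ServerProperties `json:"server"`
}

func main() {
	info := ServerInfo{
		StorageInfo: StorageInfo{Total: 100 << 30, Free: 40 << 30},
		ConnStats:   ServerConnStats{TotalInputBytes: 1 << 20, TotalOutputBytes: 2 << 20},
		Properties: ServerProperties{
			Uptime:  90 * time.Minute,
			Version: "DEVELOPMENT.GOGET",
			Region:  "us-east-1",
		},
	}
	out, _ := json.MarshalIndent(info, "", "  ")
	// Top-level keys come out as "storage", "network", "server".
	fmt.Println(string(out))
}
```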
```diff
@@ -25,6 +25,7 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"testing"
+	"time"
 
 	router "github.com/gorilla/mux"
 )
@@ -43,6 +44,7 @@ type adminXLTestBed struct {
 func prepareAdminXLTestBed() (*adminXLTestBed, error) {
 	// reset global variables to start afresh.
 	resetTestGlobals()
+
 	// Initialize minio server config.
 	rootPath, err := newTestConfig(globalMinioDefaultRegion)
 	if err != nil {
@@ -54,6 +56,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
 		return nil, xlErr
 	}
 
+	// Initialize boot time
+	globalBootTime = time.Now().UTC()
+
 	// Set globalEndpoints for a single node XL setup.
 	for _, xlDir := range xlDirs {
 		globalEndpoints = append(globalEndpoints, &url.URL{
@@ -197,7 +202,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -224,14 +229,13 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 
 	if cmd == statusCmd {
 		expectedInfo := ServerStatus{
-			StorageInfo:   newObjectLayerFn().StorageInfo(),
 			ServerVersion: ServerVersion{Version: Version, CommitID: CommitID},
 		}
 		receivedInfo := ServerStatus{}
 		if jsonErr := json.Unmarshal(rec.Body.Bytes(), &receivedInfo); jsonErr != nil {
 			t.Errorf("Failed to unmarshal StorageInfo - %v", jsonErr)
 		}
-		if expectedInfo != receivedInfo {
+		if expectedInfo.ServerVersion != receivedInfo.ServerVersion {
 			t.Errorf("Expected storage info and received storage info differ, %v %v", expectedInfo, receivedInfo)
 		}
 	}
@@ -264,7 +268,7 @@ func TestServiceSetCreds(t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -294,11 +298,9 @@ func TestServiceSetCreds(t *testing.T) {
 	for i, testCase := range testCases {
 		// Set or unset environement keys
 		if !testCase.EnvKeysSet {
-			globalEnvAccessKey = ""
-			globalEnvSecretKey = ""
+			globalIsEnvCreds = false
 		} else {
-			globalEnvAccessKey = testCase.Username
-			globalEnvSecretKey = testCase.Password
+			globalIsEnvCreds = true
 		}
 
 		// Construct setCreds request body
@@ -336,12 +338,12 @@ func TestServiceSetCreds(t *testing.T) {
 }
 
 // mkLockQueryVal - helper function to build lock query param.
-func mkLockQueryVal(bucket, prefix, relTimeStr string) url.Values {
+func mkLockQueryVal(bucket, prefix, durationStr string) url.Values {
 	qVal := url.Values{}
 	qVal.Set("lock", "")
 	qVal.Set(string(mgmtBucket), bucket)
 	qVal.Set(string(mgmtPrefix), prefix)
-	qVal.Set(string(mgmtOlderThan), relTimeStr)
+	qVal.Set(string(mgmtLockDuration), durationStr)
 	return qVal
 }
 
@@ -354,7 +356,7 @@ func TestListLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()
 
 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -366,41 +368,41 @@ func TestListLocksHandler(t *testing.T) {
 	testCases := []struct {
 		bucket         string
 		prefix         string
-		relTime        string
+		duration       string
 		expectedStatus int
 	}{
 		// Test 1 - valid testcase
 		{
 			bucket:         "mybucket",
 			prefix:         "myobject",
-			relTime:        "1s",
+			duration:       "1s",
 			expectedStatus: http.StatusOK,
 		},
 		// Test 2 - invalid duration
 		{
 			bucket:         "mybucket",
 			prefix:         "myprefix",
-			relTime:        "invalidDuration",
+			duration:       "invalidDuration",
 			expectedStatus: http.StatusBadRequest,
 		},
 		// Test 3 - invalid bucket name
 		{
 			bucket:         `invalid\\Bucket`,
 			prefix:         "myprefix",
-			relTime:        "1h",
+			duration:       "1h",
 			expectedStatus: http.StatusBadRequest,
 		},
 		// Test 4 - invalid prefix
 		{
 			bucket:         "mybucket",
 			prefix:         `invalid\\Prefix`,
-			relTime:        "1h",
+			duration:       "1h",
 			expectedStatus: http.StatusBadRequest,
 		},
 	}
 
 	for i, test := range testCases {
-		queryVal := mkLockQueryVal(test.bucket, test.prefix, test.relTime)
+		queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
 		req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil)
 		if err != nil {
 			t.Fatalf("Test %d - Failed to construct list locks request - %v", i+1, err)
@@ -429,7 +431,7 @@ func TestClearLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()
 
 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -438,41 +440,41 @@ func TestClearLocksHandler(t *testing.T) {
 	testCases := []struct {
 		bucket         string
 		prefix         string
-		relTime        string
+		duration       string
 		expectedStatus int
 	}{
 		// Test 1 - valid testcase
 		{
 			bucket:         "mybucket",
 			prefix:         "myobject",
-			relTime:        "1s",
+			duration:       "1s",
 			expectedStatus: http.StatusOK,
 		},
 		// Test 2 - invalid duration
 		{
 			bucket:         "mybucket",
 			prefix:         "myprefix",
-			relTime:        "invalidDuration",
+			duration:       "invalidDuration",
 			expectedStatus: http.StatusBadRequest,
 		},
 		// Test 3 - invalid bucket name
 		{
 			bucket:         `invalid\\Bucket`,
 			prefix:         "myprefix",
-			relTime:        "1h",
+			duration:       "1h",
 			expectedStatus: http.StatusBadRequest,
 		},
 		// Test 4 - invalid prefix
 		{
 			bucket:         "mybucket",
 			prefix:         `invalid\\Prefix`,
-			relTime:        "1h",
+			duration:       "1h",
 			expectedStatus: http.StatusBadRequest,
 		},
 	}
 
 	for i, test := range testCases {
-		queryVal := mkLockQueryVal(test.bucket, test.prefix, test.relTime)
+		queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
 		req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil)
 		if err != nil {
 			t.Fatalf("Test %d - Failed to construct clear locks request - %v", i+1, err)
```
```diff
@@ -39,6 +39,9 @@ func registerAdminRouter(mux *router.Router) {
 	// Service update credentials
 	adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "set-credentials").HandlerFunc(adminAPI.ServiceCredentialsHandler)
 
+	// Info operations
+	adminRouter.Methods("GET").Queries("info", "").HandlerFunc(adminAPI.ServerInfoHandler)
+
 	/// Lock operations
 
 	// List Locks
```
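Per the handler doc comments above, a list-locks call is a GET on the root path with `lock`, `bucket`, `prefix`, and `duration` query parameters plus the `x-minio-operation: list` header. A minimal Go sketch of building such a request; signature-v4 authentication is omitted here, which is an assumption, since the real handlers reject unauthenticated requests:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Same query values the mkLockQueryVal test helper sets.
	qVal := url.Values{}
	qVal.Set("lock", "")
	qVal.Set("bucket", "mybucket")
	qVal.Set("prefix", "myprefix")
	qVal.Set("duration", "30s") // list locks held longer than 30s

	req, err := http.NewRequest("GET", "http://127.0.0.1:9000/?"+qVal.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Operation selector header named in the handler doc comments.
	req.Header.Set("x-minio-operation", "list")
	fmt.Println(req.Method, req.URL.String())
}
```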
```diff
@@ -19,6 +19,7 @@ package cmd
 import (
 	"net/url"
 	"path"
+	"sort"
 	"sync"
 	"time"
 )
@@ -37,8 +38,9 @@ type remoteAdminClient struct {
 // commands like service stop and service restart.
 type adminCmdRunner interface {
 	Restart() error
-	ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error)
+	ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
 	ReInitDisks() error
+	Uptime() (time.Duration, error)
 }
 
 // Restart - Sends a message over channel to the go-routine
@@ -49,8 +51,8 @@ func (lc localAdminClient) Restart() error {
 }
 
 // ListLocks - Fetches lock information from local lock instrumentation.
-func (lc localAdminClient) ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) {
-	return listLocksInfo(bucket, prefix, relTime), nil
+func (lc localAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
+	return listLocksInfo(bucket, prefix, duration), nil
 }
 
 // Restart - Sends restart command to remote server via RPC.
@@ -61,11 +63,11 @@ func (rc remoteAdminClient) Restart() error {
 }
 
 // ListLocks - Sends list locks command to remote server via RPC.
-func (rc remoteAdminClient) ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) {
+func (rc remoteAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
 	listArgs := ListLocksQuery{
-		bucket:  bucket,
-		prefix:  prefix,
-		relTime: relTime,
+		bucket:   bucket,
+		prefix:   prefix,
+		duration: duration,
 	}
 	var reply ListLocksReply
 	if err := rc.Call("Admin.ListLocks", &listArgs, &reply); err != nil {
@@ -88,6 +90,28 @@ func (rc remoteAdminClient) ReInitDisks() error {
 	return rc.Call("Admin.ReInitDisks", &args, &reply)
 }
 
+// Uptime - Returns the uptime of this server. Timestamp is taken
+// after object layer is initialized.
+func (lc localAdminClient) Uptime() (time.Duration, error) {
+	if globalBootTime.IsZero() {
+		return time.Duration(0), errServerNotInitialized
+	}
+
+	return time.Now().UTC().Sub(globalBootTime), nil
+}
+
+// Uptime - returns the uptime of the server to which the RPC call is made.
+func (rc remoteAdminClient) Uptime() (time.Duration, error) {
+	args := AuthRPCArgs{}
+	reply := UptimeReply{}
+	err := rc.Call("Admin.Uptime", &args, &reply)
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	return reply.Uptime, nil
+}
+
 // adminPeer - represents an entity that implements Restart methods.
 type adminPeer struct {
 	addr string
@@ -175,8 +199,8 @@ func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
 }
 
 // listPeerLocksInfo - fetch list of locks held on the given bucket,
-// matching prefix older than relTime from all peer servers.
-func listPeerLocksInfo(peers adminPeers, bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) {
+// matching prefix held longer than duration from all peer servers.
+func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
 	// Used to aggregate volume lock information from all nodes.
 	allLocks := make([][]VolumeLockInfo, len(peers))
 	errs := make([]error, len(peers))
@@ -188,11 +212,11 @@ func listPeerLocksInfo(peers adminPeers, bucket, prefix string, relTime time.Dur
 		go func(idx int, remotePeer adminPeer) {
 			defer wg.Done()
 			// `remotePeers` is right-shifted by one position relative to `peers`
-			allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, relTime)
+			allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
 		}(i+1, remotePeer)
 	}
 	wg.Wait()
-	allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, relTime)
+	allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)
 
 	// Summarizing errors received for ListLocks RPC across all
 	// nodes. N B the possible unavailability of quorum in errors
@@ -241,3 +265,74 @@ func reInitPeerDisks(peers adminPeers) error {
 	wg.Wait()
 	return nil
 }
+
+// uptimeSlice - used to sort uptimes in chronological order.
+type uptimeSlice []struct {
+	err    error
+	uptime time.Duration
+}
+
+func (ts uptimeSlice) Len() int {
+	return len(ts)
+}
+
+func (ts uptimeSlice) Less(i, j int) bool {
+	return ts[i].uptime < ts[j].uptime
+}
+
+func (ts uptimeSlice) Swap(i, j int) {
+	ts[i], ts[j] = ts[j], ts[i]
+}
+
+// getPeerUptimes - returns the uptime since the last time read quorum
+// was established on success. Otherwise returns errXLReadQuorum.
+func getPeerUptimes(peers adminPeers) (time.Duration, error) {
+	// In a single node Erasure or FS backend setup the uptime of
+	// the setup is the uptime of the single minio server
+	// instance.
+	if !globalIsDistXL {
+		return time.Now().UTC().Sub(globalBootTime), nil
+	}
+
+	uptimes := make(uptimeSlice, len(peers))
+
+	// Get up time of all servers.
+	wg := sync.WaitGroup{}
+	for i, peer := range peers {
+		wg.Add(1)
+		go func(idx int, peer adminPeer) {
+			defer wg.Done()
+			uptimes[idx].uptime, uptimes[idx].err = peer.cmdRunner.Uptime()
+		}(i, peer)
+	}
+	wg.Wait()
+
+	// Sort uptimes in chronological order.
+	sort.Sort(uptimes)
+
+	// Pick the readQuorum'th uptime in chronological order. i.e,
+	// the time at which read quorum was (re-)established.
+	readQuorum := len(uptimes) / 2
+	validCount := 0
+	latestUptime := time.Duration(0)
+	for _, uptime := range uptimes {
+		if uptime.err != nil {
+			errorIf(uptime.err, "Unable to fetch uptime")
+			continue
+		}
+
+		validCount++
+		if validCount >= readQuorum {
+			latestUptime = uptime.uptime
+			break
+		}
+	}
+
+	// Less than readQuorum "Admin.Uptime" RPC call returned
+	// successfully, so read-quorum unavailable.
+	if validCount < readQuorum {
+		return time.Duration(0), InsufficientReadQuorum{}
+	}
+
+	return latestUptime, nil
+}
```
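The quorum pick in `getPeerUptimes` reduces to: sort peer uptimes ascending and take the readQuorum'th smallest, the moment read quorum was last (re-)established. A self-contained toy version of just that arithmetic, with the RPC plumbing replaced by a plain slice (assumes at least two peers and no errors, unlike the real function):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// quorumUptime sorts peer uptimes and returns the readQuorum'th smallest,
// mirroring the selection loop in getPeerUptimes above.
func quorumUptime(uptimes []time.Duration) time.Duration {
	sort.Slice(uptimes, func(i, j int) bool { return uptimes[i] < uptimes[j] })
	readQuorum := len(uptimes) / 2
	return uptimes[readQuorum-1] // readQuorum'th element, 1-based
}

func main() {
	// Four peers: one restarted 5 minutes ago, the rest up for hours.
	uptimes := []time.Duration{
		5 * time.Minute,
		2 * time.Hour,
		3 * time.Hour,
		4 * time.Hour,
	}
	// readQuorum = 4/2 = 2, so the reported uptime is the 2nd smallest: 2h.
	fmt.Println(quorumUptime(uptimes))
}
```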
@@ -37,9 +37,9 @@ type adminCmd struct {
|
||||
// ListLocksQuery - wraps ListLocks API's query values to send over RPC.
|
||||
type ListLocksQuery struct {
|
||||
AuthRPCArgs
|
||||
bucket string
|
||||
prefix string
|
||||
relTime time.Duration
|
||||
bucket string
|
||||
prefix string
|
||||
duration time.Duration
|
||||
}
|
||||
|
||||
// ListLocksReply - wraps ListLocks response over RPC.
|
||||
@@ -48,6 +48,12 @@ type ListLocksReply struct {
|
||||
volLocks []VolumeLockInfo
|
||||
}
|
||||
|
||||
// UptimeReply - wraps the uptime response over RPC.
|
||||
type UptimeReply struct {
|
||||
AuthRPCReply
|
||||
Uptime time.Duration
|
||||
}
|
||||
|
||||
// Restart - Restart this instance of minio server.
|
||||
func (s *adminCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
@@ -63,7 +69,7 @@ func (s *adminCmd) ListLocks(query *ListLocksQuery, reply *ListLocksReply) error
|
||||
if err := query.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
volLocks := listLocksInfo(query.bucket, query.prefix, query.relTime)
|
||||
volLocks := listLocksInfo(query.bucket, query.prefix, query.duration)
|
||||
*reply = ListLocksReply{volLocks: volLocks}
|
||||
return nil
|
||||
}
|
||||
@@ -105,6 +111,27 @@ func (s *adminCmd) ReInitDisks(args *AuthRPCArgs, reply *AuthRPCReply) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Uptime - returns the time when object layer was initialized on this server.
|
||||
func (s *adminCmd) Uptime(args *AuthRPCArgs, reply *UptimeReply) error {
|
||||
if err := args.IsAuthenticated(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if globalBootTime.IsZero() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// N B The uptime is computed assuming that the system time is
|
||||
// monotonic. This is not the case in time pkg in Go, see
|
||||
// https://github.com/golang/go/issues/12914. This is expected
|
||||
// to be fixed by go1.9.
|
||||
*reply = UptimeReply{
|
||||
Uptime: time.Now().UTC().Sub(globalBootTime),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
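
Go 1.9 did land monotonic-clock support in the time package, which resolves the caveat in the comment above. A standalone sketch (hypothetical, not server code) of the now-safe pattern:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	bootTime := time.Now() // carries a monotonic clock reading since Go 1.9

	time.Sleep(150 * time.Millisecond)

	// When both values carry a monotonic reading, the subtraction uses
	// it, so wall-clock steps (NTP, manual changes) cannot skew uptime.
	fmt.Println("uptime:", time.Since(bootTime).Round(time.Millisecond))
}
```

Note that calling .UTC() as the handler above does strips the monotonic reading from a time.Time, which is one reason the original comment hedges.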

// registerAdminRPCRouter - registers RPC methods for service status,
// stop and restart commands.
func registerAdminRPCRouter(mux *router.Router) error {

@@ -83,6 +83,7 @@ const (
	ErrInvalidPartOrder
	ErrAuthorizationHeaderMalformed
	ErrMalformedPOSTRequest
	ErrPOSTFileRequired
	ErrSignatureVersionNotSupported
	ErrBucketNotEmpty
	ErrAllAccessDisabled
@@ -333,6 +334,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The body of your POST request is not well-formed multipart/form-data.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrPOSTFileRequired: {
		Code:           "InvalidArgument",
		Description:    "POST requires exactly one file upload per request.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrSignatureVersionNotSupported: {
		Code:        "InvalidRequest",
		Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
@@ -483,7 +489,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
	},
	ErrInvalidDuration: {
		Code:           "InvalidDuration",
		Description:    "Relative duration provided in the request is invalid.",
		Description:    "Duration provided in the request is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},

@@ -606,6 +612,14 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
		apiErr = ErrSignatureDoesNotMatch
	case errContentSHA256Mismatch:
		apiErr = ErrContentSHA256Mismatch
	case errDataTooLarge:
		apiErr = ErrEntityTooLarge
	case errDataTooSmall:
		apiErr = ErrEntityTooSmall
	case errInvalidAccessKeyLength:
		apiErr = ErrAdminInvalidAccessKey
	case errInvalidSecretKeyLength:
		apiErr = ErrAdminInvalidSecretKey
	}

	if apiErr != ErrNone {

@@ -208,6 +208,13 @@ type CopyObjectResponse struct {
	ETag string // md5sum of the copied object.
}

// CopyObjectPartResponse container returns ETag and LastModified of the successfully copied object
type CopyObjectPartResponse struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
	LastModified string   // time string of format "2006-01-02T15:04:05.000Z"
	ETag         string   // md5sum of the copied object part.
}

// Initiator inherit from Owner struct, fields are same
type Initiator Owner

@@ -399,6 +406,14 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
	}
}

// generates CopyObjectPartResponse from etag and lastModified time.
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
	return CopyObjectPartResponse{
		ETag:         "\"" + etag + "\"",
		LastModified: lastModified.UTC().Format(timeFormatAMZLong),
	}
}
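
As a quick illustration of what the struct tags above produce, here is a standalone sketch, with a trimmed copy of the struct rather than the server's own file, that marshals the response:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed copy of CopyObjectPartResponse, for illustration only.
type CopyObjectPartResponse struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
	LastModified string
	ETag         string
}

func main() {
	resp := CopyObjectPartResponse{
		ETag:         `"d41d8cd98f00b204e9800998ecf8427e"`,
		LastModified: "2017-03-01T15:04:05.000Z",
	}
	out, err := xml.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints a CopyPartResult element in the S3 namespace.
	fmt.Println(string(out))
}
```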

// generates InitiateMultipartUploadResponse for given bucket, key and uploadID.
func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse {
	return InitiateMultipartUploadResponse{

@@ -40,6 +40,8 @@ func registerAPIRouter(mux *router.Router) {

	// HeadObject
	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(api.HeadObjectHandler)
	// CopyObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// PutObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// ListObjectParts
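
One detail worth noting in the routing above: gorilla/mux matches routes in registration order, so the CopyObjectPart route, distinguished from a plain part upload only by the X-Amz-Copy-Source header, must be registered first. A minimal sketch of the same pattern with placeholder handlers:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	bucket := r.PathPrefix("/{bucket}").Subrouter()

	// Registered first: matches only when a copy-source header is present.
	bucket.Methods("PUT").Path("/{object:.+}").
		HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").
		Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "copy object part")
		})

	// Fallback for the same method/path: an ordinary part upload.
	bucket.Methods("PUT").Path("/{object:.+}").
		Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "put object part")
		})

	http.ListenAndServe(":8080", r)
}
```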

@@ -125,7 +125,8 @@ func checkRequestAuthType(r *http.Request, bucket, policyAction, region string)

	if reqAuthType == authTypeAnonymous && policyAction != "" {
		// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
		return enforceBucketPolicy(bucket, policyAction, r.URL)
		return enforceBucketPolicy(bucket, policyAction, r.URL.Path,
			r.Referer(), r.URL.Query())
	}

	// By default return ErrAccessDenied

@@ -36,7 +36,7 @@ func TestGetRequestAuthType(t *testing.T) {
	{
		req: &http.Request{
			URL: &url.URL{
				Host:   "localhost:9000",
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
				Path:   "/",
			},
@@ -53,7 +53,7 @@ func TestGetRequestAuthType(t *testing.T) {
	{
		req: &http.Request{
			URL: &url.URL{
				Host:   "localhost:9000",
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
				Path:   "/",
			},
@@ -68,7 +68,7 @@ func TestGetRequestAuthType(t *testing.T) {
	{
		req: &http.Request{
			URL: &url.URL{
				Host:   "localhost:9000",
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
				Path:   "/",
			},
@@ -83,7 +83,7 @@ func TestGetRequestAuthType(t *testing.T) {
	{
		req: &http.Request{
			URL: &url.URL{
				Host:     "localhost:9000",
				Host:     "127.0.0.1:9000",
				Scheme:   httpScheme,
				Path:     "/",
				RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
@@ -96,7 +96,7 @@ func TestGetRequestAuthType(t *testing.T) {
	{
		req: &http.Request{
			URL: &url.URL{
				Host:   "localhost:9000",
				Host:   "127.0.0.1:9000",
				Scheme: httpScheme,
				Path:   "/",
			},
@@ -315,7 +315,8 @@ func TestIsReqAuthenticated(t *testing.T) {
	}
	defer removeAll(path)

	serverConfig.SetCredential(credential{"myuser", "mypassword"})
	creds := newCredentialWithKeys("myuser", "mypassword")
	serverConfig.SetCredential(creds)

	// List of test cases for validating http request authentication.
	testCases := []struct {
@@ -325,11 +326,11 @@ func TestIsReqAuthenticated(t *testing.T) {
		// When request is nil, internal error is returned.
		{nil, ErrInternalError},
		// When request is unsigned, access denied is returned.
		{mustNewRequest("GET", "http://localhost:9000", 0, nil, t), ErrAccessDenied},
		{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
		// When request is properly signed, but has a bad Content-MD5 header.
		{mustNewSignedRequest("PUT", "http://localhost:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
		{mustNewSignedRequest("PUT", "http://127.0.0.1:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
		// When request is properly signed, error is none.
		{mustNewSignedRequest("GET", "http://localhost:9000", 0, nil, t), ErrNone},
		{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
	}

	// Validates all testcases.

@@ -35,6 +35,19 @@ type authConfig struct {
	secureConn       bool   // Make TLS connection to RPC server or not.
	serviceName      string // Service name of auth server.
	disableReconnect bool   // Disable reconnect on failure or not.

	/// Retry configurable values.

	// Each retry unit is multiplicative, measured in time.Duration.
	// This is the basic unit used for calculating backoffs.
	retryUnit time.Duration
	// Maximum retry duration, i.e. a caller would wait no more than this
	// duration to continue their loop.
	retryCap time.Duration

	// Maximum retries an authRPC client would do for a failed
	// RPC call.
	retryAttemptThreshold int
}

// AuthRPCClient is a authenticated RPC client which does authentication before doing Call().
@@ -47,6 +60,18 @@ type AuthRPCClient struct {

// newAuthRPCClient - returns a JWT based authenticated (go) rpc client, which does automatic reconnect.
func newAuthRPCClient(config authConfig) *AuthRPCClient {
	// Check if retry params are set properly; if not, default them.
	emptyDuration := time.Duration(int64(0))
	if config.retryUnit == emptyDuration {
		config.retryUnit = defaultRetryUnit
	}
	if config.retryCap == emptyDuration {
		config.retryCap = defaultRetryCap
	}
	if config.retryAttemptThreshold == 0 {
		config.retryAttemptThreshold = globalAuthRPCRetryThreshold
	}

	return &AuthRPCClient{
		rpcClient: newRPCClient(config.serverAddr, config.serviceEndpoint, config.secureConn),
		config:    config,
@@ -105,9 +130,13 @@ func (authClient *AuthRPCClient) Call(serviceMethod string, args interface {
	SetAuthToken(authToken string)
	SetRequestTime(requestTime time.Time)
}, reply interface{}) (err error) {

	// Done channel is used to close any lingering retry routine, as soon
	// as this function returns.
	doneCh := make(chan struct{})
	defer close(doneCh)
	for i := range newRetryTimer(time.Second, 30*time.Second, MaxJitter, doneCh) {

	for i := range newRetryTimer(authClient.config.retryUnit, authClient.config.retryCap, doneCh) {
		if err = authClient.call(serviceMethod, args, reply); err == rpc.ErrShutdown {
			// As connection at server side is closed, close the rpc client.
			authClient.Close()
@@ -115,7 +144,7 @@ func (authClient *AuthRPCClient) Call(serviceMethod string, args interface {
			// Retry if reconnect is not disabled.
			if !authClient.config.disableReconnect {
				// Retry until the threshold is reached.
				if i < globalAuthRPCRetryThreshold {
				if i < authClient.config.retryAttemptThreshold {
					continue
				}
			}
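
The newRetryTimer helper is internal to minio; for orientation, here is a from-scratch sketch with the same shape: attempt numbers delivered over a channel with capped, jittered exponential backoff. All names and the jitter policy here are assumptions, not the server's actual implementation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// newRetryTimer emits attempt numbers 0, 1, 2, ... with capped,
// jittered exponential backoff until doneCh is closed.
func newRetryTimer(unit, maxWait time.Duration, doneCh <-chan struct{}) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for i := 0; ; i++ {
			select {
			case attemptCh <- i:
			case <-doneCh:
				return
			}
			// Exponential backoff capped at maxWait, plus up to 50% jitter.
			sleep := unit * (1 << uint(i))
			if sleep > maxWait {
				sleep = maxWait
			}
			sleep += time.Duration(rand.Int63n(int64(sleep)/2 + 1))
			time.Sleep(sleep)
		}
	}()
	return attemptCh
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh)
	for i := range newRetryTimer(50*time.Millisecond, 500*time.Millisecond, doneCh) {
		fmt.Println("attempt", i)
		if i == 3 { // pretend the call succeeded on the fourth try
			break
		}
	}
}
```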

@@ -133,11 +133,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
		}
		metadata := make(map[string]string)
		metadata["md5Sum"] = getMD5Hash([]byte(textPartData))
		md5Sum, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["md5Sum"], sha256sum)
		var partInfo PartInfo
		partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["md5Sum"], sha256sum)
		if err != nil {
			b.Fatal(err)
		}
		if md5Sum != metadata["md5Sum"] {
		if partInfo.ETag != metadata["md5Sum"] {
			b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, md5Sum, metadata["md5Sum"])
		}
	}

@@ -63,6 +63,10 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR
		return err
	}

	if err := validateAuthKeys(args.Creds.AccessKey, args.Creds.SecretKey); err != nil {
		return err
	}

	// Update credentials in memory
	serverConfig.SetCredential(args.Creds)


@@ -18,7 +18,6 @@ package cmd

import (
	"net/http"
	"strings"

	"github.com/gorilla/mux"
)
@@ -44,7 +43,7 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
	// Marker is set; validate pre-condition.
	if marker != "" {
		// A marker not sharing a common prefix with 'prefix' is not implemented.
		if !strings.HasPrefix(marker, prefix) {
		if !hasPrefix(marker, prefix) {
			return ErrNotImplemented
		}
	}

@@ -33,7 +33,7 @@ import (

// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
// Enforces bucket policies for a bucket for a given action.
func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error APIErrorCode) {
func enforceBucketPolicy(bucket, action, resource, referer string, queryParams url.Values) (s3Error APIErrorCode) {
	// Verify if bucket actually exists
	if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
		err = errorCause(err)
@@ -57,16 +57,21 @@ func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error
	}

	// Construct resource in 'arn:aws:s3:::examplebucket/object' format.
	resource := bucketARNPrefix + strings.TrimSuffix(strings.TrimPrefix(reqURL.Path, "/"), "/")
	arn := bucketARNPrefix + strings.TrimSuffix(strings.TrimPrefix(resource, "/"), "/")

	// Get conditions for policy verification.
	conditionKeyMap := make(map[string]set.StringSet)
	for queryParam := range reqURL.Query() {
		conditionKeyMap[queryParam] = set.CreateStringSet(reqURL.Query().Get(queryParam))
	for queryParam := range queryParams {
		conditionKeyMap[queryParam] = set.CreateStringSet(queryParams.Get(queryParam))
	}

	// Add request referer to conditionKeyMap if present.
	if referer != "" {
		conditionKeyMap["referer"] = set.CreateStringSet(referer)
	}

	// Validate action, resource and conditions with current policy statements.
	if !bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements) {
	if !bucketPolicyEvalStatements(action, arn, conditionKeyMap, policy.Statements) {
		return ErrAccessDenied
	}
	return ErrNone
@@ -160,7 +165,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
	}
	if keyMarker != "" {
		// A marker not sharing a common prefix with 'prefix' is not implemented.
		if !strings.HasPrefix(keyMarker, prefix) {
		if !hasPrefix(keyMarker, prefix) {
			writeErrorResponse(w, ErrNotImplemented, r.URL)
			return
		}
@@ -388,6 +393,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	// Require Content-Length to be set in the request
	size := r.ContentLength
	if size < 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	// Here the parameter is the size of the form data that should
	// be loaded in memory, the remaining being put in temporary files.
	reader, err := r.MultipartReader()
@@ -397,12 +409,34 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	fileBody, fileName, formValues, err := extractPostPolicyFormValues(reader)
	// Read multipart data and save it in memory, and on disk if needed
	form, err := reader.ReadForm(maxFormMemory)
	if err != nil {
		errorIf(err, "Unable to initialize multipart reader.")
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Remove all tmp files created during the multipart upload
	defer form.RemoveAll()

	// Extract all form fields
	fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
	if err != nil {
		errorIf(err, "Unable to parse form values.")
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Check if file is provided, error out otherwise.
	if fileBody == nil {
		writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
		return
	}

	// Close multipart file
	defer fileBody.Close()

	bucket := mux.Vars(r)["bucket"]
	formValues["Bucket"] = bucket

@@ -438,21 +472,20 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
		return
	}

	// Use rangeReader to ensure that object size is within expected range.
	// Ensure that the object size is within the expected range; the file size
	// should also not exceed the maximum single PUT size (5 GiB).
	lengthRange := postPolicyForm.Conditions.ContentLengthRange
	if lengthRange.Valid {
		// If policy restricted the size of the object.
		fileBody = &rangeReader{
			Reader: fileBody,
			Min:    lengthRange.Min,
			Max:    lengthRange.Max,
		if fileSize < lengthRange.Min {
			errorIf(err, "Unable to create object.")
			writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
			return
		}
	} else {
		// Default values of min/max size of the object.
		fileBody = &rangeReader{
			Reader: fileBody,
			Min:    0,
			Max:    maxObjectSize,

		if fileSize > lengthRange.Max || fileSize > maxObjectSize {
			errorIf(err, "Unable to create object.")
			writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
			return
		}
	}

@@ -465,7 +498,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
	objectLock.Lock()
	defer objectLock.Unlock()

	objInfo, err := objectAPI.PutObject(bucket, object, -1, fileBody, metadata, sha256sum)
	objInfo, err := objectAPI.PutObject(bucket, object, fileSize, fileBody, metadata, sha256sum)
	if err != nil {
		errorIf(err, "Unable to create object.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
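
The content-length-range enforcement above reduces to a simple interval check once the form's file size is known. A standalone re-statement using stand-in types follows; note the behavior when no policy range is set is an assumption here, since the hunk above only shows the checks inside the Valid case:

```go
package main

import (
	"errors"
	"fmt"
)

const maxObjectSize = 5 * 1024 * 1024 * 1024 // 5 GiB single-PUT ceiling

var (
	errDataTooSmall = errors.New("object is smaller than the policy's minimum")
	errDataTooLarge = errors.New("object exceeds the allowed maximum size")
)

// contentLengthRange stands in for postPolicyForm.Conditions.ContentLengthRange.
type contentLengthRange struct {
	Valid    bool
	Min, Max int64
}

// checkPolicySize applies the POST-policy size rule sketched above.
func checkPolicySize(fileSize int64, r contentLengthRange) error {
	if r.Valid {
		if fileSize < r.Min {
			return errDataTooSmall
		}
		if fileSize > r.Max || fileSize > maxObjectSize {
			return errDataTooLarge
		}
		return nil
	}
	// No policy range set: assume only the single-PUT ceiling applies.
	if fileSize > maxObjectSize {
		return errDataTooLarge
	}
	return nil
}

func main() {
	r := contentLengthRange{Valid: true, Min: 1, Max: 1024}
	fmt.Println(checkPolicySize(0, r))    // too small
	fmt.Println(checkPolicySize(512, r))  // <nil>
	fmt.Println(checkPolicySize(4096, r)) // too large
}
```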

@@ -133,27 +133,27 @@ func isValidQueueID(queueARN string) bool {
	// Is Queue identifier valid?.

	if isAMQPQueue(sqsARN) { // AMQP queue.
		amqpN := serverConfig.GetAMQPNotifyByID(sqsARN.AccountID)
		amqpN := serverConfig.Notify.GetAMQPByID(sqsARN.AccountID)
		return amqpN.Enable && amqpN.URL != ""
	} else if isNATSQueue(sqsARN) {
		natsN := serverConfig.GetNATSNotifyByID(sqsARN.AccountID)
		natsN := serverConfig.Notify.GetNATSByID(sqsARN.AccountID)
		return natsN.Enable && natsN.Address != ""
	} else if isElasticQueue(sqsARN) { // Elastic queue.
		elasticN := serverConfig.GetElasticSearchNotifyByID(sqsARN.AccountID)
		elasticN := serverConfig.Notify.GetElasticSearchByID(sqsARN.AccountID)
		return elasticN.Enable && elasticN.URL != ""
	} else if isRedisQueue(sqsARN) { // Redis queue.
		redisN := serverConfig.GetRedisNotifyByID(sqsARN.AccountID)
		redisN := serverConfig.Notify.GetRedisByID(sqsARN.AccountID)
		return redisN.Enable && redisN.Addr != ""
	} else if isPostgreSQLQueue(sqsARN) {
		pgN := serverConfig.GetPostgreSQLNotifyByID(sqsARN.AccountID)
		pgN := serverConfig.Notify.GetPostgreSQLByID(sqsARN.AccountID)
		// Postgres can work with only default conn. info.
		return pgN.Enable
	} else if isKafkaQueue(sqsARN) {
		kafkaN := serverConfig.GetKafkaNotifyByID(sqsARN.AccountID)
		kafkaN := serverConfig.Notify.GetKafkaByID(sqsARN.AccountID)
		return (kafkaN.Enable && len(kafkaN.Brokers) > 0 &&
			kafkaN.Topic != "")
	} else if isWebhookQueue(sqsARN) {
		webhookN := serverConfig.GetWebhookNotifyByID(sqsARN.AccountID)
		webhookN := serverConfig.Notify.GetWebhookByID(sqsARN.AccountID)
		return webhookN.Enable && webhookN.Endpoint != ""
	}
	return false

@@ -21,6 +21,8 @@ import (
	"io"
	"io/ioutil"
	"net/http"
	"runtime"
	"strings"

	humanize "github.com/dustin/go-humanize"
	mux "github.com/gorilla/mux"
@@ -63,6 +65,10 @@ func bucketPolicyActionMatch(action string, statement policyStatement) bool {

// Match function matches wild cards in 'pattern' for resource.
func resourceMatch(pattern, resource string) bool {
	if runtime.GOOS == "windows" {
		// For windows specifically make sure we are case insensitive.
		return wildcard.Match(strings.ToLower(pattern), strings.ToLower(resource))
	}
	return wildcard.Match(pattern, resource)
}

@@ -71,6 +77,10 @@ func actionMatch(pattern, action string) bool {
	return wildcard.MatchSimple(pattern, action)
}

func refererMatch(pattern, referer string) bool {
	return wildcard.MatchSimple(pattern, referer)
}

// Verify if given resource matches with policy statement.
func bucketPolicyResourceMatch(resource string, statement policyStatement) bool {
	// the resource rule for object could contain "*" wild card.
@@ -85,33 +95,74 @@ func bucketPolicyConditionMatch(conditions map[string]set.StringSet, statement p
	// Supports following conditions.
	// - StringEquals
	// - StringNotEquals
	// - StringLike
	// - StringNotLike
	//
	// Supported applicable condition keys for each condition.
	// - s3:prefix
	// - s3:max-keys
	var conditionMatches = true
	// - aws:Referer

	// The following loop evaluates the logical AND of all the
	// conditions in the statement. Note: we can break out of the
	// loop if and only if a condition evaluates to false.
	for condition, conditionKeyVal := range statement.Conditions {
		prefixCondition := conditionKeyVal["s3:prefix"]
		maxKeyCondition := conditionKeyVal["s3:max-keys"]
		if condition == "StringEquals" {
			if !conditionKeyVal["s3:prefix"].Equals(conditions["prefix"]) {
				conditionMatches = false
				break
			// If there is no condition with the "s3:prefix" or "s3:max-keys"
			// condition key, then there is nothing to check the condition against.
			if !prefixCondition.IsEmpty() && !prefixCondition.Equals(conditions["prefix"]) {
				return false
			}
			if !conditionKeyVal["s3:max-keys"].Equals(conditions["max-keys"]) {
				conditionMatches = false
				break
			if !maxKeyCondition.IsEmpty() && !maxKeyCondition.Equals(conditions["max-keys"]) {
				return false
			}
		} else if condition == "StringNotEquals" {
			if !conditionKeyVal["s3:prefix"].Equals(conditions["prefix"]) {
				conditionMatches = false
				break
			// If there is no condition with the "s3:prefix" or "s3:max-keys"
			// condition key, then there is nothing to check the condition against.
			if !prefixCondition.IsEmpty() && prefixCondition.Equals(conditions["prefix"]) {
				return false
			}
			if !conditionKeyVal["s3:max-keys"].Equals(conditions["max-keys"]) {
				conditionMatches = false
				break
			if !maxKeyCondition.IsEmpty() && maxKeyCondition.Equals(conditions["max-keys"]) {
				return false
			}
		} else if condition == "StringLike" {
			awsReferers := conditionKeyVal["aws:Referer"]
			// Skip empty condition, it is trivially satisfied.
			if awsReferers.IsEmpty() {
				continue
			}
			// The wildcard match of a referer in the statement was not empty,
			// so StringLike has a match, i.e. the condition evaluates to true.
			refererFound := false
			for referer := range conditions["referer"] {
				if !awsReferers.FuncMatch(refererMatch, referer).IsEmpty() {
					refererFound = true
					break
				}
			}
			// No matching referer found, so the condition is false.
			if !refererFound {
				return false
			}
		} else if condition == "StringNotLike" {
			awsReferers := conditionKeyVal["aws:Referer"]
			// Skip empty condition, it is trivially satisfied.
			if awsReferers.IsEmpty() {
				continue
			}
			// The wildcard match of a referer in the statement was not empty,
			// so StringNotLike has a match, i.e. the condition evaluates to false.
			for referer := range conditions["referer"] {
				if !awsReferers.FuncMatch(refererMatch, referer).IsEmpty() {
					return false
				}
			}
		}
	}
	return conditionMatches

	return true
}
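
To make the StringLike/StringNotLike evaluation above concrete, here is a toy version using path.Match as a stand-in for minio's wildcard matcher; the names and semantics are simplified:

```go
package main

import (
	"fmt"
	"path"
)

// refererLike reports whether any request referer matches any pattern
// from the statement. path.Match stands in for wildcard.MatchSimple;
// patterns use the same '*' idea.
func refererLike(patterns, referers []string) bool {
	for _, p := range patterns {
		for _, ref := range referers {
			if ok, _ := path.Match(p, ref); ok {
				return true
			}
		}
	}
	return false
}

func main() {
	patterns := []string{"http://www.example.com/*"}
	fmt.Println(refererLike(patterns, []string{"http://www.example.com/index.html"})) // true
	fmt.Println(refererLike(patterns, []string{"http://somethingelse.com/"}))         // false
}
```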

// PutBucketPolicyHandler - PUT Bucket policy

@@ -913,7 +913,7 @@ func TestBucketPolicyConditionMatch(t *testing.T) {
	statementCondition: getStatementWithCondition("StringNotEquals", "s3:prefix", "Asia/"),
	condition:          getInnerMap("prefix", "Asia/"),

	expectedMatch: true,
	expectedMatch: false,
},
// Test case - 6.
// StringNotEquals condition doesn't match.
@@ -922,7 +922,7 @@ func TestBucketPolicyConditionMatch(t *testing.T) {
	statementCondition: getStatementWithCondition("StringNotEquals", "s3:prefix", "Asia/"),
	condition:          getInnerMap("prefix", "Africa/"),

	expectedMatch: false,
	expectedMatch: true,
},
// Test case - 7.
// StringNotEquals condition matches.
@@ -931,7 +931,7 @@ func TestBucketPolicyConditionMatch(t *testing.T) {
	statementCondition: getStatementWithCondition("StringNotEquals", "s3:max-keys", "Asia/"),
	condition:          getInnerMap("max-keys", "Asia/"),

	expectedMatch: true,
	expectedMatch: false,
},
// Test case - 8.
// StringNotEquals condition doesn't match.
@@ -940,7 +940,35 @@ func TestBucketPolicyConditionMatch(t *testing.T) {
	statementCondition: getStatementWithCondition("StringNotEquals", "s3:max-keys", "Asia/"),
	condition:          getInnerMap("max-keys", "Africa/"),

	expectedMatch: false,
	expectedMatch: true,
},
// Test case - 9.
// StringLike condition matches.
{
	statementCondition: getStatementWithCondition("StringLike", "aws:Referer", "http://www.example.com/"),
	condition:          getInnerMap("referer", "http://www.example.com/"),
	expectedMatch:      true,
},
// Test case - 10.
// StringLike condition doesn't match.
{
	statementCondition: getStatementWithCondition("StringLike", "aws:Referer", "http://www.example.com/"),
	condition:          getInnerMap("referer", "www.somethingelse.com"),
	expectedMatch:      false,
},
// Test case - 11.
// StringNotLike condition evaluates to false.
{
	statementCondition: getStatementWithCondition("StringNotLike", "aws:Referer", "http://www.example.com/"),
	condition:          getInnerMap("referer", "http://www.example.com/"),
	expectedMatch:      false,
},
// Test case - 12.
// StringNotLike condition evaluates to true.
{
	statementCondition: getStatementWithCondition("StringNotLike", "aws:Referer", "http://www.example.com/"),
	condition:          getInnerMap("referer", "http://somethingelse.com/"),
	expectedMatch:      true,
},
}

@@ -949,7 +977,8 @@ func TestBucketPolicyConditionMatch(t *testing.T) {
	// call the function under test and assert the result with the expected result.
	doesMatch := bucketPolicyConditionMatch(tc.condition, tc.statementCondition)
	if tc.expectedMatch != doesMatch {
		t.Errorf("Expected the match to be `%v`; got `%v`.", tc.expectedMatch, doesMatch)
		t.Errorf("Expected the match to be `%v`; got `%v` - %v %v.",
			tc.expectedMatch, doesMatch, tc.condition, tc.statementCondition)
	}
})
}

@@ -29,17 +29,22 @@ import (
	"github.com/minio/minio-go/pkg/set"
)

var conditionKeyActionMap = map[string]set.StringSet{
	"s3:prefix":   set.CreateStringSet("s3:ListBucket"),
	"s3:max-keys": set.CreateStringSet("s3:ListBucket"),
}
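
The map above ties each condition key to the actions it makes sense for. A standalone sketch of the validation rule this enables, with a minimal string-set stand-in for minio-go's set.StringSet:

```go
package main

import "fmt"

// stringSet is a minimal stand-in for set.StringSet.
type stringSet map[string]struct{}

func newSet(items ...string) stringSet {
	s := stringSet{}
	for _, it := range items {
		s[it] = struct{}{}
	}
	return s
}

func (s stringSet) intersects(other stringSet) bool {
	for k := range s {
		if _, ok := other[k]; ok {
			return true
		}
	}
	return false
}

// conditionKeyActionMap mirrors the table above.
var conditionKeyActionMap = map[string]stringSet{
	"s3:prefix":   newSet("s3:ListBucket"),
	"s3:max-keys": newSet("s3:ListBucket"),
}

// checkConditionKey rejects a condition key that cannot apply to any of
// the statement's actions (sketch of the rule used in isValidConditions).
func checkConditionKey(key string, actions stringSet) error {
	compatible, restricted := conditionKeyActionMap[key]
	if restricted && !compatible.intersects(actions) {
		return fmt.Errorf("unsupported condition key %s for actions %v", key, actions)
	}
	return nil
}

func main() {
	fmt.Println(checkConditionKey("s3:max-keys", newSet("s3:GetObject")))  // error
	fmt.Println(checkConditionKey("s3:max-keys", newSet("s3:ListBucket"))) // <nil>
}
```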

// supportedActionMap - lists all the actions supported by minio.
var supportedActionMap = set.CreateStringSet("*", "s3:*", "s3:GetObject",
	"s3:ListBucket", "s3:PutObject", "s3:GetBucketLocation", "s3:DeleteObject",
	"s3:AbortMultipartUpload", "s3:ListBucketMultipartUploads", "s3:ListMultipartUploadParts")

// supported Conditions type.
var supportedConditionsType = set.CreateStringSet("StringEquals", "StringNotEquals")
var supportedConditionsType = set.CreateStringSet("StringEquals", "StringNotEquals", "StringLike", "StringNotLike")

// Supported keys for the conditions; validate that only
// these keys are present.
var supportedConditionsKey = set.CreateStringSet("s3:prefix", "s3:max-keys")
var supportedConditionsKey = set.CreateStringSet("s3:prefix", "s3:max-keys", "aws:Referer")

// supportedEffectMap - supported effects.
var supportedEffectMap = set.CreateStringSet("Allow", "Deny")
@@ -106,12 +111,12 @@ func isValidResources(resources set.StringSet) (err error) {
		return err
	}
	for resource := range resources {
		if !strings.HasPrefix(resource, bucketARNPrefix) {
		if !hasPrefix(resource, bucketARNPrefix) {
			err = errors.New("Unsupported resource style found: ‘" + resource + "’, please validate your policy document")
			return err
		}
		resourceSuffix := strings.SplitAfter(resource, bucketARNPrefix)[1]
		if len(resourceSuffix) == 0 || strings.HasPrefix(resourceSuffix, "/") {
		if len(resourceSuffix) == 0 || hasPrefix(resourceSuffix, "/") {
			err = errors.New("Invalid resource style found: ‘" + resource + "’, please validate your policy document")
			return err
		}
@@ -178,11 +183,12 @@ func isValidPrincipals(principal interface{}) (err error) {
	return nil
}

// isValidConditions - are valid conditions.
func isValidConditions(conditions map[string]map[string]set.StringSet) (err error) {
	// Verify conditions should be valid.
	// Validate if stringEquals, stringNotEquals are present;
	// if not, throw an error.
// isValidConditions - returns nil if the given conditions are valid and
// a corresponding error otherwise.
func isValidConditions(actions set.StringSet, conditions map[string]map[string]set.StringSet) (err error) {
	// Verify conditions should be valid. Validate that only
	// supported condition keys are present and return an error
	// otherwise.
	conditionKeyVal := make(map[string]set.StringSet)
	for conditionType := range conditions {
		if !supportedConditionsType.Contains(conditionType) {
@@ -194,6 +200,15 @@ func isValidConditions(conditions map[string]map[string]set.StringSet) (err erro
			err = fmt.Errorf("Unsupported condition key '%s', please validate your policy document", conditionType)
			return err
		}

		compatibleActions := conditionKeyActionMap[key]
		if !compatibleActions.IsEmpty() &&
			compatibleActions.Intersection(actions).IsEmpty() {
			err = fmt.Errorf("Unsupported condition key %s for the given actions %s, "+
				"please validate your policy document", key, actions)
			return err
		}

		conditionVal, ok := conditionKeyVal[key]
		if ok && !value.Intersection(conditionVal).IsEmpty() {
			err = fmt.Errorf("Ambigious condition values for key '%s', please validate your policy document", key)
@@ -222,8 +237,8 @@ func resourcePrefix(resource string) string {
}

// checkBucketPolicyResources validates Resources in unmarshalled bucket policy structure.
// First validation of Resources is done for the given set of Actions.
// Later it is validated for recursive Resources.
// - Resources are validated against the given set of Actions.
// -
func checkBucketPolicyResources(bucket string, bucketPolicy *bucketPolicy) APIErrorCode {
	// Validate statements for special actions and collect resources
	// for others to validate nesting.
@@ -267,7 +282,7 @@ func checkBucketPolicyResources(bucket string, bucketPolicy *bucketPolicy) APIEr
	// nesting. Reject such rules.
	for _, otherResource := range resources {
		// Common prefix; reject such rules.
		if strings.HasPrefix(otherResource, resource) {
		if hasPrefix(otherResource, resource) {
			return ErrPolicyNesting
		}
	}
@@ -317,7 +332,7 @@ func parseBucketPolicy(bucketPolicyReader io.Reader, policy *bucketPolicy) (err
		return err
	}
	// Statement conditions should be valid.
	if err := isValidConditions(statement.Conditions); err != nil {
	if err := isValidConditions(statement.Actions, statement.Conditions); err != nil {
		return err
	}
}

@@ -22,6 +22,7 @@ import (
	"fmt"
	"testing"

	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio-go/pkg/set"
)

@@ -342,6 +343,18 @@ func TestIsValidPrincipals(t *testing.T) {
	}
}

// getEmptyConditionKeyMap - returns a function that generates a
// condition key map for a given key.
func getEmptyConditionKeyMap(conditionKey string) func() map[string]map[string]set.StringSet {
	emptyConditionGenerator := func() map[string]map[string]set.StringSet {
		emptyMap := make(map[string]set.StringSet)
		conditions := make(map[string]map[string]set.StringSet)
		conditions[conditionKey] = emptyMap
		return conditions
	}
	return emptyConditionGenerator
}

// Tests validate policyStatement condition validator.
func TestIsValidConditions(t *testing.T) {
	// returns empty conditions map.
@@ -350,22 +363,17 @@ func TestIsValidConditions(t *testing.T) {
	}

	// returns map with the "StringEquals" set to empty map.
	setEmptyStringEquals := func() map[string]map[string]set.StringSet {
		emptyMap := make(map[string]set.StringSet)
		conditions := make(map[string]map[string]set.StringSet)
		conditions["StringEquals"] = emptyMap
		return conditions

	}
	setEmptyStringEquals := getEmptyConditionKeyMap("StringEquals")

	// returns map with the "StringNotEquals" set to empty map.
	setEmptyStringNotEquals := func() map[string]map[string]set.StringSet {
		emptyMap := make(map[string]set.StringSet)
		conditions := make(map[string]map[string]set.StringSet)
		conditions["StringNotEquals"] = emptyMap
		return conditions
	setEmptyStringNotEquals := getEmptyConditionKeyMap("StringNotEquals")

	// returns map with the "StringLike" set to empty map.
	setEmptyStringLike := getEmptyConditionKeyMap("StringLike")

	// returns map with the "StringNotLike" set to empty map.
	setEmptyStringNotLike := getEmptyConditionKeyMap("StringNotLike")

	}
	// Generate conditions.
	generateConditions := func(key1, key2, value string) map[string]map[string]set.StringSet {
		innerMap := make(map[string]set.StringSet)
@@ -377,11 +385,11 @@ func TestIsValidConditions(t *testing.T) {

	// generate ambiguous conditions.
	generateAmbigiousConditions := func() map[string]map[string]set.StringSet {
		innerMap := make(map[string]set.StringSet)
		innerMap["s3:prefix"] = set.CreateStringSet("Asia/")
		prefixMap := make(map[string]set.StringSet)
		prefixMap["s3:prefix"] = set.CreateStringSet("Asia/")
		conditions := make(map[string]map[string]set.StringSet)
		conditions["StringEquals"] = innerMap
		conditions["StringNotEquals"] = innerMap
		conditions["StringEquals"] = prefixMap
		conditions["StringNotEquals"] = prefixMap
		return conditions
	}

@@ -417,13 +425,20 @@ func TestIsValidConditions(t *testing.T) {
		setEmptyConditions(),
		setEmptyStringEquals(),
		setEmptyStringNotEquals(),
		setEmptyStringLike(),
		setEmptyStringNotLike(),
		generateConditions("StringEquals", "s3:prefix", "Asia/"),
		generateConditions("StringEquals", "s3:max-keys", "100"),
		generateConditions("StringNotEquals", "s3:prefix", "Asia/"),
		generateConditions("StringNotEquals", "s3:max-keys", "100"),
	}

	getObjectActionSet := set.CreateStringSet("s3:GetObject")
	roBucketActionSet := set.CreateStringSet(readOnlyBucketActions...)
	maxKeysConditionErr := fmt.Errorf("Unsupported condition key %s for the given actions %s, "+
		"please validate your policy document", "s3:max-keys", getObjectActionSet)
	testCases := []struct {
		inputActions   set.StringSet
		inputCondition map[string]map[string]set.StringSet
		// expected result.
		expectedErr error
@@ -433,42 +448,44 @@ func TestIsValidConditions(t *testing.T) {
		// Malformed conditions.
		// Test case - 1.
		// "StringValues" is an invalid type.
		{testConditions[0], fmt.Errorf("Unsupported condition type 'StringValues', " +
		{roBucketActionSet, testConditions[0], fmt.Errorf("Unsupported condition type 'StringValues', " +
			"please validate your policy document"), false},
		// Test case - 2.
		// "s3:Object" is an invalid key.
		{testConditions[1], fmt.Errorf("Unsupported condition key " +
		{roBucketActionSet, testConditions[1], fmt.Errorf("Unsupported condition key " +
			"'StringEquals', please validate your policy document"), false},
		// Test case - 3.
		// Test case with ambiguous conditions set.
		{testConditions[2], fmt.Errorf("Ambigious condition values for key 's3:prefix', " +
		{roBucketActionSet, testConditions[2], fmt.Errorf("Ambigious condition values for key 's3:prefix', " +
			"please validate your policy document"), false},
		// Test case - 4.
		// Test case with valid and invalid condition types.
		{testConditions[3], fmt.Errorf("Unsupported condition type 'InvalidType', " +
		{roBucketActionSet, testConditions[3], fmt.Errorf("Unsupported condition type 'InvalidType', " +
			"please validate your policy document"), false},
		// Test case - 5.
		// Test case with valid and invalid condition keys.
		{testConditions[4], fmt.Errorf("Unsupported condition key 'StringEquals', " +
		{roBucketActionSet, testConditions[4], fmt.Errorf("Unsupported condition key 'StringEquals', " +
			"please validate your policy document"), false},
		// Test cases with valid conditions.
		// Test case - 6.
		{testConditions[5], nil, true},
		{roBucketActionSet, testConditions[5], nil, true},
		// Test case - 7.
		{testConditions[6], nil, true},
		{roBucketActionSet, testConditions[6], nil, true},
		// Test case - 8.
		{testConditions[7], nil, true},
		{roBucketActionSet, testConditions[7], nil, true},
		// Test case - 9.
		{testConditions[8], nil, true},
		{roBucketActionSet, testConditions[8], nil, true},
		// Test case - 10.
		{testConditions[9], nil, true},
		{roBucketActionSet, testConditions[9], nil, true},
		// Test case - 11.
		{testConditions[10], nil, true},
		// Test case 10.
		{testConditions[11], nil, true},
		{roBucketActionSet, testConditions[10], nil, true},
		// Test case - 12.
		{roBucketActionSet, testConditions[11], nil, true},
		// Test case - 13.
		{getObjectActionSet, testConditions[11], maxKeysConditionErr, false},
	}
	for i, testCase := range testCases {
		actualErr := isValidConditions(testCase.inputCondition)
		actualErr := isValidConditions(testCase.inputActions, testCase.inputCondition)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, actualErr.Error())
		}
@@ -709,3 +726,64 @@ func TestParseBucketPolicy(t *testing.T) {
		}
	}
}

func TestAWSRefererCondition(t *testing.T) {
	resource := set.CreateStringSet([]string{
		fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/"+"Asia"+"*"),
	}...)

	conditionsKeyMap := make(policy.ConditionKeyMap)
	conditionsKeyMap.Add("aws:Referer",
		set.CreateStringSet("www.example.com",
			"http://www.example.com"))

	requestConditionKeyMap := make(map[string]set.StringSet)
	requestConditionKeyMap["referer"] = set.CreateStringSet("www.example.com")

	testCases := []struct {
		effect       string
		conditionKey string
		match        bool
	}{
		{
			effect:       "Allow",
			conditionKey: "StringLike",
			match:        true,
		},
		{
			effect:       "Allow",
			conditionKey: "StringNotLike",
			match:        false,
		},
		{
			effect:       "Deny",
			conditionKey: "StringLike",
			match:        true,
		},
		{
			effect:       "Deny",
			conditionKey: "StringNotLike",
			match:        false,
		},
	}

	for i, test := range testCases {
		conditions := make(map[string]map[string]set.StringSet)
		conditions[test.conditionKey] = conditionsKeyMap

		allowStatement := policyStatement{
			Sid:    "Testing AWS referer condition",
			Effect: test.effect,
			Principal: map[string]interface{}{
				"AWS": "*",
			},
			Resources:  resource,
			Conditions: conditions,
		}

		if result := bucketPolicyConditionMatch(requestConditionKeyMap, allowStatement); result != test.match {
			t.Errorf("Test %d - Expected conditions to evaluate to %v but got %v",
				i+1, test.match, result)
		}
	}
}

@@ -22,25 +22,13 @@ import (
	"syscall"
)

// Make sure that none of the other processes are listening on the
// specified port on any of the interfaces.
//
// On linux if a process is listening on 127.0.0.1:9000 then Listen()
// on ":9000" fails with the error "port already in use".
// However on Mac OSX Listen() on ":9000" falls back to the IPv6 address.
// This causes confusion on Mac OSX that minio server is not reachable
// on 127.0.0.1 even though minio server is running. So before we start
// the minio server we make sure that the port is free on each tcp network.
//
// Port is string on purpose here.
// https://github.com/golang/go/issues/16142#issuecomment-245912773
//
// "Keep in mind that ports in Go are strings: https://play.golang.org/p/zk2WEri_E9"
// - @bradfitz
func checkPortAvailability(portStr string) error {
// checkPortAvailability - check if given port is already in use.
// Note: The check method tries to listen on given port and closes it.
// It is possible to have a disconnected client in this tiny window of time.
func checkPortAvailability(port string) error {
	network := [3]string{"tcp", "tcp4", "tcp6"}
	for _, n := range network {
		l, err := net.Listen(n, net.JoinHostPort("", portStr))
		l, err := net.Listen(n, net.JoinHostPort("", port))
		if err != nil {
			if isAddrInUse(err) {
				// Return error if another process is listening on the

@@ -16,13 +16,16 @@

package cmd

import "github.com/minio/cli"
import (
	"github.com/minio/cli"
	"github.com/minio/minio/pkg/trie"
)

// Collection of minio commands currently supported are.
var commands = []cli.Command{}

// Collection of minio commands currently supported in a trie tree.
var commandsTree = newTrie()
var commandsTree = trie.NewTrie()

// registerCommand registers a cli command.
func registerCommand(command cli.Command) {

@@ -855,7 +855,10 @@ func migrateV12ToV13() error {
	}

	// Copy over fields from V12 into V13 config struct
	srvConfig := &serverConfigV13{}
	srvConfig := &serverConfigV13{
		Logger: &logger{},
		Notify: &notifier{},
	}
	srvConfig.Version = "13"
	srvConfig.Credential = cv12.Credential
	srvConfig.Region = cv12.Region

@@ -50,7 +50,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
	}

	// Initialize server config and check again if everything is fine
	if _, err := initConfig(); err != nil {
	if err := loadConfig(credential{}); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}
}
@@ -143,7 +143,7 @@ func TestServerConfigMigrateV2toV12(t *testing.T) {
	}

	// Initialize server config and check again if everything is fine
	if _, err := initConfig(); err != nil {
	if err := loadConfig(credential{}); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}

@@ -160,11 +160,6 @@ func TestServerConfigMigrateV2toV12(t *testing.T) {
	if serverConfig.Credential.SecretKey != secretKey {
		t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, serverConfig.Credential.SecretKey)
	}

	// Initialize server config and check again if everything is fine
	if _, err := initConfig(); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}
}

// Test if all migrate code returns error with corrupted config files

@@ -36,75 +36,97 @@ type serverConfigV13 struct {
	Region string `json:"region"`

	// Additional error logging configuration.
	Logger logger `json:"logger"`
	Logger *logger `json:"logger"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify *notifier `json:"notify"`
}

// initConfig - initialize server config and indicate if we are
// creating a new file or we are just loading
func initConfig() (bool, error) {
	if !isConfigFileExists() {
		// Initialize server config.
		srvCfg := &serverConfigV13{}
		srvCfg.Version = globalMinioConfigVersion
		srvCfg.Region = globalMinioDefaultRegion
		srvCfg.Credential = newCredential()

		// Enable console logger by default on a fresh run.
		srvCfg.Logger.Console = consoleLogger{
			Enable: true,
			Level:  "error",
		}

		// Make sure to initialize notification configs.
		srvCfg.Notify.AMQP = make(map[string]amqpNotify)
		srvCfg.Notify.AMQP["1"] = amqpNotify{}
		srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
		srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
		srvCfg.Notify.Redis = make(map[string]redisNotify)
		srvCfg.Notify.Redis["1"] = redisNotify{}
		srvCfg.Notify.NATS = make(map[string]natsNotify)
		srvCfg.Notify.NATS["1"] = natsNotify{}
		srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
		srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
		srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
		srvCfg.Notify.Kafka["1"] = kafkaNotify{}
		srvCfg.Notify.Webhook = make(map[string]webhookNotify)
		srvCfg.Notify.Webhook["1"] = webhookNotify{}

		// Create config path.
		err := createConfigPath()
		if err != nil {
			return false, err
		}

		// hold the mutex lock before a new config is assigned.
		// Save the new config globally.
		// unlock the mutex.
		serverConfigMu.Lock()
		serverConfig = srvCfg
		serverConfigMu.Unlock()

		// Save config into file.
		return true, serverConfig.Save()
// newConfig - initialize a new server config, saves creds from env
// if globalIsEnvCreds is set otherwise generates a new set of keys
// and those are saved.
func newConfig(envCreds credential) error {
	// Initialize server config.
	srvCfg := &serverConfigV13{
		Logger: &logger{},
		Notify: &notifier{},
	}
	srvCfg.Version = globalMinioConfigVersion
	srvCfg.Region = globalMinioDefaultRegion

	// If env is set for a fresh start, save them to config file.
	if globalIsEnvCreds {
		srvCfg.SetCredential(envCreds)
	} else {
		srvCfg.SetCredential(newCredential())
	}

	// Enable console logger by default on a fresh run.
	srvCfg.Logger.Console = consoleLogger{
		Enable: true,
		Level:  "error",
	}

	// Make sure to initialize notification configs.
	srvCfg.Notify.AMQP = make(map[string]amqpNotify)
	srvCfg.Notify.AMQP["1"] = amqpNotify{}
	srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify)
	srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{}
	srvCfg.Notify.Redis = make(map[string]redisNotify)
	srvCfg.Notify.Redis["1"] = redisNotify{}
	srvCfg.Notify.NATS = make(map[string]natsNotify)
	srvCfg.Notify.NATS["1"] = natsNotify{}
	srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify)
	srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{}
	srvCfg.Notify.Kafka = make(map[string]kafkaNotify)
	srvCfg.Notify.Kafka["1"] = kafkaNotify{}
	srvCfg.Notify.Webhook = make(map[string]webhookNotify)
	srvCfg.Notify.Webhook["1"] = webhookNotify{}

	// Create config path.
	if err := createConfigPath(); err != nil {
		return err
	}

	// hold the mutex lock before a new config is assigned.
	// Save the new config globally.
	// unlock the mutex.
	serverConfigMu.Lock()
	serverConfig = srvCfg
	serverConfigMu.Unlock()

	// Save config into file.
	return serverConfig.Save()
}

// loadConfig - loads a new config from disk, overrides creds from env
// if globalIsEnvCreds is set, otherwise serves the creds loaded
// from the disk.
func loadConfig(envCreds credential) error {
	configFile, err := getConfigFile()
	if err != nil {
		return false, err
		return err
	}

	if _, err = os.Stat(configFile); err != nil {
		return false, err
		return err
	}
	srvCfg := &serverConfigV13{}
	srvCfg.Version = globalMinioConfigVersion
	qc, err := quick.New(srvCfg)
	if err != nil {
		return false, err
		return err
	}
	if err := qc.Load(configFile); err != nil {
		return false, err

	if err = qc.Load(configFile); err != nil {
		return err
	}

	// If env is set override the credentials from config file.
	if globalIsEnvCreds {
		srvCfg.SetCredential(envCreds)
	} else {
		srvCfg.SetCredential(srvCfg.Credential)
	}

	// hold the mutex lock before a new config is assigned.
@@ -112,10 +134,10 @@ func initConfig() (bool, error) {
	// Save the loaded config globally.
	serverConfig = srvCfg
	serverConfigMu.Unlock()

	// Set the version properly after the unmarshalled json is loaded.
	serverConfig.Version = globalMinioConfigVersion

	return false, nil
	return nil
}
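
Both paths funnel credentials through the same override rule: environment credentials win when globalIsEnvCreds is set, otherwise whatever was generated or loaded from disk is kept. A compact sketch of just that decision; the function and type here are stand-ins, while MINIO_ACCESS_KEY and MINIO_SECRET_KEY are the environment variables minio documents for this purpose:

```go
package main

import (
	"fmt"
	"os"
)

type credential struct {
	AccessKey, SecretKey string
}

// resolveCreds mirrors the env-override rule in newConfig/loadConfig:
// env credentials, when present, always win over the on-disk ones.
func resolveCreds(fromDisk credential) credential {
	access, secret := os.Getenv("MINIO_ACCESS_KEY"), os.Getenv("MINIO_SECRET_KEY")
	if access != "" && secret != "" {
		return credential{AccessKey: access, SecretKey: secret}
	}
	return fromDisk
}

func main() {
	onDisk := credential{AccessKey: "diskuser", SecretKey: "disksecret"}
	fmt.Printf("%+v\n", resolveCreds(onDisk))
}
```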
|
||||
|
||||
// serverConfig server config.
|
||||
@@ -129,193 +151,6 @@ func (s serverConfigV13) GetVersion() string {
|
||||
return s.Version
|
||||
}
|
||||
|
||||
/// Logger related.
|
||||
|
||||
func (s *serverConfigV13) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) {
|
||||
serverConfigMu.Lock()
|
||||
defer serverConfigMu.Unlock()
|
||||
|
||||
s.Notify.AMQP[accountID] = amqpn
|
||||
}
|
||||
|
||||
func (s serverConfigV13) GetAMQP() map[string]amqpNotify {
|
||||
serverConfigMu.RLock()
|
||||
defer serverConfigMu.RUnlock()
|
||||
|
||||
return s.Notify.AMQP
|
||||
}
|
||||
|
||||
// GetAMQPNotify get current AMQP logger.
|
||||
func (s serverConfigV13) GetAMQPNotifyByID(accountID string) amqpNotify {
|
||||
serverConfigMu.RLock()
|
||||
defer serverConfigMu.RUnlock()
|
||||
|
||||
return s.Notify.AMQP[accountID]
|
||||
}
|
||||
|
||||
//
|
||||
func (s *serverConfigV13) SetNATSNotifyByID(accountID string, natsn natsNotify) {
|
||||
serverConfigMu.Lock()
|
||||
defer serverConfigMu.Unlock()
|
||||
|
||||
s.Notify.NATS[accountID] = natsn
|
||||
}
|
||||
|
||||
func (s serverConfigV13) GetNATS() map[string]natsNotify {
|
||||
serverConfigMu.RLock()
|
||||
defer serverConfigMu.RUnlock()
|
||||
return s.Notify.NATS
|
||||
}
|
||||
|
||||
// GetNATSNotify get current NATS logger.
|
||||
func (s serverConfigV13) GetNATSNotifyByID(accountID string) natsNotify {
|
||||
serverConfigMu.RLock()
|
||||
defer serverConfigMu.RUnlock()
|
||||
|
||||
return s.Notify.NATS[accountID]
|
||||
}
|
||||
|
||||
func (s *serverConfigV13) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Notify.ElasticSearch[accountID] = esNotify
}

func (s serverConfigV13) GetElasticSearch() map[string]elasticSearchNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.ElasticSearch
}

// GetElasticSearchNotifyByID gets the current Elasticsearch logger.
func (s serverConfigV13) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.ElasticSearch[accountID]
}

func (s *serverConfigV13) SetRedisNotifyByID(accountID string, rNotify redisNotify) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Notify.Redis[accountID] = rNotify
}

func (s serverConfigV13) GetRedis() map[string]redisNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Redis
}

func (s serverConfigV13) GetWebhook() map[string]webhookNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Webhook
}

// GetWebhookNotifyByID gets the current Webhook logger.
func (s serverConfigV13) GetWebhookNotifyByID(accountID string) webhookNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Webhook[accountID]
}

func (s *serverConfigV13) SetWebhookNotifyByID(accountID string, pgn webhookNotify) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Notify.Webhook[accountID] = pgn
}

// GetRedisNotifyByID gets the current Redis logger.
func (s serverConfigV13) GetRedisNotifyByID(accountID string) redisNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Redis[accountID]
}

func (s *serverConfigV13) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Notify.PostgreSQL[accountID] = pgn
}

func (s serverConfigV13) GetPostgreSQL() map[string]postgreSQLNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.PostgreSQL
}

func (s serverConfigV13) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.PostgreSQL[accountID]
}

// Kafka related functions
func (s *serverConfigV13) SetKafkaNotifyByID(accountID string, kn kafkaNotify) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Notify.Kafka[accountID] = kn
}

func (s serverConfigV13) GetKafka() map[string]kafkaNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Kafka
}

func (s serverConfigV13) GetKafkaNotifyByID(accountID string) kafkaNotify {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Notify.Kafka[accountID]
}

// SetFileLogger sets a new file logger.
func (s *serverConfigV13) SetFileLogger(flogger fileLogger) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Logger.File = flogger
}

// GetFileLogger gets the current file logger.
func (s serverConfigV13) GetFileLogger() fileLogger {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Logger.File
}

// SetConsoleLogger sets a new console logger.
func (s *serverConfigV13) SetConsoleLogger(clogger consoleLogger) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Logger.Console = clogger
}

// GetConsoleLogger gets the current console logger.
func (s serverConfigV13) GetConsoleLogger() consoleLogger {
	serverConfigMu.RLock()
	defer serverConfigMu.RUnlock()

	return s.Logger.Console
}
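All of these accessors share one discipline: setters take the exclusive lock, getters take the shared lock, and both release via `defer`. A minimal self-contained sketch of the same pattern (the `regionConfig` type and names below are illustrative, not Minio's):

```go
package main

import (
	"fmt"
	"sync"
)

var configMu sync.RWMutex

type regionConfig struct{ Region string }

var cfg regionConfig

// setRegion takes the write lock so no reader observes a partial update.
func setRegion(region string) {
	configMu.Lock()
	defer configMu.Unlock()
	cfg.Region = region
}

// getRegion takes the read lock, allowing concurrent readers.
func getRegion() string {
	configMu.RLock()
	defer configMu.RUnlock()
	return cfg.Region
}

func main() {
	setRegion("us-east-1")
	fmt.Println(getRegion())
}
```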
// SetRegion sets a new region.
func (s *serverConfigV13) SetRegion(region string) {
	serverConfigMu.Lock()
@@ -337,7 +172,8 @@ func (s *serverConfigV13) SetCredential(creds credential) {
	serverConfigMu.Lock()
	defer serverConfigMu.Unlock()

	s.Credential = creds
	// Set updated credential.
	s.Credential = newCredentialWithKeys(creds.AccessKey, creds.SecretKey)
}

// GetCredentials gets the current credentials.
@@ -40,57 +40,65 @@ func TestServerConfig(t *testing.T) {
	}

	// Set new amqp notification id.
	serverConfig.SetAMQPNotifyByID("2", amqpNotify{})
	savedNotifyCfg1 := serverConfig.GetAMQPNotifyByID("2")
	serverConfig.Notify.SetAMQPByID("2", amqpNotify{})
	savedNotifyCfg1 := serverConfig.Notify.GetAMQPByID("2")
	if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) {
		t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1)
	}

	// Set new elastic search notification id.
	serverConfig.SetElasticSearchNotifyByID("2", elasticSearchNotify{})
	savedNotifyCfg2 := serverConfig.GetElasticSearchNotifyByID("2")
	serverConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{})
	savedNotifyCfg2 := serverConfig.Notify.GetElasticSearchByID("2")
	if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) {
		t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2)
	}

	// Set new redis notification id.
	serverConfig.SetRedisNotifyByID("2", redisNotify{})
	savedNotifyCfg3 := serverConfig.GetRedisNotifyByID("2")
	serverConfig.Notify.SetRedisByID("2", redisNotify{})
	savedNotifyCfg3 := serverConfig.Notify.GetRedisByID("2")
	if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) {
		t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3)
	}

	// Set new kafka notification id.
	serverConfig.SetKafkaNotifyByID("2", kafkaNotify{})
	savedNotifyCfg4 := serverConfig.GetKafkaNotifyByID("2")
	serverConfig.Notify.SetKafkaByID("2", kafkaNotify{})
	savedNotifyCfg4 := serverConfig.Notify.GetKafkaByID("2")
	if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) {
		t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4)
	}

	// Set new Webhook notification id.
	serverConfig.SetWebhookNotifyByID("2", webhookNotify{})
	savedNotifyCfg5 := serverConfig.GetWebhookNotifyByID("2")
	serverConfig.Notify.SetWebhookByID("2", webhookNotify{})
	savedNotifyCfg5 := serverConfig.Notify.GetWebhookByID("2")
	if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
		t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5)
	}

	// Set new console logger.
	serverConfig.SetConsoleLogger(consoleLogger{
	serverConfig.Logger.SetConsole(consoleLogger{
		Enable: true,
	})
	consoleCfg := serverConfig.GetConsoleLogger()
	consoleCfg := serverConfig.Logger.GetConsole()
	if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
		t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
	}
	// Reset console logger.
	serverConfig.Logger.SetConsole(consoleLogger{
		Enable: false,
	})

	// Set new file logger.
	serverConfig.SetFileLogger(fileLogger{
	serverConfig.Logger.SetFile(fileLogger{
		Enable: true,
	})
	fileCfg := serverConfig.GetFileLogger()
	fileCfg := serverConfig.Logger.GetFile()
	if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
		t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, fileCfg)
	}
	// Reset file logger.
	serverConfig.Logger.SetFile(fileLogger{
		Enable: false,
	})

	// Match version.
	if serverConfig.GetVersion() != globalMinioConfigVersion {
@@ -106,7 +114,7 @@ func TestServerConfig(t *testing.T) {
	setGlobalConfigPath(rootPath)

	// Initialize server config.
	if _, err := initConfig(); err != nil {
	if err := loadConfig(credential{}); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}
}
@@ -19,6 +19,11 @@ package cmd

import (
	"crypto/rand"
	"encoding/base64"
	"os"

	"github.com/minio/mc/pkg/console"

	"golang.org/x/crypto/bcrypt"
)

const (
@@ -34,7 +39,7 @@ const (
func mustGetAccessKey() string {
	keyBytes := make([]byte, accessKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		panic(err)
		console.Fatalf("Unable to generate access key. Err: %s.\n", err)
	}

	for i := 0; i < accessKeyMaxLen; i++ {
@@ -47,7 +52,7 @@ func mustGetAccessKey() string {
func mustGetSecretKey() string {
	keyBytes := make([]byte, secretKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		panic(err)
		console.Fatalf("Unable to generate secret key. Err: %s.\n", err)
	}

	return string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
@@ -65,22 +70,70 @@ func isSecretKeyValid(secretKey string) bool {

// credential container for access and secret keys.
type credential struct {
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	AccessKey string `json:"accessKey,omitempty"`
	SecretKey string `json:"secretKey,omitempty"`
	secretKeyHash []byte
}

// Generate a bcrypt hash for the input secret key.
func mustGetHashedSecretKey(secretKey string) []byte {
	hashedSecretKey, err := bcrypt.GenerateFromPassword([]byte(secretKey), bcrypt.DefaultCost)
	if err != nil {
		console.Fatalf("Unable to generate secret hash for secret key. Err: %s.\n", err)
	}
	return hashedSecretKey
}
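The `secretKeyHash` stored above is only half the story; validation later has to compare an incoming secret against it. A small sketch of that round trip with `golang.org/x/crypto/bcrypt` (the `main` harness is illustrative; Minio's own verification path is not part of this hunk):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	secret := "minio123-example-secret"

	// Hash once at credential-creation time, as mustGetHashedSecretKey does.
	hash, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// Verify later: CompareHashAndPassword returns nil only on a match.
	if err := bcrypt.CompareHashAndPassword(hash, []byte(secret)); err != nil {
		fmt.Println("secret key mismatch")
		return
	}
	fmt.Println("secret key verified")
}
```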
// Initialize a new credential object.
func newCredential() credential {
	return credential{mustGetAccessKey(), mustGetSecretKey()}
	return newCredentialWithKeys(mustGetAccessKey(), mustGetSecretKey())
}

func getCredential(accessKey, secretKey string) (credential, error) {
func newCredentialWithKeys(accessKey, secretKey string) credential {
	secretHash := mustGetHashedSecretKey(secretKey)
	return credential{accessKey, secretKey, secretHash}
}

// Validate incoming auth keys.
func validateAuthKeys(accessKey, secretKey string) error {
	// Validate the env values before proceeding.
	if !isAccessKeyValid(accessKey) {
		return credential{}, errInvalidAccessKeyLength
		return errInvalidAccessKeyLength
	}

	if !isSecretKeyValid(secretKey) {
		return credential{}, errInvalidSecretKeyLength
		return errInvalidSecretKeyLength
	}
	return nil
}

// Variant of getCredentialFromEnv but upon error fails right here.
func mustGetCredentialFromEnv() credential {
	creds, err := getCredentialFromEnv()
	if err != nil {
		console.Fatalf("Unable to load credentials from environment. Err: %s.\n", err)
	}
	return creds
}

// Converts the accessKey and secretKey into a credential object which
// contains a bcrypt hash of the secret key for future validation.
func getCredentialFromEnv() (credential, error) {
	// Fetch access keys from environment variables and update the config.
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
	secretKey := os.Getenv("MINIO_SECRET_KEY")

	// Envs are set globally.
	globalIsEnvCreds = accessKey != "" && secretKey != ""

	if globalIsEnvCreds {
		// Validate the env values before proceeding.
		if err := validateAuthKeys(accessKey, secretKey); err != nil {
			return credential{}, err
		}

		// Return credential object.
		return newCredentialWithKeys(accessKey, secretKey), nil
	}

	return credential{accessKey, secretKey}, nil
	return credential{}, nil
}
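A sketch of the environment override end to end; `envCredential` below re-implements just the lookup rule from `getCredentialFromEnv` (both variables must be non-empty) so the harness stays self-contained:

```go
package main

import (
	"fmt"
	"os"
)

// envCredential mirrors the lookup getCredentialFromEnv performs:
// both variables must be set for the override to take effect.
func envCredential() (accessKey, secretKey string, ok bool) {
	accessKey = os.Getenv("MINIO_ACCESS_KEY")
	secretKey = os.Getenv("MINIO_SECRET_KEY")
	return accessKey, secretKey, accessKey != "" && secretKey != ""
}

func main() {
	os.Setenv("MINIO_ACCESS_KEY", "EXAMPLEKEY123")
	os.Setenv("MINIO_SECRET_KEY", "example/secret/key0000000000000000")

	if ak, _, ok := envCredential(); ok {
		fmt.Println("using env credentials for access key", ak)
	} else {
		fmt.Println("falling back to config file credentials")
	}
}
```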
@@ -28,7 +28,7 @@ import (
// erasureCreateFile - writes an entire stream by erasure coding to
// all the disks; writes also calculate each block's checksum
// for future bit-rot protection.
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, blockSize int64, dataBlocks int, parityBlocks int, algo string, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {
func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, allowEmpty bool, blockSize int64, dataBlocks int, parityBlocks int, algo string, writeQuorum int) (bytesWritten int64, checkSums []string, err error) {
	// Allocate a blockSize-sized buffer for reading from the incoming stream.
	buf := make([]byte, blockSize)

@@ -47,7 +47,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
	// We have reached EOF on the first byte read; the io.Reader
	// must be 0 bytes, so we don't need to erasure code the
	// data. Create a 0-byte file instead.
	if bytesWritten == 0 {
	if bytesWritten == 0 && allowEmpty {
		blocks = make([][]byte, len(disks))
		rErr = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum)
		if rErr != nil {
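To make the new `allowEmpty` semantics concrete, here is a toy model of the zero-byte decision, not the real erasure path; it only mimics how a zero-byte reader is accepted or rejected depending on the flag:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeStream models erasureCreateFile's zero-byte handling: it reads the
// stream in blockSize chunks and reports whether anything was persisted.
// allowEmpty mirrors the new parameter: only when it is true does a
// zero-byte reader still count as a successfully created (empty) object.
func writeStream(r io.Reader, blockSize int64, allowEmpty bool) (int64, bool) {
	buf := make([]byte, blockSize)
	var written int64
	for {
		n, err := r.Read(buf)
		written += int64(n)
		if err == io.EOF {
			break
		}
		if err != nil {
			return written, false
		}
	}
	if written == 0 {
		return 0, allowEmpty // empty object only if the caller permits it
	}
	return written, true
}

func main() {
	n, ok := writeStream(bytes.NewReader(nil), 1024, true)
	fmt.Println(n, ok) // 0 true: zero-byte object is allowed

	n, ok = writeStream(bytes.NewReader(nil), 1024, false)
	fmt.Println(n, ok) // 0 false: empty stream rejected
}
```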
@@ -137,9 +137,5 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
	// Wait for all the appends to finish.
	wg.Wait()

	// Do we have write quorum?
	if !isDiskQuorum(wErrs, writeQuorum) {
		return traceError(errXLWriteQuorum)
	}
	return reduceWriteQuorumErrs(wErrs, objectOpIgnoredErrs, writeQuorum)
}
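Both the removed `isDiskQuorum` check and its `reduceWriteQuorumErrs` replacement ultimately hinge on counting per-disk successes against the quorum. A minimal model of that count (function names here are illustrative, not the Minio helpers):

```go
package main

import (
	"errors"
	"fmt"
)

// hasWriteQuorum reports whether at least quorum of the per-disk
// append results succeeded (a nil error means that disk's write landed).
func hasWriteQuorum(errs []error, quorum int) bool {
	success := 0
	for _, err := range errs {
		if err == nil {
			success++
		}
	}
	return success >= quorum
}

func main() {
	diskErrs := []error{nil, nil, errors.New("disk down"), nil}
	fmt.Println(hasWriteQuorum(diskErrs, 3)) // true: 3 of 4 writes succeeded
	fmt.Println(hasWriteQuorum(diskErrs, 4)) // false: quorum not met
}
```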
@@ -56,7 +56,7 @@ func TestErasureCreateFile(t *testing.T) {
		t.Fatal(err)
	}
	// Test when all disks are up.
	size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -69,7 +69,7 @@ func TestErasureCreateFile(t *testing.T) {
	disks[5] = AppendDiskDown{disks[5].(*posix)}

	// Test when two disks are down.
	size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -83,7 +83,7 @@ func TestErasureCreateFile(t *testing.T) {
	disks[8] = AppendDiskDown{disks[8].(*posix)}
	disks[9] = AppendDiskDown{disks[9].(*posix)}

	size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -93,7 +93,7 @@ func TestErasureCreateFile(t *testing.T) {

	// 1 more disk down. 7 disks down in total. Should return quorum error.
	disks[10] = AppendDiskDown{disks[10].(*posix)}
	_, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	_, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if errorCause(err) != errXLWriteQuorum {
		t.Errorf("erasureCreateFile return value: expected errXLWriteQuorum, got %s", err)
	}

@@ -48,7 +48,7 @@ func TestErasureHealFile(t *testing.T) {
		t.Fatal(err)
	}
	// Create a test file.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}

@@ -271,7 +271,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
	}

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -354,7 +354,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
	}

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -433,7 +433,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
	iterations := 10000

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
@@ -24,8 +24,8 @@ import (
	"sync"

	"github.com/klauspost/reedsolomon"
	"github.com/minio/blake2b-simd"
	"github.com/minio/sha256-simd"
	"golang.org/x/crypto/blake2b"
)

// newHashWriters - initialize a slice of hashes for the disk count.
@@ -48,14 +48,14 @@ func newHash(algo string) (h hash.Hash) {
		// Ignore the error: New512 without a key never fails.
		// New512 only returns a non-nil error if the passed key is
		// longer than 64 bytes, and we use blake2b without a key here.
		h = blake2b.New512()
		h, _ = blake2b.New512(nil)
	// Add new hashes here.
	default:
		// Default to blake2b.
		// Ignore the error: New512 without a key never fails.
		// New512 only returns a non-nil error if the passed key is
		// longer than 64 bytes, and we use blake2b without a key here.
		h = blake2b.New512()
		h, _ = blake2b.New512(nil)
	}
	return h
}
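The constructor change above follows from the API of `golang.org/x/crypto/blake2b`: `New512` takes an optional key and returns an error, which can only occur when the key exceeds 64 bytes. A quick demonstration:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// With a nil key New512 never returns an error, so it is safe to drop.
	h, _ := blake2b.New512(nil)
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))

	// A key longer than 64 bytes is the only way to make New512 fail.
	if _, err := blake2b.New512(make([]byte, 65)); err != nil {
		fmt.Println("oversized key rejected:", err)
	}
}
```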
@@ -162,9 +162,22 @@ func newNotificationEvent(event eventData) NotificationEvent {
	return nEvent
}

// Fetch the external target. No locking needed here since this map is
// never written after initial startup.
// Fetch all external targets. This returns a copy of the current map of
// external notification targets.
func (en eventNotifier) GetAllExternalTargets() map[string]*logrus.Logger {
	en.external.rwMutex.RLock()
	defer en.external.rwMutex.RUnlock()
	targetsCopy := make(map[string]*logrus.Logger)
	for k, v := range en.external.targets {
		targetsCopy[k] = v
	}
	return targetsCopy
}

// Fetch the external target.
func (en eventNotifier) GetExternalTarget(queueARN string) *logrus.Logger {
	en.external.rwMutex.RLock()
	defer en.external.rwMutex.RUnlock()
	return en.external.targets[queueARN]
}
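Returning `targetsCopy` instead of the internal map is deliberate: after the read lock is released, a caller iterating the live map would race with writers. The copy-under-read-lock pattern in isolation (generic names, not the notifier types):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.RWMutex
	targets map[string]int
}

// Snapshot copies the map under the read lock; callers can then
// iterate the copy freely while writers keep mutating the original.
func (r *registry) Snapshot() map[string]int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make(map[string]int, len(r.targets))
	for k, v := range r.targets {
		out[k] = v
	}
	return out
}

func main() {
	r := &registry{targets: map[string]int{"arn:minio:sqs:1": 1}}
	snap := r.Snapshot()
	r.mu.Lock()
	r.targets["arn:minio:sqs:2"] = 2 // safe: the snapshot is unaffected
	r.mu.Unlock()
	fmt.Println(len(snap), len(r.targets))
}
```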
@@ -531,7 +544,7 @@ func loadAllBucketNotifications(objAPI ObjectLayer) (map[string]*notificationCon
func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
	queueTargets := make(map[string]*logrus.Logger)
	// Load all amqp targets, initialize their respective loggers.
	for accountID, amqpN := range serverConfig.GetAMQP() {
	for accountID, amqpN := range serverConfig.Notify.GetAMQP() {
		if !amqpN.Enable {
			continue
		}
@@ -558,7 +571,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
		queueTargets[queueARN] = amqpLog
	}
	// Load all nats targets, initialize their respective loggers.
	for accountID, natsN := range serverConfig.GetNATS() {
	for accountID, natsN := range serverConfig.Notify.GetNATS() {
		if !natsN.Enable {
			continue
		}
@@ -586,7 +599,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
	}

	// Load redis targets, initialize their respective loggers.
	for accountID, redisN := range serverConfig.GetRedis() {
	for accountID, redisN := range serverConfig.Notify.GetRedis() {
		if !redisN.Enable {
			continue
		}
@@ -614,7 +627,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
	}

	// Load Webhook targets, initialize their respective loggers.
	for accountID, webhookN := range serverConfig.GetWebhook() {
	for accountID, webhookN := range serverConfig.Notify.GetWebhook() {
		if !webhookN.Enable {
			continue
		}
@@ -635,7 +648,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
	}

	// Load elastic targets, initialize their respective loggers.
	for accountID, elasticN := range serverConfig.GetElasticSearch() {
	for accountID, elasticN := range serverConfig.Notify.GetElasticSearch() {
		if !elasticN.Enable {
			continue
		}
@@ -661,7 +674,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
	}

	// Load PostgreSQL targets, initialize their respective loggers.
	for accountID, pgN := range serverConfig.GetPostgreSQL() {
	for accountID, pgN := range serverConfig.Notify.GetPostgreSQL() {
		if !pgN.Enable {
			continue
		}
@@ -686,7 +699,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
		queueTargets[queueARN] = pgLog
	}
	// Load Kafka targets, initialize their respective loggers.
	for accountID, kafkaN := range serverConfig.GetKafka() {
	for accountID, kafkaN := range serverConfig.Notify.GetKafka() {
		if !kafkaN.Enable {
			continue
		}
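Every loop above has the same shape: range over per-account configs, skip disabled entries, and register the initialized logger under a queue ARN. Condensed into one generic sketch — the `notifyConfig` type and the ARN format below are simplifications, not Minio's exact types:

```go
package main

import "fmt"

type notifyConfig struct{ Enable bool }

// loadTargets mirrors loadAllQueueTargets' per-service loops: disabled
// accounts are skipped, enabled ones are keyed by a constructed ARN.
func loadTargets(service string, configs map[string]notifyConfig) map[string]string {
	targets := make(map[string]string)
	for accountID, cfg := range configs {
		if !cfg.Enable {
			continue
		}
		queueARN := fmt.Sprintf("arn:minio:sqs:%s:%s", accountID, service)
		targets[queueARN] = "initialized " + service + " logger"
	}
	return targets
}

func main() {
	cfgs := map[string]notifyConfig{"1": {Enable: true}, "2": {Enable: false}}
	for arn := range loadTargets("amqp", cfgs) {
		fmt.Println(arn) // only account "1" produces a target
	}
}
```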
@@ -106,7 +106,7 @@ func TestInitEventNotifierWithPostgreSQL(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetPostgreSQLNotifyByID("1", postgreSQLNotify{Enable: true})
	serverConfig.Notify.SetPostgreSQLByID("1", postgreSQLNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("PostgreSQL config didn't fail.")
	}
@@ -137,7 +137,7 @@ func TestInitEventNotifierWithNATS(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetNATSNotifyByID("1", natsNotify{Enable: true})
	serverConfig.Notify.SetNATSByID("1", natsNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("NATS config didn't fail.")
	}
@@ -168,7 +168,7 @@ func TestInitEventNotifierWithWebHook(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetWebhookNotifyByID("1", webhookNotify{Enable: true})
	serverConfig.Notify.SetWebhookByID("1", webhookNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("WebHook config didn't fail.")
	}
@@ -199,7 +199,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetAMQPNotifyByID("1", amqpNotify{Enable: true})
	serverConfig.Notify.SetAMQPByID("1", amqpNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("AMQP config didn't fail.")
	}
@@ -230,7 +230,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetElasticSearchNotifyByID("1", elasticSearchNotify{Enable: true})
	serverConfig.Notify.SetElasticSearchByID("1", elasticSearchNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("ElasticSearch config didn't fail.")
	}
@@ -261,7 +261,7 @@ func TestInitEventNotifierWithRedis(t *testing.T) {
		t.Fatal("Unable to initialize FS backend.", err)
	}

	serverConfig.SetRedisNotifyByID("1", redisNotify{Enable: true})
	serverConfig.Notify.SetRedisByID("1", redisNotify{Enable: true})
	if err := initEventNotifier(fs); err == nil {
		t.Fatal("Redis config didn't fail.")
	}
@@ -27,20 +27,20 @@ import (
// windows automatically.
func fsRemoveFile(filePath string) (err error) {
	if filePath == "" {
		return errInvalidArgument
		return traceError(errInvalidArgument)
	}

	if err = checkPathLength(filePath); err != nil {
		return err
		return traceError(err)
	}

	if err = os.Remove(preparePath(filePath)); err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
			return traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return errFileAccessDenied
			return traceError(errFileAccessDenied)
		}
		return err
		return traceError(err)
	}

	return nil
@@ -50,43 +50,44 @@ func fsRemoveFile(filePath string) (err error) {
// long paths for windows automatically.
func fsRemoveAll(dirPath string) (err error) {
	if dirPath == "" {
		return errInvalidArgument
		return traceError(errInvalidArgument)
	}

	if err = checkPathLength(dirPath); err != nil {
		return err
		return traceError(err)
	}

	if err = removeAll(dirPath); err != nil {
		if os.IsPermission(err) {
			return errVolumeAccessDenied
			return traceError(errVolumeAccessDenied)
		}
		return traceError(err)
	}

	return err

	return nil
}

// Removes a directory only if it's empty, handles long
// paths for windows automatically.
func fsRemoveDir(dirPath string) (err error) {
	if dirPath == "" {
		return errInvalidArgument
		return traceError(errInvalidArgument)
	}

	if err = checkPathLength(dirPath); err != nil {
		return err
		return traceError(err)
	}

	if err = os.Remove(preparePath(dirPath)); err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
			return traceError(errVolumeNotFound)
		} else if isSysErrNotEmpty(err) {
			return errVolumeNotEmpty
			return traceError(errVolumeNotEmpty)
		}
		return traceError(err)
	}

	return err
	return nil
}

// Creates a new directory; the parent directory should exist
@@ -95,26 +96,27 @@ func fsRemoveDir(dirPath string) (err error) {
// are handled automatically.
func fsMkdir(dirPath string) (err error) {
	if dirPath == "" {
		return errInvalidArgument
		return traceError(errInvalidArgument)
	}

	if err = checkPathLength(dirPath); err != nil {
		return err
		return traceError(err)
	}

	if err = os.Mkdir(preparePath(dirPath), 0777); err != nil {
		if os.IsExist(err) {
			return errVolumeExists
			return traceError(errVolumeExists)
		} else if os.IsPermission(err) {
			return errDiskAccessDenied
			return traceError(errDiskAccessDenied)
		} else if isSysErrNotDir(err) {
			// File path cannot be verified since
			// one of the parents is a file.
			return errDiskAccessDenied
			return traceError(errDiskAccessDenied)
		} else if isSysErrPathNotFound(err) {
			// Add specific case for windows.
			return errDiskAccessDenied
			return traceError(errDiskAccessDenied)
		}
		return traceError(err)
	}

	return nil
@@ -124,24 +126,24 @@ func fsMkdir(dirPath string) (err error) {
// attributes upon success.
func fsStatDir(statDir string) (os.FileInfo, error) {
	if statDir == "" {
		return nil, errInvalidArgument
		return nil, traceError(errInvalidArgument)
	}
	if err := checkPathLength(statDir); err != nil {
		return nil, err
		return nil, traceError(err)
	}

	fi, err := os.Stat(preparePath(statDir))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
			return nil, traceError(errVolumeNotFound)
		} else if os.IsPermission(err) {
			return nil, errVolumeAccessDenied
			return nil, traceError(errVolumeAccessDenied)
		}
		return nil, err
		return nil, traceError(err)
	}

	if !fi.IsDir() {
		return nil, errVolumeAccessDenied
		return nil, traceError(errVolumeAccessDenied)
	}

	return fi, nil
@@ -150,28 +152,28 @@ func fsStatDir(statDir string) (os.FileInfo, error) {
// Lookup if file exists, returns file attributes upon success
func fsStatFile(statFile string) (os.FileInfo, error) {
	if statFile == "" {
		return nil, errInvalidArgument
		return nil, traceError(errInvalidArgument)
	}

	if err := checkPathLength(statFile); err != nil {
		return nil, err
		return nil, traceError(err)
	}

	fi, err := os.Stat(preparePath(statFile))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errFileNotFound
			return nil, traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return nil, errFileAccessDenied
			return nil, traceError(errFileAccessDenied)
		} else if isSysErrNotDir(err) {
			return nil, errFileAccessDenied
			return nil, traceError(errFileAccessDenied)
		} else if isSysErrPathNotFound(err) {
			return nil, errFileNotFound
			return nil, traceError(errFileNotFound)
		}
		return nil, err
		return nil, traceError(err)
	}
	if fi.IsDir() {
		return nil, errFileNotFound
		return nil, traceError(errFileNotFound)
	}
	return fi, nil
}
@@ -180,44 +182,44 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
// a readable stream and the size of the readable stream.
func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
	if readPath == "" || offset < 0 {
		return nil, 0, errInvalidArgument
		return nil, 0, traceError(errInvalidArgument)
	}
	if err := checkPathLength(readPath); err != nil {
		return nil, 0, err
		return nil, 0, traceError(err)
	}

	fr, err := os.Open(preparePath(readPath))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, 0, errFileNotFound
			return nil, 0, traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return nil, 0, errFileAccessDenied
			return nil, 0, traceError(errFileAccessDenied)
		} else if isSysErrNotDir(err) {
			// File path cannot be verified since one of the parents is a file.
			return nil, 0, errFileAccessDenied
			return nil, 0, traceError(errFileAccessDenied)
		} else if isSysErrPathNotFound(err) {
			// Add specific case for windows.
			return nil, 0, errFileNotFound
			return nil, 0, traceError(errFileNotFound)
		}
		return nil, 0, err
		return nil, 0, traceError(err)
	}

	// Stat to get the size of the file at path.
	st, err := fr.Stat()
	if err != nil {
		return nil, 0, err
		return nil, 0, traceError(err)
	}

	// Verify that it's a regular file, since a subsequent Seek is undefined otherwise.
	if !st.Mode().IsRegular() {
		return nil, 0, errIsNotRegular
		return nil, 0, traceError(errIsNotRegular)
	}

	// Seek to the requested offset.
	if offset > 0 {
		_, err = fr.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
			return nil, 0, traceError(err)
		}
	}
@@ -228,22 +230,22 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(tempObjPath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
	if tempObjPath == "" || reader == nil || buf == nil {
		return 0, errInvalidArgument
		return 0, traceError(errInvalidArgument)
	}

	if err := checkPathLength(tempObjPath); err != nil {
		return 0, err
		return 0, traceError(err)
	}

	if err := mkdirAll(pathutil.Dir(tempObjPath), 0777); err != nil {
		return 0, err
		return 0, traceError(err)
	}

	writer, err := os.OpenFile(preparePath(tempObjPath), os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		// File path cannot be verified since one of the parents is a file.
		if isSysErrNotDir(err) {
			return 0, errFileAccessDenied
			return 0, traceError(errFileAccessDenied)
		}
		return 0, err
	}
@@ -252,13 +254,13 @@ func fsCreateFile(tempObjPath string, reader io.Reader, buf []byte, fallocSize i
	// Fallocate only if the size of the final object is known.
	if fallocSize > 0 {
		if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil {
			return 0, err
			return 0, traceError(err)
		}
	}

	bytesWritten, err := io.CopyBuffer(writer, reader, buf)
	if err != nil {
		return 0, err
		return 0, traceError(err)
	}

	return bytesWritten, nil
@@ -267,20 +269,20 @@ func fsCreateFile(tempObjPath string, reader io.Reader, buf []byte, fallocSize i
// Removes uploadID at destination path.
func fsRemoveUploadIDPath(basePath, uploadIDPath string) error {
	if basePath == "" || uploadIDPath == "" {
		return errInvalidArgument
		return traceError(errInvalidArgument)
	}

	// List all the entries in uploadID.
	entries, err := readDir(uploadIDPath)
	if err != nil && err != errFileNotFound {
		return err
		return traceError(err)
	}

	// Delete all the entries obtained from previous readdir.
	for _, entryPath := range entries {
		err = fsDeleteFile(basePath, pathJoin(uploadIDPath, entryPath))
		if err != nil && err != errFileNotFound {
			return err
			return traceError(err)
		}
	}

@@ -325,11 +327,11 @@ func fsRenameFile(sourcePath, destPath string) error {
// this function additionally protects the basePath from being deleted.
func fsDeleteFile(basePath, deletePath string) error {
	if err := checkPathLength(basePath); err != nil {
		return err
		return traceError(err)
	}

	if err := checkPathLength(deletePath); err != nil {
		return err
		return traceError(err)
	}

	if basePath == deletePath {
@@ -340,11 +342,11 @@ func fsDeleteFile(basePath, deletePath string) error {
	pathSt, err := os.Stat(preparePath(deletePath))
	if err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
			return traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return errFileAccessDenied
			return traceError(errFileAccessDenied)
		}
		return err
		return traceError(err)
	}

	if pathSt.IsDir() && !isDirEmpty(deletePath) {
@@ -355,13 +357,13 @@ func fsDeleteFile(basePath, deletePath string) error {
	// Attempt to remove path.
	if err = os.Remove(preparePath(deletePath)); err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
			return traceError(errFileNotFound)
		} else if os.IsPermission(err) {
			return errFileAccessDenied
			return traceError(errFileAccessDenied)
		} else if isSysErrNotEmpty(err) {
			return errVolumeNotEmpty
			return traceError(errVolumeNotEmpty)
		}
		return err
		return traceError(err)
	}

	// Recursively go down the next path and delete again.
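All of these helpers now wrap failures in `traceError`, and the tests below unwrap with `errorCause` before comparing sentinel errors. A toy model of that wrap/unwrap pair (the real `traceError` records a full stack trace, omitted here):

```go
package main

import (
	"errors"
	"fmt"
)

var errFileNotFound = errors.New("file not found")

// tracedError pairs the original error with call-site context.
type tracedError struct {
	cause error
	trace string // the real implementation stores a stack trace here
}

func (t tracedError) Error() string { return t.trace + ": " + t.cause.Error() }

func traceError(err error) error { return tracedError{cause: err, trace: "fs-helpers"} }

// errorCause strips the trace wrapper so sentinel comparisons still work.
func errorCause(err error) error {
	if t, ok := err.(tracedError); ok {
		return t.cause
	}
	return err
}

func main() {
	err := traceError(errFileNotFound)
	fmt.Println(err == errFileNotFound)             // false: wrapped
	fmt.Println(errorCause(err) == errFileNotFound) // true: unwrapped
}
```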
@@ -31,11 +31,11 @@ func TestFSStats(t *testing.T) {

	// Setup test environment.

	if err = fsMkdir(""); err != errInvalidArgument {
	if err = fsMkdir(""); errorCause(err) != errInvalidArgument {
		t.Fatal("Unexpected error", err)
	}
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); err != errFileNameTooLong {
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
		t.Fatal("Unexpected error", err)
	}

@@ -51,7 +51,7 @@ func TestFSStats(t *testing.T) {
	// Seek back.
	reader.Seek(0, 0)

	if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); err != errVolumeExists {
	if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errorCause(err) != errVolumeExists {
		t.Fatal("Unexpected error", err)
	}

@@ -138,11 +138,11 @@ func TestFSStats(t *testing.T) {

	for i, testCase := range testCases {
		if testCase.srcPath != "" {
			if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
			if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
				t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
			}
		} else {
			if _, err := fsStatDir(pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr {
			if _, err := fsStatDir(pathJoin(testCase.srcFSPath, testCase.srcVol)); errorCause(err) != testCase.expectedErr {
				t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
			}
		}
@@ -161,11 +161,11 @@ func TestFSCreateAndOpen(t *testing.T) {
		t.Fatalf("Unable to create directory, %s", err)
	}

	if _, err = fsCreateFile("", nil, nil, 0); err != errInvalidArgument {
	if _, err = fsCreateFile("", nil, nil, 0); errorCause(err) != errInvalidArgument {
		t.Fatal("Unexpected error", err)
	}

	if _, _, err = fsOpenFile("", -1); err != errInvalidArgument {
	if _, _, err = fsOpenFile("", -1); errorCause(err) != errInvalidArgument {
		t.Fatal("Unexpected error", err)
	}

@@ -200,17 +200,17 @@ func TestFSCreateAndOpen(t *testing.T) {

	for i, testCase := range testCases {
		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, buf, reader.Size())
		if err != testCase.expectedErr {
		if errorCause(err) != testCase.expectedErr {
			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
		}
		_, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
		if err != testCase.expectedErr {
		if errorCause(err) != testCase.expectedErr {
			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
		}
	}

	// Attempt to open a directory.
	if _, _, err = fsOpenFile(pathJoin(path), 0); err != errIsNotRegular {
	if _, _, err = fsOpenFile(pathJoin(path), 0); errorCause(err) != errIsNotRegular {
		t.Fatal("Unexpected error", err)
	}
}
@@ -272,7 +272,7 @@ func TestFSDeletes(t *testing.T) {
	}

	for i, testCase := range testCases {
		if err = fsDeleteFile(path, pathJoin(path, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
		if err = fsDeleteFile(path, pathJoin(path, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
		}
	}
@@ -374,11 +374,11 @@ func TestFSRemoves(t *testing.T) {

	for i, testCase := range testCases {
		if testCase.srcPath != "" {
			if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
			if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
				t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
			}
		} else {
			if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
			if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
				t.Error(err)
			}
		}
@@ -388,11 +388,11 @@ func TestFSRemoves(t *testing.T) {
		t.Fatal(err)
	}

	if err = fsRemoveAll(""); err != errInvalidArgument {
	if err = fsRemoveAll(""); errorCause(err) != errInvalidArgument {
		t.Fatal(err)
	}
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); err != errFileNameTooLong {
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errorCause(err) != errFileNameTooLong {
		t.Fatal(err)
	}
}
@@ -125,18 +125,18 @@ func (m *fsMetaV1) AddObjectPart(partNumber int, partName string, partETag strin
	sort.Sort(byObjectPartNumber(m.Parts))
}

func (m *fsMetaV1) WriteTo(writer io.Writer) (n int64, err error) {
func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
	var metadataBytes []byte
	metadataBytes, err = json.Marshal(m)
	if err != nil {
		return 0, traceError(err)
	}

	if err = writer.(*lock.LockedFile).Truncate(0); err != nil {
	if err = lk.Truncate(0); err != nil {
		return 0, traceError(err)
	}

	if _, err = writer.Write(metadataBytes); err != nil {
	if _, err = lk.Write(metadataBytes); err != nil {
		return 0, traceError(err)
	}

@@ -144,9 +144,14 @@ func (m *fsMetaV1) WriteTo(writer io.Writer) (n int64, err error) {
	return int64(len(metadataBytes)), nil
}

func (m *fsMetaV1) ReadFrom(reader io.Reader) (n int64, err error) {
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
	var metadataBytes []byte
	metadataBytes, err = ioutil.ReadAll(reader)
	fi, err := lk.Stat()
	if err != nil {
		return 0, traceError(err)
	}

	metadataBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
	if err != nil {
		return 0, traceError(err)
	}
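The rewritten `ReadFrom` stats the locked file and reads it through `io.NewSectionReader`, so every read starts from offset zero regardless of the handle's current seek position. The same read pattern with a plain `*os.File`, which like `lock.LockedFile` satisfies `io.ReaderAt`:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "fsmeta")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString(`{"version":"1.0.0","format":"fs"}`)

	// Stat for the current size, then read the whole file through a
	// section reader; the file's own seek offset is left untouched.
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes: %s\n", len(data), data)
}
```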
@@ -18,7 +18,6 @@ package cmd

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
	"testing"
@@ -69,10 +68,9 @@ func TestReadFSMetadata(t *testing.T) {
	}
	defer rlk.Close()

	sectionReader := io.NewSectionReader(rlk, 0, rlk.Size())
	// Regular fs metadata reading, no errors expected
	fsMeta := fsMetaV1{}
	if _, err = fsMeta.ReadFrom(sectionReader); err != nil {
	if _, err = fsMeta.ReadFrom(rlk.LockedFile); err != nil {
		t.Fatal("Unexpected error ", err)
	}

@@ -84,7 +82,7 @@ func TestReadFSMetadata(t *testing.T) {
	file.Write([]byte{'a'})
	file.Close()
	fsMeta = fsMetaV1{}
	if _, err := fsMeta.ReadFrom(sectionReader); err == nil {
	if _, err := fsMeta.ReadFrom(rlk.LockedFile); err == nil {
		t.Fatal("Should fail", err)
	}
}
@@ -119,10 +117,9 @@ func TestWriteFSMetadata(t *testing.T) {
	}
	defer rlk.Close()

	sectionReader := io.NewSectionReader(rlk, 0, rlk.Size())
	// FS metadata reading, no errors expected (healthy disk)
	fsMeta := fsMetaV1{}
	_, err = fsMeta.ReadFrom(sectionReader)
	_, err = fsMeta.ReadFrom(rlk.LockedFile)
	if err != nil {
		t.Fatal("Unexpected error ", err)
	}
@@ -1,142 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"fmt"
	"io"
	"runtime"
	"time"

	pathutil "path"

	"github.com/minio/minio/pkg/lock"
)

// Returns if the prefix is a multipart upload.
func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
	uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile)
	_, err := fsStatFile(uploadsIDPath)
	if err != nil {
		if err == errFileNotFound {
			return false
		}
		errorIf(err, "Unable to access uploads.json "+uploadsIDPath)
		return false
	}
	return true
}

// Wrapper for deleting the uploads.json file, handling a tricky case on Windows.
func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error {
	timeID := fmt.Sprintf("%X", time.Now().UTC().UnixNano())
	tmpPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"+"+timeID)

	multipartBucketPath := pathJoin(fs.fsPath, minioMetaMultipartBucket)
	uploadPath := pathJoin(multipartBucketPath, bucket, object)
	uploadsMetaPath := pathJoin(uploadPath, uploadsJSONFile)

	// Special case for Windows, please read through.
	if runtime.GOOS == globalWindowsOSName {
		// Ordinarily windows does not permit deletion or renaming of files still
		// in use, but if all open handles to that file were opened with FILE_SHARE_DELETE
		// then it can permit renames and deletions of open files.
		//
		// There are however some gotchas with this, and it is worth listing them here.
		// Firstly, Windows never allows you to really delete an open file, rather it is
		// flagged as delete pending and its entry in its directory remains visible
		// (though no new file handles may be opened to it) and when the very last
		// open handle to the file in the system is closed, only then is it truly
		// deleted. Well, actually only sort of truly deleted, because Windows only
		// appears to remove the file entry from the directory, but in fact that
		// entry is merely hidden and actually still exists and attempting to create
		// a file with the same name will return an access denied error. How long it
		// silently exists for depends on a range of factors, but put it this way:
		// if your code loops creating and deleting the same file name as you might
		// when operating a lock file, you're going to see lots of random spurious
		// access denied errors and truly dismal lock file performance compared to POSIX.
		//
		// We work around these un-POSIX file semantics with a two-step delete.
		// First, we rename the file to a tmp location inside multipartTmpBucket.
		// We always open files with FILE_SHARE_DELETE permission enabled, with that
		// flag Windows permits renaming and deletion, and because the name was changed
		// to a very random name somewhere not in its origin directory before deletion,
		// you don't see those unexpected random errors when creating files with the
		// same name as a recently deleted file as you do anywhere else on Windows.
		// Because the file is probably not in its original containing directory any more,
		// deletions of that directory will not fail with “directory not empty” as they
		// otherwise normally would either.
		fsRenameFile(uploadsMetaPath, tmpPath)

		// Proceed to deleting the directory.
		if err := fsDeleteFile(multipartBucketPath, uploadPath); err != nil {
			return err
		}

		// Finally delete the renamed file.
		return fsDeleteFile(pathutil.Dir(tmpPath), tmpPath)
	}
	return fsDeleteFile(multipartBucketPath, uploadsMetaPath)
}

// Removes the uploadID, called either by CompleteMultipart or AbortMultipart. If the resulting uploads
// slice is empty then we remove/purge the file.
func (fs fsObjects) removeUploadID(bucket, object, uploadID string, rwlk *lock.LockedFile) error {
	uploadIDs := uploadsV1{}
	_, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
	if err != nil {
		return err
	}

	// Removes upload id from the uploads list.
	uploadIDs.RemoveUploadID(uploadID)

	// Check this is the last entry.
	if uploadIDs.IsEmpty() {
		// No more uploads left, so we delete `uploads.json` file.
		return fs.deleteUploadsJSON(bucket, object, uploadID)
	} // else not empty

	// Write updated `uploads.json`.
	_, err = uploadIDs.WriteTo(rwlk)
	return err
}

// Adds a new uploadID; if no previous `uploads.json` is
// found we initialize a new one.
func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.Time, rwlk *lock.LockedFile) error {
	uploadIDs := uploadsV1{}

	_, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
	// For all unexpected errors, we return.
	if err != nil && errorCause(err) != io.EOF {
		return err
	}

	// If we couldn't read anything, we assume a default
	// (empty) upload info.
	if errorCause(err) == io.EOF {
		uploadIDs = newUploadsV1("fs")
	}

	// Adds new upload id to the list.
	uploadIDs.AddUploadID(uploadID, initiated)

	// Write updated `uploads.json`.
	_, err = uploadIDs.WriteTo(rwlk)
	return err
}
@@ -1,49 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"path/filepath"
	"testing"
)

// TestFSWriteUploadJSON - tests for writeUploadJSON for FS
func TestFSWriteUploadJSON(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	obj := initFSObjects(disk, t)

	bucketName := "bucket"
	objectName := "object"

	obj.MakeBucket(bucketName)
	_, err := obj.NewMultipartUpload(bucketName, objectName, nil)
	if err != nil {
		t.Fatal("Unexpected err: ", err)
	}

	// newMultipartUpload will fail.
	removeAll(disk) // Remove disk.
	_, err = obj.NewMultipartUpload(bucketName, objectName, nil)
	if err != nil {
		if _, ok := errorCause(err).(BucketNotFound); !ok {
			t.Fatal("Unexpected err: ", err)
		}
	}
}
@@ -24,12 +24,128 @@ import (
	"io"
	"os"
	pathutil "path"
	"runtime"
	"strings"
	"time"

	"github.com/minio/minio/pkg/lock"
	"github.com/minio/sha256-simd"
)

// Returns if the prefix is a multipart upload.
func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
	uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile)
	_, err := fsStatFile(uploadsIDPath)
	if err != nil {
		if err == errFileNotFound {
			return false
		}
		errorIf(err, "Unable to access uploads.json "+uploadsIDPath)
		return false
	}
	return true
}

// Wrapper for deleting the uploads.json file, handling a tricky case on Windows.
func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error {
	timeID := fmt.Sprintf("%X", time.Now().UTC().UnixNano())
	tmpPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"+"+timeID)

	multipartBucketPath := pathJoin(fs.fsPath, minioMetaMultipartBucket)
	uploadPath := pathJoin(multipartBucketPath, bucket, object)
	uploadsMetaPath := pathJoin(uploadPath, uploadsJSONFile)

	// Special case for Windows, please read through.
	if runtime.GOOS == globalWindowsOSName {
		// Ordinarily windows does not permit deletion or renaming of files still
		// in use, but if all open handles to that file were opened with FILE_SHARE_DELETE
		// then it can permit renames and deletions of open files.
		//
		// There are however some gotchas with this, and it is worth listing them here.
		// Firstly, Windows never allows you to really delete an open file, rather it is
		// flagged as delete pending and its entry in its directory remains visible
		// (though no new file handles may be opened to it) and when the very last
		// open handle to the file in the system is closed, only then is it truly
		// deleted. Well, actually only sort of truly deleted, because Windows only
		// appears to remove the file entry from the directory, but in fact that
		// entry is merely hidden and actually still exists and attempting to create
		// a file with the same name will return an access denied error. How long it
		// silently exists for depends on a range of factors, but put it this way:
		// if your code loops creating and deleting the same file name as you might
		// when operating a lock file, you're going to see lots of random spurious
		// access denied errors and truly dismal lock file performance compared to POSIX.
		//
		// We work around these un-POSIX file semantics with a two-step delete.
		// First, we rename the file to a tmp location inside multipartTmpBucket.
		// We always open files with FILE_SHARE_DELETE permission enabled, with that
		// flag Windows permits renaming and deletion, and because the name was changed
		// to a very random name somewhere not in its origin directory before deletion,
		// you don't see those unexpected random errors when creating files with the
		// same name as a recently deleted file as you do anywhere else on Windows.
		// Because the file is probably not in its original containing directory any more,
		// deletions of that directory will not fail with "directory not empty" as they
		// otherwise normally would either.
		fsRenameFile(uploadsMetaPath, tmpPath)

		// Proceed to deleting the directory.
		if err := fsDeleteFile(multipartBucketPath, uploadPath); err != nil {
			return err
		}

		// Finally delete the renamed file.
		return fsDeleteFile(pathutil.Dir(tmpPath), tmpPath)
	}
	return fsDeleteFile(multipartBucketPath, uploadsMetaPath)
}
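The rename-before-delete workaround in the comment above can be exercised on its own: move the doomed file to a unique name outside its directory, then remove both. A simplified, platform-agnostic sketch (error handling trimmed; the real code goes through `fsRenameFile` and `fsDeleteFile`):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"
)

// removeViaRename renames path into tmpDir under a unique name before
// deleting it, mirroring the Windows workaround described above.
func removeViaRename(path, tmpDir string) error {
	tmpPath := filepath.Join(tmpDir, fmt.Sprintf("%X", time.Now().UTC().UnixNano()))
	if err := os.Rename(path, tmpPath); err != nil {
		return err
	}
	// The original directory no longer holds the entry, so it can be
	// removed immediately even while the renamed file lingers.
	return os.Remove(tmpPath)
}

func main() {
	dir, _ := ioutil.TempDir("", "uploads")
	tmp, _ := ioutil.TempDir("", "trash")
	defer os.RemoveAll(dir)
	defer os.RemoveAll(tmp)

	target := filepath.Join(dir, "uploads.json")
	ioutil.WriteFile(target, []byte("{}"), 0644)

	if err := removeViaRename(target, tmp); err != nil {
		fmt.Println("delete failed:", err)
		return
	}
	fmt.Println("deleted via rename")
}
```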
// Removes the uploadID, called either by CompleteMultipart or AbortMultipart. If the resulting uploads
// slice is empty then we remove/purge the file.
func (fs fsObjects) removeUploadID(bucket, object, uploadID string, rwlk *lock.LockedFile) error {
	uploadIDs := uploadsV1{}
	_, err := uploadIDs.ReadFrom(rwlk)
	if err != nil {
		return err
	}

	// Removes upload id from the uploads list.
	uploadIDs.RemoveUploadID(uploadID)

	// Check this is the last entry.
	if uploadIDs.IsEmpty() {
		// No more uploads left, so we delete `uploads.json` file.
		return fs.deleteUploadsJSON(bucket, object, uploadID)
	} // else not empty

	// Write updated `uploads.json`.
	_, err = uploadIDs.WriteTo(rwlk)
	return err
}

// Adds a new uploadID; if no previous `uploads.json` is
// found we initialize a new one.
func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.Time, rwlk *lock.LockedFile) error {
	uploadIDs := uploadsV1{}

	_, err := uploadIDs.ReadFrom(rwlk)
	// For all unexpected errors, we return.
	if err != nil && errorCause(err) != io.EOF {
		return err
	}

	// If we couldn't read anything, we assume a default
	// (empty) upload info.
	if errorCause(err) == io.EOF {
		uploadIDs = newUploadsV1("fs")
	}

	// Adds new upload id to the list.
	uploadIDs.AddUploadID(uploadID, initiated)

	// Write updated `uploads.json`.
	_, err = uploadIDs.WriteTo(rwlk)
	return err
}
// listMultipartUploadIDs - list all the upload ids from a marker up to 'count'.
|
||||
func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarker string, count int) ([]uploadMetadata, bool, error) {
|
||||
var uploads []uploadMetadata
|
||||
@@ -52,7 +168,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
|
||||
|
||||
// Read `uploads.json`.
|
||||
uploadIDs := uploadsV1{}
|
||||
if _, err = uploadIDs.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); err != nil {
|
||||
if _, err = uploadIDs.ReadFrom(rlk.LockedFile); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
@@ -335,17 +451,49 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo,
|
||||
return fsMeta.Parts[nextPartIndex], true
|
||||
}
|
||||
|
||||
// CopyObjectPart - similar to PutObjectPart but reads data from an existing
|
||||
// object. Internally incoming data is written to '.minio.sys/tmp' location
|
||||
// and safely renamed to '.minio.sys/multipart' for reach parts.
|
||||
func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) {
|
||||
if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
|
||||
return PartInfo{}, err
|
||||
}
|
||||
|
||||
// Initialize pipe.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
|
||||
go func() {
|
||||
startOffset := int64(0) // Read the whole file.
|
||||
if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
|
||||
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
|
||||
pipeWriter.CloseWithError(gerr)
|
||||
return
|
||||
}
|
||||
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
|
||||
}()
|
||||
|
||||
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
|
||||
if err != nil {
|
||||
return PartInfo{}, toObjectErr(err, dstBucket, dstObject)
|
||||
}
|
||||
|
||||
// Explicitly close the reader.
|
||||
pipeReader.Close()
|
||||
|
||||
return partInfo, nil
|
||||
}
|
||||
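The pipe wiring in CopyObjectPart is a standard Go pattern: a producer goroutine writes into one end, and any failure is propagated to the consumer via CloseWithError. A self-contained sketch of just that pattern, with `strings.NewReader` standing in for GetObject:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		// Producer: any error surfaces as the consumer's read error.
		if _, err := io.Copy(pipeWriter, strings.NewReader("part data")); err != nil {
			pipeWriter.CloseWithError(err)
			return
		}
		pipeWriter.Close() // explicit close signals EOF: all data written
	}()

	// Consumer: stands in for PutObjectPart reading the pipe.
	b, err := ioutil.ReadAll(pipeReader)
	fmt.Println(string(b), err)
}
```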
// PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (string, error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
	if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
		return "", err
		return PartInfo{}, err
	}

	if _, err := fs.statBucketDir(bucket); err != nil {
		return "", toObjectErr(err, bucket)
		return PartInfo{}, toObjectErr(err, bucket)
	}

	// Hold the lock so that two parallel complete-multipart-uploads
@@ -358,9 +506,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
	uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)
	if _, err := fs.rwPool.Open(uploadsPath); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return "", traceError(InvalidUploadID{UploadID: uploadID})
			return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
		}
		return "", toObjectErr(traceError(err), bucket, object)
		return PartInfo{}, toObjectErr(traceError(err), bucket, object)
	}
	defer fs.rwPool.Close(uploadsPath)

@@ -371,16 +519,16 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
	rwlk, err := fs.rwPool.Write(fsMetaPath)
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return "", traceError(InvalidUploadID{UploadID: uploadID})
			return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
		}
		return "", toObjectErr(traceError(err), bucket, object)
		return PartInfo{}, toObjectErr(traceError(err), bucket, object)
	}
	defer rwlk.Close()

	fsMeta := fsMetaV1{}
	_, err = fsMeta.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
	_, err = fsMeta.ReadFrom(rwlk)
	if err != nil {
		return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
		return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
	}

	partSuffix := fmt.Sprintf("object%d", partID)
@@ -418,14 +566,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
	bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size)
	if cErr != nil {
		fsRemoveFile(fsPartPath)
		return "", toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
		return PartInfo{}, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
	}

	// Should return IncompleteBody{} error when reader has fewer
	// bytes than specified in request header.
	if bytesWritten < size {
		fsRemoveFile(fsPartPath)
		return "", traceError(IncompleteBody{})
		return PartInfo{}, traceError(IncompleteBody{})
	}

	// Delete temporary part in case of failure. If
@@ -436,14 +584,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	if md5Hex != "" {
		if newMD5Hex != md5Hex {
			return "", traceError(BadDigest{md5Hex, newMD5Hex})
			return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
		}
	}

	if sha256sum != "" {
		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
		if newSHA256sum != sha256sum {
			return "", traceError(SHA256Mismatch{})
			return PartInfo{}, traceError(SHA256Mismatch{})
		}
	}

@@ -456,14 +604,20 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
	fsNSPartPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, partPath)
	if err = fsRenameFile(fsPartPath, fsNSPartPath); err != nil {
		partLock.Unlock()
		return "", toObjectErr(err, minioMetaMultipartBucket, partPath)
		return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	// Save the object part info in `fs.json`.
	fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
	if _, err = fsMeta.WriteTo(rwlk); err != nil {
		partLock.Unlock()
		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
		return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
	fi, err := fsStatFile(partNamePath)
	if err != nil {
		return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
	}

	// Append the part in background.
@@ -477,7 +631,12 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
		partLock.Unlock()
	}()

	return newMD5Hex, nil
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime(),
		ETag:         newMD5Hex,
		Size:         fi.Size(),
	}, nil
}
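The `teeReader` that feeds `fsCreateFile` above hashes the stream as it is written, which is how `newMD5Hex` (the ETag) and the SHA-256 sum come to exist without a second pass over the data. A minimal sketch of that pattern, with `ioutil.Discard` standing in for the part file:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	md5Writer := md5.New()
	sha256Writer := sha256.New()

	// Tee incoming data through both hashers on its way to the sink.
	teeReader := io.TeeReader(strings.NewReader("incoming part data"),
		io.MultiWriter(md5Writer, sha256Writer))

	n, err := io.Copy(ioutil.Discard, teeReader) // stand-in for fsCreateFile
	fmt.Println(n, err)
	fmt.Println("md5:", hex.EncodeToString(md5Writer.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(sha256Writer.Sum(nil)))
}
```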
// listObjectParts - wrapper scanning through
@@ -499,7 +658,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
	defer fs.rwPool.Close(fsMetaPath)

	fsMeta := fsMetaV1{}
	_, err = fsMeta.ReadFrom((io.NewSectionReader(metaFile, 0, metaFile.Size())))
	_, err = fsMeta.ReadFrom(metaFile.LockedFile)
	if err != nil {
		return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath)
	}
@@ -517,9 +676,9 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
	partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, part.Name)
	fi, err = fsStatFile(partNamePath)
	if err != nil {
		return ListPartsInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partNamePath)
		return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, partNamePath)
	}
	result.Parts = append(result.Parts, partInfo{
	result.Parts = append(result.Parts, PartInfo{
		PartNumber:   part.Number,
		ETag:         part.ETag,
		LastModified: fi.ModTime(),
@@ -630,7 +789,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload

	fsMeta := fsMetaV1{}
	// Read saved fs metadata for ongoing multipart.
	_, err = fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size()))
	_, err = fsMeta.ReadFrom(rlk.LockedFile)
	if err != nil {
		fs.rwPool.Close(fsMetaPathMultipart)
		return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)
@@ -767,7 +926,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
	multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object)
	multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID)
	if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Remove entry from `uploads.json`.
@@ -777,7 +936,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload

	fi, err := fsStatFile(fsNSObjPath)
	if err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Return object info.
@@ -845,7 +1004,7 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
	multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object)
	multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID)
	if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil {
		return toObjectErr(traceError(err), bucket, object)
		return toObjectErr(err, bucket, object)
	}

	// Remove entry from `uploads.json`.

@@ -22,6 +22,33 @@ import (
	"testing"
)

// TestFSWriteUploadJSON - tests for writeUploadJSON for FS
func TestFSWriteUploadJSON(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	obj := initFSObjects(disk, t)

	bucketName := "bucket"
	objectName := "object"

	obj.MakeBucket(bucketName)
	_, err := obj.NewMultipartUpload(bucketName, objectName, nil)
	if err != nil {
		t.Fatal("Unexpected err: ", err)
	}

	// newMultipartUpload will fail.
	removeAll(disk) // Remove disk.
	_, err = obj.NewMultipartUpload(bucketName, objectName, nil)
	if err != nil {
		if _, ok := errorCause(err).(BucketNotFound); !ok {
			t.Fatal("Unexpected err: ", err)
		}
	}
}

// TestNewMultipartUploadFaultyDisk - test NewMultipartUpload with faulty disks
func TestNewMultipartUploadFaultyDisk(t *testing.T) {
	// Prepare for tests

77 cmd/fs-v1.go
@@ -245,7 +245,7 @@ func (fs fsObjects) statBucketDir(bucket string) (os.FileInfo, error) {
	}
	st, err := fsStatDir(bucketDir)
	if err != nil {
		return nil, traceError(err)
		return nil, err
	}
	return st, nil
}
@@ -259,7 +259,7 @@ func (fs fsObjects) MakeBucket(bucket string) error {
	}

	if err = fsMkdir(bucketDir); err != nil {
		return toObjectErr(traceError(err), bucket)
		return toObjectErr(err, bucket)
	}

	return nil
@@ -283,7 +283,7 @@ func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
	if err := checkPathLength(fs.fsPath); err != nil {
		return nil, err
		return nil, traceError(err)
	}
	var bucketInfos []BucketInfo
	entries, err := readDir(preparePath(fs.fsPath))
@@ -301,9 +301,9 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
	fi, err = fsStatDir(pathJoin(fs.fsPath, entry))
	if err != nil {
		// If the directory does not exist, skip the entry.
		if err == errVolumeNotFound {
		if errorCause(err) == errVolumeNotFound {
			continue
		} else if err == errVolumeAccessDenied {
		} else if errorCause(err) == errVolumeAccessDenied {
			// Skip the entry if it's a file.
			continue
		}
@@ -376,7 +376,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
	// Stat the file to get file size.
	fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject))
	if err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject)
		return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
	}

	// Check if this request is only metadata update.
@@ -386,7 +386,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
	var wlk *lock.LockedFile
	wlk, err = fs.rwPool.Write(fsMetaPath)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
		return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject)
	}
	// This close will allow for locks to be synchronized on `fs.json`.
	defer wlk.Close()
@@ -467,7 +467,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
	fsObjPath := pathJoin(fs.fsPath, bucket, object)
	reader, size, err := fsOpenFile(fsObjPath, offset)
	if err != nil {
		return toObjectErr(traceError(err), bucket, object)
		return toObjectErr(err, bucket, object)
	}
	defer reader.Close()

@@ -505,8 +505,13 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
	if err == nil {
		// Read from fs metadata only if it exists.
		defer fs.rwPool.Close(fsMetaPath)
		if _, rerr := fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); rerr != nil {
			return ObjectInfo{}, toObjectErr(rerr, bucket, object)
		if _, rerr := fsMeta.ReadFrom(rlk.LockedFile); rerr != nil {
			// `fs.json` can be empty due to a previously failed
			// PutObject() transaction; if we arrive at such
			// a situation we just ignore it and continue.
			if errorCause(rerr) != io.EOF {
				return ObjectInfo{}, toObjectErr(rerr, bucket, object)
			}
		}
	}

@@ -518,7 +523,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
	// Stat the file to get file size.
	fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fsMeta.ToObjectInfo(bucket, object, fi), nil
@@ -569,7 +574,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
	wlk, err = fs.rwPool.Create(fsMetaPath)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
	}
	// This close will allow for locks to be synchronized on `fs.json`.
	defer wlk.Close()
@@ -667,7 +672,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
	// Stat the file to fetch timestamp, size.
	fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Success.
@@ -694,20 +699,20 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
		defer rwlk.Close()
	}
	if lerr != nil && lerr != errFileNotFound {
		return toObjectErr(lerr, bucket, object)
		return toObjectErr(traceError(lerr), bucket, object)
	}
	}

	// Delete the object.
	if err := fsDeleteFile(pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
		return toObjectErr(traceError(err), bucket, object)
		return toObjectErr(err, bucket, object)
	}

	if bucket != minioMetaBucket {
		// Delete the metadata object.
		err := fsDeleteFile(minioMetaBucketDir, fsMetaPath)
		if err != nil && err != errFileNotFound {
			return toObjectErr(traceError(err), bucket, object)
		if err != nil && errorCause(err) != errFileNotFound {
			return toObjectErr(err, bucket, object)
		}
	}
	return nil
@@ -726,37 +731,11 @@ func (fs fsObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (entries []string, delayIsLeaf bool, err error) {
		entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
		if err == nil {
			// Listing needs to be sorted.
			sort.Strings(entries)

			// Filter entries that have the prefix prefixEntry.
			entries = filterMatchingPrefix(entries, prefixEntry)

			// Can isLeaf() check be delayed till when it has to be sent down the
			// treeWalkResult channel?
			delayIsLeaf = delayIsLeafCheck(entries)
			if delayIsLeaf {
				return entries, delayIsLeaf, nil
			}

			// isLeaf() check has to happen here so that trailing "/" for objects can be removed.
			for i, entry := range entries {
				if isLeaf(bucket, pathJoin(prefixDir, entry)) {
					entries[i] = strings.TrimSuffix(entry, slashSeparator)
				}
			}

			// Sort again after removing trailing "/" for objects as the previous sort
			// no longer holds good.
			sort.Strings(entries)

			// Success.
			return entries, delayIsLeaf, nil
		} // Return error at the end.

		// Error.
		return nil, false, err
		if err != nil {
			return nil, false, err
		}
		entries, delayIsLeaf = filterListEntries(bucket, prefixDir, entries, prefixEntry, isLeaf)
		return entries, delayIsLeaf, nil
	}

	// Return list factory instance.
@@ -811,7 +790,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
	var fi os.FileInfo
	fi, err = fsStatFile(pathJoin(fs.fsPath, bucket, entry))
	if err != nil {
		return ObjectInfo{}, toObjectErr(traceError(err), bucket, entry)
		return ObjectInfo{}, toObjectErr(err, bucket, entry)
	}
	fsMeta := fsMetaV1{}
	return fsMeta.ToObjectInfo(bucket, entry, fi), nil

@@ -77,13 +77,11 @@ func TestFSShutdown(t *testing.T) {
	removeAll(disk)

	// Test Shutdown with faulty disk
	for i := 1; i <= 5; i++ {
		fs, disk := prepareTest()
		fs.DeleteObject(bucketName, objectName)
		removeAll(disk)
		if err := fs.Shutdown(); err != nil {
			t.Fatal(i, ", Got unexpected fs shutdown error: ", err)
		}
	fs, disk = prepareTest()
	fs.DeleteObject(bucketName, objectName)
	removeAll(disk)
	if err := fs.Shutdown(); err != nil {
		t.Fatal("Got unexpected fs shutdown error: ", err)
	}
	}

@@ -17,6 +17,8 @@
package cmd

import (
	"bufio"
	"net"
	"net/http"
	"path"
	"strings"
@@ -141,8 +143,8 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler {
func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method == httpGET && guessIsBrowserReq(r) && globalIsBrowserEnabled {
		// For all browser requests set appropriate Cache-Control policies
		if strings.HasPrefix(r.URL.Path, reservedBucket+"/") {
			if strings.HasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" {
		if hasPrefix(r.URL.Path, reservedBucket+"/") {
			if hasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" {
				// For assets set cache expiry of one year. For each release, the name
				// of the asset will change and hence it cannot be served from cache.
				w.Header().Set("Cache-Control", "max-age=31536000")
@@ -356,3 +358,52 @@ func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Serve HTTP.
	h.handler.ServeHTTP(w, r)
}

// httpResponseRecorder wraps http.ResponseWriter
// to record some useful http response data.
type httpResponseRecorder struct {
	http.ResponseWriter
	respStatusCode int
}

// Wraps ResponseWriter's Write()
func (rww *httpResponseRecorder) Write(b []byte) (int, error) {
	return rww.ResponseWriter.Write(b)
}

// Wraps ResponseWriter's Flush()
func (rww *httpResponseRecorder) Flush() {
	rww.ResponseWriter.(http.Flusher).Flush()
}

// Wraps ResponseWriter's WriteHeader() and records
// the response status code
func (rww *httpResponseRecorder) WriteHeader(httpCode int) {
	rww.respStatusCode = httpCode
	rww.ResponseWriter.WriteHeader(httpCode)
}

func (rww *httpResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return rww.ResponseWriter.(http.Hijacker).Hijack()
}

// httpStatsHandler definition: gather HTTP statistics
type httpStatsHandler struct {
	handler http.Handler
}

// setHTTPStatsHandler sets an HTTP stats handler
func setHTTPStatsHandler(h http.Handler) http.Handler {
	return httpStatsHandler{handler: h}
}

func (h httpStatsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Wraps w to record http response information
	ww := &httpResponseRecorder{ResponseWriter: w}

	// Execute the request
	h.handler.ServeHTTP(ww, r)

	// Update http statistics
	globalHTTPStats.updateStats(r, ww)
}
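As a rough sketch of how the recorder and the stats handler compose (a plain println stands in for `globalHTTPStats.updateStats`, and the types are simplified stand-ins, not the actual ones above):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type recorder struct {
	http.ResponseWriter
	status int
}

// WriteHeader records the status code before delegating.
func (r *recorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// withStats wraps a handler the way setHTTPStatsHandler does.
func withStats(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		rec := &recorder{ResponseWriter: w}
		h.ServeHTTP(rec, req)
		fmt.Println("status:", rec.status) // stand-in for updateStats()
	})
}

func main() {
	h := withStats(http.NotFoundHandler())
	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/x", nil))
}
```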
@@ -31,11 +31,6 @@ import (
	"github.com/minio/minio/pkg/objcache"
)

// Global constants for Minio.
const (
	minGoVersion = ">= 1.7" // Minio requires at least Go v1.7
)

// minio configuration related constants.
const (
	globalMinioConfigVersion = "13"
@@ -51,6 +46,8 @@ const (
	globalMinioDefaultOwnerID      = "minio"
	globalMinioDefaultStorageClass = "STANDARD"
	globalWindowsOSName            = "windows"
	globalNetBSDOSName             = "netbsd"
	globalSolarisOSName            = "solaris"
	// Add new global values here.
)

@@ -59,6 +56,9 @@ const (
	// can reach that size according to https://aws.amazon.com/articles/1434
	maxFormFieldSize = int64(1 * humanize.MiByte)

	// Limit memory allocation to store multipart data
	maxFormMemory = int64(5 * humanize.MiByte)

	// The maximum allowed difference between the request generation time and the server processing time
	globalMaxSkewTime = 15 * time.Minute
)
@@ -82,6 +82,9 @@ var (
	// Caching is enabled only for RAM size > 8GiB.
	globalMaxCacheSize = uint64(0)

	// Maximum size of internal objects parts
	globalPutPartSize = int64(64 * 1024 * 1024)

	// Cache expiry.
	globalCacheExpiry = objcache.DefaultExpiry

@@ -110,15 +113,21 @@ var (
	// Minio server user agent string.
	globalServerUserAgent = "Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")"

	// Access key passed from the environment
	globalEnvAccessKey = os.Getenv("MINIO_ACCESS_KEY")

	// Secret key passed from the environment
	globalEnvSecretKey = os.Getenv("MINIO_SECRET_KEY")
	// Set to true if credentials were passed from env, default is false.
	globalIsEnvCreds = false

	// url.URL endpoints of disks that belong to the object storage.
	globalEndpoints = []*url.URL{}

	// Global server's network statistics
	globalConnStats = newConnStats()

	// Global HTTP request statistics
	globalHTTPStats = newHTTPStats()

	// Time when object layer was initialized on start up.
	globalBootTime time.Time

	// Add new variable global values here.
)

@@ -18,7 +18,6 @@ package cmd

import (
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"strings"
@@ -158,33 +157,52 @@ func extractMetadataFromForm(formValues map[string]string) map[string]string {
}

// Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader, fileName string, formValues map[string]string, err error) {
func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues map[string]string, err error) {
	/// HTML Form values
	formValues = make(map[string]string)
	fileName = ""
	for err == nil {
		var part *multipart.Part
		part, err = reader.NextPart()
		if part != nil {
			canonicalFormName := http.CanonicalHeaderKey(part.FormName())
			if canonicalFormName != "File" {
				var buffer []byte
				limitReader := io.LimitReader(part, maxFormFieldSize+1)
				buffer, err = ioutil.ReadAll(limitReader)
				if err != nil {
					return nil, "", nil, err
				}
				if int64(len(buffer)) > maxFormFieldSize {
					return nil, "", nil, errSizeUnexpected
				}
				formValues[canonicalFormName] = string(buffer)
			} else {
				filePart = part
				fileName = part.FileName()
				// As described in S3 spec, we expect file to be the last form field
				break

	// Iterate over form values
	for k, v := range form.Value {
		canonicalFormName := http.CanonicalHeaderKey(k)
		// Check if value's field exceeds S3 limit
		if int64(len(v[0])) > maxFormFieldSize {
			return nil, "", 0, nil, traceError(errSizeUnexpected)
		}
		// Set the form value
		formValues[canonicalFormName] = v[0]
	}

	// Iterate until we find a valid File field, then break
	for k, v := range form.File {
		canonicalFormName := http.CanonicalHeaderKey(k)
		if canonicalFormName == "File" {
			if len(v) == 0 {
				return nil, "", 0, nil, traceError(errInvalidArgument)
			}
			// Fetch fileHeader which has the uploaded file information
			fileHeader := v[0]
			// Set filename
			fileName = fileHeader.Filename
			// Open the uploaded part
			filePart, err = fileHeader.Open()
			if err != nil {
				return nil, "", 0, nil, traceError(err)
			}
			// Compute file size
			fileSize, err = filePart.(io.Seeker).Seek(0, 2)
			if err != nil {
				return nil, "", 0, nil, traceError(err)
			}
			// Reset Seek to the beginning
			_, err = filePart.(io.Seeker).Seek(0, 0)
			if err != nil {
				return nil, "", 0, nil, traceError(err)
			}
			// File found and ready for reading
			break
		}
	}
	return filePart, fileName, formValues, nil

	return filePart, fileName, fileSize, formValues, nil
}
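The Seek(0, 2) / Seek(0, 0) dance above is how the rewritten function learns the upload's size before reading it: seeking to the end reports the offset (the size), and seeking back to 0 rewinds for the real read. A self-contained sketch of just that mechanism:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	// Build an in-memory multipart body with one "File" field.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	fw, _ := w.CreateFormFile("File", "object.txt")
	fw.Write([]byte("hello world"))
	w.Close()

	// Parse it back, as the HTTP layer would.
	r := multipart.NewReader(&buf, w.Boundary())
	form, err := r.ReadForm(32 << 20)
	if err != nil {
		panic(err)
	}
	file, _ := form.File["File"][0].Open()
	defer file.Close()

	size, _ := file.Seek(0, 2) // seek to end: offset == size
	file.Seek(0, 0)            // rewind before the actual read
	fmt.Println("size:", size) // size: 11
}
```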
29 cmd/jwt.go
@@ -38,12 +38,15 @@ const (
	defaultInterNodeJWTExpiry = 100 * 365 * 24 * time.Hour
)

var errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length")
var errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length")
var (
	errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length")
	errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length")

var errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records")
var errAuthentication = errors.New("Authentication failed, check your access credentials")
var errNoAuthToken = errors.New("JWT token missing")
	errInvalidAccessKeyID   = errors.New("The access key ID you provided does not exist in our records")
	errChangeCredNotAllowed = errors.New("Changing access key and secret key not allowed")
	errAuthentication       = errors.New("Authentication failed, check your access credentials")
	errNoAuthToken          = errors.New("JWT token missing")
)

func authenticateJWT(accessKey, secretKey string, expiry time.Duration) (string, error) {
	// Trim spaces.
@@ -65,9 +68,15 @@ func authenticateJWT(accessKey, secretKey string, expiry time.Duration) (string,

	// Validate secret key.
	// Using bcrypt to avoid timing attacks.
	hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(serverCred.SecretKey), bcrypt.DefaultCost)
	if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil {
		return "", errAuthentication
	if serverCred.secretKeyHash != nil {
		if bcrypt.CompareHashAndPassword(serverCred.secretKeyHash, []byte(secretKey)) != nil {
			return "", errAuthentication
		}
	} else {
		// Secret key hash not set; generate and validate.
		if bcrypt.CompareHashAndPassword(mustGetHashedSecretKey(serverCred.SecretKey), []byte(secretKey)) != nil {
			return "", errAuthentication
		}
	}
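The change above caches the bcrypt hash (`serverCred.secretKeyHash`) instead of recomputing `GenerateFromPassword` on every request, which is expensive by design. The comparison itself stays the same; a minimal sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash once at config-load time and reuse the hash thereafter.
	hash, err := bcrypt.GenerateFromPassword([]byte("minio123"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// nil error means the supplied password matches the stored hash;
	// the comparison is constant-time with respect to the password.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("minio123")) == nil) // true
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong")) == nil)    // false
}
```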
	utcNow := time.Now().UTC()
@@ -107,13 +116,13 @@ func isAuthTokenValid(tokenString string) bool {
}

func isHTTPRequestValid(req *http.Request) bool {
	return webReqestAuthenticate(req) == nil
	return webRequestAuthenticate(req) == nil
}

// Check if the request is authenticated.
// Returns nil if the request is authenticated. errNoAuthToken if token missing.
// Returns errAuthentication for all other errors.
func webReqestAuthenticate(req *http.Request) error {
func webRequestAuthenticate(req *http.Request) error {
	jwtToken, err := jwtreq.ParseFromRequest(req, jwtreq.AuthorizationHeaderExtractor, keyFuncCallback)
	if err != nil {
		if err == jwtreq.ErrNoTokenInRequest {

@@ -75,10 +75,40 @@ func testAuthenticate(authType string, t *testing.T) {
	}
}

func TestNodeAuthenticate(t *testing.T) {
func TestAuthenticateNode(t *testing.T) {
	testAuthenticate("node", t)
}

func TestWebAuthenticate(t *testing.T) {
func TestAuthenticateWeb(t *testing.T) {
	testAuthenticate("web", t)
}

func BenchmarkAuthenticateNode(b *testing.B) {
	testPath, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		b.Fatalf("unable to initialize config file, %s", err)
	}
	defer removeAll(testPath)

	creds := serverConfig.GetCredential()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		authenticateNode(creds.AccessKey, creds.SecretKey)
	}
}

func BenchmarkAuthenticateWeb(b *testing.B) {
	testPath, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		b.Fatalf("unable to initialize config file, %s", err)
	}
	defer removeAll(testPath)

	creds := serverConfig.GetCredential()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		authenticateWeb(creds.AccessKey, creds.SecretKey)
	}
}

@@ -16,10 +16,7 @@

package cmd

import (
	"strings"
	"time"
)
import "time"

// SystemLockState - Structure to fill the lock state of entire object storage.
// That is the total locks held, total calls blocked on locks and state of all the locks for the entire system.
@@ -68,8 +65,8 @@ type OpsLockState struct {
	Duration time.Duration `json:"duration"` // Duration since the lock was held.
}

// listLocksInfo - Fetches locks held on bucket, matching prefix older than relTime.
func listLocksInfo(bucket, prefix string, relTime time.Duration) []VolumeLockInfo {
// listLocksInfo - Fetches locks held on bucket, matching prefix held for longer than duration.
func listLocksInfo(bucket, prefix string, duration time.Duration) []VolumeLockInfo {
	globalNSMutex.lockMapMutex.Lock()
	defer globalNSMutex.lockMapMutex.Unlock()

@@ -82,7 +79,7 @@ func listLocksInfo(bucket, prefix string, relTime time.Duration) []VolumeLockInf
		continue
	}
	// NB: an empty prefix matches all param.path.
	if !strings.HasPrefix(param.path, prefix) {
	if !hasPrefix(param.path, prefix) {
		continue
	}

@@ -95,11 +92,12 @@ func listLocksInfo(bucket, prefix string, relTime time.Duration) []VolumeLockInf
	}
	// Filter locks that are held on bucket, prefix.
	for opsID, lockInfo := range debugLock.lockInfo {
		// Filter locks that were held for longer than duration.
		elapsed := timeNow.Sub(lockInfo.since)
		if elapsed < relTime {
		if elapsed < duration {
			continue
		}
		// Add locks that are older than relTime.
		// Add locks that are held for longer than duration.
		volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject,
			OpsLockState{
				OperationID: opsID,

@@ -45,34 +45,34 @@ func TestListLocksInfo(t *testing.T) {
	testCases := []struct {
		bucket   string
		prefix   string
		relTime  time.Duration
		duration time.Duration
		numLocks int
	}{
		// Test 1 - Matches all the locks acquired above.
		{
			bucket:   "bucket1",
			prefix:   "prefix1",
			relTime:  time.Duration(0 * time.Second),
			duration: time.Duration(0 * time.Second),
			numLocks: 20,
		},
		// Test 2 - Bucket doesn't match.
		{
			bucket:   "bucket",
			prefix:   "prefix1",
			relTime:  time.Duration(0 * time.Second),
			duration: time.Duration(0 * time.Second),
			numLocks: 0,
		},
		// Test 3 - Prefix doesn't match.
		{
			bucket:   "bucket1",
			prefix:   "prefix11",
			relTime:  time.Duration(0 * time.Second),
			duration: time.Duration(0 * time.Second),
			numLocks: 0,
		},
	}

	for i, test := range testCases {
		actual := listLocksInfo(test.bucket, test.prefix, test.relTime)
		actual := listLocksInfo(test.bucket, test.prefix, test.duration)
		if len(actual) != test.numLocks {
			t.Errorf("Test %d - Expected %d locks but observed %d locks",
				i+1, test.numLocks, len(actual))

@@ -26,7 +26,7 @@ type consoleLogger struct {

// enable console logger.
func enableConsoleLogger() {
	clogger := serverConfig.GetConsoleLogger()
	clogger := serverConfig.Logger.GetConsole()
	if !clogger.Enable {
		return
	}

@@ -35,7 +35,7 @@ type localFile struct {
}

func enableFileLogger() {
	flogger := serverConfig.GetFileLogger()
	flogger := serverConfig.Logger.GetFile()
	if !flogger.Enable || flogger.Filename == "" {
		return
	}

@@ -39,11 +39,41 @@ var log = struct {
// - console [default]
// - file
type logger struct {
	sync.RWMutex
	Console consoleLogger `json:"console"`
	File    fileLogger    `json:"file"`
	// Add new loggers here.
}

/// Logger related.

// SetFile sets a new file logger.
func (l *logger) SetFile(flogger fileLogger) {
	l.Lock()
	defer l.Unlock()
	l.File = flogger
}

// GetFile gets the current file logger.
func (l *logger) GetFile() fileLogger {
	l.RLock()
	defer l.RUnlock()
	return l.File
}

// SetConsole sets a new console logger.
func (l *logger) SetConsole(clogger consoleLogger) {
	l.Lock()
	defer l.Unlock()
	l.Console = clogger
}

func (l *logger) GetConsole() consoleLogger {
	l.RLock()
	defer l.RUnlock()
	return l.Console
}

// Get file, line, function name of the caller.
func callerSource() string {
	pc, file, line, success := runtime.Caller(2)

82 cmd/main.go
@@ -53,24 +53,18 @@ DESCRIPTION:
	{{.Description}}

USAGE:
	minio {{if .Flags}}[flags] {{end}}command{{if .Flags}}{{end}} [arguments...]
	minio {{if .VisibleFlags}}[flags] {{end}}command{{if .VisibleFlags}}{{end}} [arguments...]

COMMANDS:
	{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
	{{end}}{{if .Flags}}
	{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
	{{end}}{{if .VisibleFlags}}
FLAGS:
	{{range .Flags}}{{.}}
	{{range .VisibleFlags}}{{.}}
	{{end}}{{end}}
VERSION:
	` + Version +
	`{{ "\n"}}`

// init - check the environment before main starts
func init() {
	// Check if minio was compiled using a supported version of Golang.
	checkGoVersion()
}

func migrate() {
	// Migrate config file
	err := migrateConfig()
@@ -94,7 +88,7 @@ func findClosestCommands(command string) []string {
	sort.Strings(closestCommands)
	// Suggest other close commands - allow missed, wrongly added and
	// even transposed characters
	for _, value := range commandsTree.walk(commandsTree.root) {
	for _, value := range commandsTree.Walk(commandsTree.Root()) {
		if sort.SearchStrings(closestCommands, value.(string)) < len(closestCommands) {
			continue
		}
@@ -151,18 +145,40 @@ func checkMainSyntax(c *cli.Context) {
func checkUpdate() {
	// Do not print update messages, if quiet flag is set.
	if !globalQuiet {
		updateMsg, _, err := getReleaseUpdate(minioUpdateStableURL, 1*time.Second)
		older, downloadURL, err := getUpdateInfo(1 * time.Second)
		if err != nil {
			// Ignore any errors during getReleaseUpdate(), possibly
			// because of network errors.
			// It's OK to ignore any errors during getUpdateInfo() here.
			return
		}
		if updateMsg.Update {
			console.Println(updateMsg)
		if older > time.Duration(0) {
			console.Println(colorizeUpdateMessage(downloadURL, older))
		}
	}
}

// Initializes a new config if it doesn't exist, else migrates any old config
// to newer config and finally loads the config to memory.
func initConfig() {
	envCreds := mustGetCredentialFromEnv()

	// Config file does not exist, we create it fresh and return upon success.
	if !isConfigFileExists() {
		if err := newConfig(envCreds); err != nil {
			console.Fatalf("Unable to initialize minio config for the first time. Err: %s.\n", err)
		}
		console.Println("Created minio configuration file successfully at " + mustGetConfigPath())
		return
	}

	// Migrate any old version of config / state files to newer format.
	migrate()

	// Once we have migrated all the old config, now load them.
	if err := loadConfig(envCreds); err != nil {
		console.Fatalf("Unable to initialize minio config. Err: %s.\n", err)
	}
}

// Generic Minio initialization to create/load config, prepare loggers, etc.
func minioInit(ctx *cli.Context) {
	// Set global variables after parsing passed arguments
@@ -174,43 +190,19 @@ func minioInit(ctx *cli.Context) {
	// Is TLS configured?.
	globalIsSSL = isSSL()

	// Migrate any old version of config / state files to newer format.
	migrate()

	// Initialize config.
	configCreated, err := initConfig()
	if err != nil {
		console.Fatalf("Unable to initialize minio config. Err: %s.\n", err)
	}
	if configCreated {
		console.Println("Created minio configuration file at " + mustGetConfigPath())
	}
	// Initialize minio server config.
	initConfig()

	// Enable all loggers by now so we can use errorIf() and fatalIf()
	enableLoggers()

	// Fetch access keys from environment variables and update the config.
	if globalEnvAccessKey != "" && globalEnvSecretKey != "" {
		// Set new credentials.
		serverConfig.SetCredential(credential{
			AccessKey: globalEnvAccessKey,
			SecretKey: globalEnvSecretKey,
		})
	}
	if !isAccessKeyValid(serverConfig.GetCredential().AccessKey) {
		fatalIf(errInvalidArgument, "Invalid access key. Accept only a string starting with a alphabetic and containing from 5 to 20 characters.")
	}
	if !isSecretKeyValid(serverConfig.GetCredential().SecretKey) {
		fatalIf(errInvalidArgument, "Invalid secret key. Accept only a string containing from 8 to 40 characters.")
	}

	// Init the error tracing module.
	initError()

}

// Main - main for minio server.
func Main() {
func Main(args []string, exitFn func(int)) {
	app := registerApp()
	app.Before = func(c *cli.Context) error {
		// Valid input arguments to main.
@@ -224,5 +216,7 @@ func Main() {
	}

	// Run the app - exit on error.
	app.RunAndExitOnError()
	if err := app.Run(args); err != nil {
		exitFn(1)
	}
}
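With the new signature, the caller supplies both the argument vector and the exit function, so tests can inject a stub instead of killing the process. A sketch of what a caller's main package might look like under this change — hypothetical wiring, not taken from the commit:

```go
package main

import (
	"os"

	minio "github.com/minio/minio/cmd"
)

func main() {
	// os.Exit is the real exit function; a test could instead pass
	// func(code int) { panic(code) } and recover, keeping the process alive.
	minio.Main(os.Args, os.Exit)
}
```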
228 cmd/notifier-config.go (new file)
@@ -0,0 +1,228 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import "sync"

// Notifier represents collection of supported notification queues.
type notifier struct {
	sync.RWMutex
	AMQP          amqpConfigs          `json:"amqp"`
	NATS          natsConfigs          `json:"nats"`
	ElasticSearch elasticSearchConfigs `json:"elasticsearch"`
	Redis         redisConfigs         `json:"redis"`
	PostgreSQL    postgreSQLConfigs    `json:"postgresql"`
	Kafka         kafkaConfigs         `json:"kafka"`
	Webhook       webhookConfigs       `json:"webhook"`
	// Add new notification queues.
}

type amqpConfigs map[string]amqpNotify

func (a amqpConfigs) Clone() amqpConfigs {
	a2 := make(amqpConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type natsConfigs map[string]natsNotify

func (a natsConfigs) Clone() natsConfigs {
	a2 := make(natsConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type elasticSearchConfigs map[string]elasticSearchNotify

func (a elasticSearchConfigs) Clone() elasticSearchConfigs {
	a2 := make(elasticSearchConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type redisConfigs map[string]redisNotify

func (a redisConfigs) Clone() redisConfigs {
	a2 := make(redisConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type postgreSQLConfigs map[string]postgreSQLNotify

func (a postgreSQLConfigs) Clone() postgreSQLConfigs {
	a2 := make(postgreSQLConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type kafkaConfigs map[string]kafkaNotify

func (a kafkaConfigs) Clone() kafkaConfigs {
	a2 := make(kafkaConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

type webhookConfigs map[string]webhookNotify

func (a webhookConfigs) Clone() webhookConfigs {
	a2 := make(webhookConfigs, len(a))
	for k, v := range a {
		a2[k] = v
	}
	return a2
}

func (n *notifier) SetAMQPByID(accountID string, amqpn amqpNotify) {
	n.Lock()
	defer n.Unlock()
	n.AMQP[accountID] = amqpn
}

func (n *notifier) GetAMQP() map[string]amqpNotify {
	n.RLock()
	defer n.RUnlock()
	return n.AMQP.Clone()
}

func (n *notifier) GetAMQPByID(accountID string) amqpNotify {
	n.RLock()
	defer n.RUnlock()
	return n.AMQP[accountID]
}

func (n *notifier) SetNATSByID(accountID string, natsn natsNotify) {
	n.Lock()
	defer n.Unlock()
	n.NATS[accountID] = natsn
}

func (n *notifier) GetNATS() map[string]natsNotify {
	n.RLock()
	defer n.RUnlock()
	return n.NATS.Clone()
}

func (n *notifier) GetNATSByID(accountID string) natsNotify {
	n.RLock()
	defer n.RUnlock()
	return n.NATS[accountID]
}

func (n *notifier) SetElasticSearchByID(accountID string, es elasticSearchNotify) {
	n.Lock()
	defer n.Unlock()
	n.ElasticSearch[accountID] = es
}

func (n *notifier) GetElasticSearchByID(accountID string) elasticSearchNotify {
	n.RLock()
	defer n.RUnlock()
	return n.ElasticSearch[accountID]
}

func (n *notifier) GetElasticSearch() map[string]elasticSearchNotify {
	n.RLock()
	defer n.RUnlock()
	return n.ElasticSearch.Clone()
}

func (n *notifier) SetRedisByID(accountID string, r redisNotify) {
	n.Lock()
	defer n.Unlock()
	n.Redis[accountID] = r
}

func (n *notifier) GetRedis() map[string]redisNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Redis.Clone()
}

func (n *notifier) GetRedisByID(accountID string) redisNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Redis[accountID]
}

func (n *notifier) GetWebhook() map[string]webhookNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Webhook.Clone()
}

func (n *notifier) GetWebhookByID(accountID string) webhookNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Webhook[accountID]
}

func (n *notifier) SetWebhookByID(accountID string, pgn webhookNotify) {
	n.Lock()
	defer n.Unlock()
	n.Webhook[accountID] = pgn
}

func (n *notifier) SetPostgreSQLByID(accountID string, pgn postgreSQLNotify) {
	n.Lock()
	defer n.Unlock()
	n.PostgreSQL[accountID] = pgn
}

func (n *notifier) GetPostgreSQL() map[string]postgreSQLNotify {
	n.RLock()
	defer n.RUnlock()
	return n.PostgreSQL.Clone()
}

func (n *notifier) GetPostgreSQLByID(accountID string) postgreSQLNotify {
	n.RLock()
	defer n.RUnlock()
	return n.PostgreSQL[accountID]
}

func (n *notifier) SetKafkaByID(accountID string, kn kafkaNotify) {
	n.Lock()
	defer n.Unlock()
	n.Kafka[accountID] = kn
}

func (n *notifier) GetKafka() map[string]kafkaNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Kafka.Clone()
}

func (n *notifier) GetKafkaByID(accountID string) kafkaNotify {
	n.RLock()
	defer n.RUnlock()
	return n.Kafka[accountID]
}
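Note the pattern in the new file: `Get*ByID` returns a single value under `RLock`, while the whole-map getters return a `Clone()`. Handing out the internal map would let callers iterate it while a `Set*ByID` writes it, racing despite the mutex. A minimal sketch of the idea with illustrative types:

```go
package main

import (
	"fmt"
	"sync"
)

type config struct {
	sync.RWMutex
	targets map[string]string
}

// Get returns a copy, so callers can iterate without holding the lock.
func (c *config) Get() map[string]string {
	c.RLock()
	defer c.RUnlock()
	out := make(map[string]string, len(c.targets))
	for k, v := range c.targets {
		out[k] = v
	}
	return out
}

func (c *config) Set(k, v string) {
	c.Lock()
	defer c.Unlock()
	c.targets[k] = v
}

func main() {
	c := &config{targets: map[string]string{}}
	c.Set("1", "amqp")
	m := c.Get()
	c.Set("2", "nats") // safe: m is a snapshot, not the live map
	fmt.Println(m, c.Get())
}
```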
@@ -1,7 +1,5 @@
//+build !amd64 noasm appengine

/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,8 +14,4 @@
 * limitations under the License.
 */

package blake2b

func compress(d *digest, p []uint8) {
	compressGeneric(d, p)
}
package cmd
@@ -18,7 +18,6 @@ package cmd

import (
	"errors"
	"strings"

	"github.com/minio/minio/pkg/wildcard"
)
@@ -55,24 +54,12 @@ const (

var errNotifyNotEnabled = errors.New("requested notifier not enabled")

// Notifier represents collection of supported notification queues.
type notifier struct {
	AMQP          map[string]amqpNotify          `json:"amqp"`
	NATS          map[string]natsNotify          `json:"nats"`
	ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"`
	Redis         map[string]redisNotify         `json:"redis"`
	PostgreSQL    map[string]postgreSQLNotify    `json:"postgresql"`
	Kafka         map[string]kafkaNotify         `json:"kafka"`
	Webhook       map[string]webhookNotify       `json:"webhook"`
	// Add new notification queues.
}

// Returns true if queueArn is for an AMQP queue.
func isAMQPQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeAMQP {
		return false
	}
	amqpL := serverConfig.GetAMQPNotifyByID(sqsArn.AccountID)
	amqpL := serverConfig.Notify.GetAMQPByID(sqsArn.AccountID)
	if !amqpL.Enable {
		return false
	}
@@ -91,7 +78,7 @@ func isNATSQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeNATS {
		return false
	}
	natsL := serverConfig.GetNATSNotifyByID(sqsArn.AccountID)
	natsL := serverConfig.Notify.GetNATSByID(sqsArn.AccountID)
	if !natsL.Enable {
		return false
	}
@@ -110,7 +97,7 @@ func isWebhookQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeWebhook {
		return false
	}
	rNotify := serverConfig.GetWebhookNotifyByID(sqsArn.AccountID)
	rNotify := serverConfig.Notify.GetWebhookByID(sqsArn.AccountID)
	if !rNotify.Enable {
		return false
	}
@@ -122,7 +109,7 @@ func isRedisQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeRedis {
		return false
	}
	rNotify := serverConfig.GetRedisNotifyByID(sqsArn.AccountID)
	rNotify := serverConfig.Notify.GetRedisByID(sqsArn.AccountID)
	if !rNotify.Enable {
		return false
	}
@@ -141,7 +128,7 @@ func isElasticQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeElastic {
		return false
	}
	esNotify := serverConfig.GetElasticSearchNotifyByID(sqsArn.AccountID)
	esNotify := serverConfig.Notify.GetElasticSearchByID(sqsArn.AccountID)
	if !esNotify.Enable {
		return false
	}
@@ -159,7 +146,7 @@ func isPostgreSQLQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypePostgreSQL {
		return false
	}
	pgNotify := serverConfig.GetPostgreSQLNotifyByID(sqsArn.AccountID)
	pgNotify := serverConfig.Notify.GetPostgreSQLByID(sqsArn.AccountID)
	if !pgNotify.Enable {
		return false
	}
@@ -177,7 +164,7 @@ func isKafkaQueue(sqsArn arnSQS) bool {
	if sqsArn.Type != queueTypeKafka {
		return false
	}
	kafkaNotifyCfg := serverConfig.GetKafkaNotifyByID(sqsArn.AccountID)
	kafkaNotifyCfg := serverConfig.Notify.GetKafkaByID(sqsArn.AccountID)
	if !kafkaNotifyCfg.Enable {
		return false
	}
@@ -206,9 +193,9 @@ func filterRuleMatch(object string, frs []filterRule) bool {
	var prefixMatch, suffixMatch = true, true
	for _, fr := range frs {
		if isValidFilterNamePrefix(fr.Name) {
			prefixMatch = strings.HasPrefix(object, fr.Value)
			prefixMatch = hasPrefix(object, fr.Value)
		} else if isValidFilterNameSuffix(fr.Name) {
			suffixMatch = strings.HasSuffix(object, fr.Value)
			suffixMatch = hasSuffix(object, fr.Value)
		}
	}
	return prefixMatch && suffixMatch
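The semantics of `filterRuleMatch` are: every configured rule narrows the match, and an object fires the event only if all of them hold. A small illustrative sketch (types simplified, not the actual filterRule definition):

```go
package main

import (
	"fmt"
	"strings"
)

type rule struct{ name, value string }

// match reports whether object satisfies every prefix/suffix rule.
func match(object string, rules []rule) bool {
	prefixOK, suffixOK := true, true
	for _, r := range rules {
		switch r.name {
		case "prefix":
			prefixOK = strings.HasPrefix(object, r.value)
		case "suffix":
			suffixOK = strings.HasSuffix(object, r.value)
		}
	}
	return prefixOK && suffixOK
}

func main() {
	rules := []rule{{"prefix", "photos/"}, {"suffix", ".jpg"}}
	fmt.Println(match("photos/cat.jpg", rules)) // true
	fmt.Println(match("docs/cat.jpg", rules))   // false: prefix rule fails
}
```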
|
||||
@@ -59,7 +59,7 @@ func dialAMQP(amqpL amqpNotify) (amqpConn, error) {
}

func newAMQPNotify(accountID string) (*logrus.Logger, error) {
-	amqpL := serverConfig.GetAMQPNotifyByID(accountID)
+	amqpL := serverConfig.Notify.GetAMQPByID(accountID)

	// Connect to amqp server.
	amqpC, err := dialAMQP(amqpL)

@@ -55,7 +55,7 @@ func dialElastic(esNotify elasticSearchNotify) (*elastic.Client, error) {
}

func newElasticNotify(accountID string) (*logrus.Logger, error) {
-	esNotify := serverConfig.GetElasticSearchNotifyByID(accountID)
+	esNotify := serverConfig.Notify.GetElasticSearchByID(accountID)

	// Dial to elastic search.
	client, err := dialElastic(esNotify)

@@ -75,7 +75,7 @@ func dialKafka(kn kafkaNotify) (kafkaConn, error) {
}

func newKafkaNotify(accountID string) (*logrus.Logger, error) {
-	kafkaNotifyCfg := serverConfig.GetKafkaNotifyByID(accountID)
+	kafkaNotifyCfg := serverConfig.Notify.GetKafkaByID(accountID)

	// Try connecting to the configured Kafka broker(s).
	kc, err := dialKafka(kafkaNotifyCfg)

@@ -127,7 +127,7 @@ func closeNATS(conn natsIOConn) {
}

func newNATSNotify(accountID string) (*logrus.Logger, error) {
-	natsL := serverConfig.GetNATSNotifyByID(accountID)
+	natsL := serverConfig.Notify.GetNATSByID(accountID)

	// Connect to nats server.
	natsC, err := dialNATS(natsL, false)

@@ -174,7 +174,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
}

func newPostgreSQLNotify(accountID string) (*logrus.Logger, error) {
-	pgNotify := serverConfig.GetPostgreSQLNotifyByID(accountID)
+	pgNotify := serverConfig.Notify.GetPostgreSQLByID(accountID)

	// Dial postgres
	pgC, err := dialPostgreSQL(pgNotify)

@@ -83,7 +83,7 @@ func dialRedis(rNotify redisNotify) (*redis.Pool, error) {
}

func newRedisNotify(accountID string) (*logrus.Logger, error) {
-	rNotify := serverConfig.GetRedisNotifyByID(accountID)
+	rNotify := serverConfig.Notify.GetRedisByID(accountID)

	// Dial redis.
	rPool, err := dialRedis(rNotify)

@@ -52,7 +52,7 @@ func lookupEndpoint(u *url.URL) error {

// Initializes new webhook logrus notifier.
func newWebhookNotify(accountID string) (*logrus.Logger, error) {
-	rNotify := serverConfig.GetWebhookNotifyByID(accountID)
+	rNotify := serverConfig.Notify.GetWebhookByID(accountID)

	if rNotify.Endpoint == "" {
		return nil, errInvalidArgument

@@ -119,6 +119,9 @@ func (n httpConn) Fire(entry *logrus.Entry) error {
		return err
	}

+	// Make sure to close the response body so the connection can be re-used.
+	defer resp.Body.Close()
+
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusAccepted &&
		resp.StatusCode != http.StatusContinue {
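The last hunk above is a real fix rather than a rename: Go's HTTP transport only returns a connection to its keep-alive pool once the response body has been read and closed. A small illustrative sketch of the pattern in generic Go (the endpoint is a placeholder, and this is not the repository's own hook type):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// notify posts to a webhook-style endpoint and disposes of the response
// correctly so the keep-alive connection can be reused by the next call.
func notify(endpoint string) error {
	resp, err := http.Post(endpoint, "application/json", nil)
	if err != nil {
		return err
	}
	// Close the body on every path; draining it first lets the transport
	// recycle the connection instead of tearing it down.
	defer resp.Body.Close()
	if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(notify("http://localhost:8080/webhook")) // hypothetical endpoint
}
```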
@@ -51,13 +51,13 @@ func TestNewWebHookNotify(t *testing.T) {
		t.Fatal("Unexpected should fail")
	}

-	serverConfig.SetWebhookNotifyByID("10", webhookNotify{Enable: true, Endpoint: "http://www."})
+	serverConfig.Notify.SetWebhookByID("10", webhookNotify{Enable: true, Endpoint: "http://www."})
	_, err = newWebhookNotify("10")
	if err == nil {
		t.Fatal("Unexpected should fail with lookupHost")
	}

-	serverConfig.SetWebhookNotifyByID("15", webhookNotify{Enable: true, Endpoint: "http://%"})
+	serverConfig.Notify.SetWebhookByID("15", webhookNotify{Enable: true, Endpoint: "http://%"})
	_, err = newWebhookNotify("15")
	if err == nil {
		t.Fatal("Unexpected should fail with invalid URL escape")
@@ -66,7 +66,7 @@ func TestNewWebHookNotify(t *testing.T) {
	server := httptest.NewServer(postHandler{})
	defer server.Close()

-	serverConfig.SetWebhookNotifyByID("20", webhookNotify{Enable: true, Endpoint: server.URL})
+	serverConfig.Notify.SetWebhookByID("20", webhookNotify{Enable: true, Endpoint: server.URL})
	webhook, err := newWebhookNotify("20")
	if err != nil {
		t.Fatal("Unexpected shouldn't fail", err)
@@ -116,8 +116,8 @@ func TestGetPath(t *testing.T) {
			{"D:\\", "d:\\"},
			{"D:", "d:"},
			{"\\", "\\"},
			{"http://localhost/d:/export", "d:/export"},
			{"https://localhost/d:/export", "d:/export"},
			{"http://127.0.0.1/d:/export", "d:/export"},
			{"https://127.0.0.1/d:/export", "d:/export"},
		}
	} else {
		testCases = []struct {
@@ -125,8 +125,8 @@ func TestGetPath(t *testing.T) {
			path string
		}{
			{"/export", "/export"},
			{"http://localhost/export", "/export"},
			{"https://localhost/export", "/export"},
			{"http://127.0.0.1/export", "/export"},
			{"https://127.0.0.1/export", "/export"},
		}
	}
	testCasesCommon := []struct {
@@ -26,8 +26,8 @@ const (
	Unknown BackendType = iota
	// Filesystem backend.
	FS
-	// Multi disk XL (single, distributed) backend.
-	XL
+	// Multi disk Erasure (single, distributed) backend.
+	Erasure
	// Add your own backend.
)

@@ -39,10 +39,10 @@ type StorageInfo struct {
	Free int64
	// Backend type.
	Backend struct {
-		// Represents various backend types, currently on FS and XL.
+		// Represents various backend types, currently on FS and Erasure.
		Type BackendType

-		// Following fields are only meaningful if BackendType is XL.
+		// Following fields are only meaningful if BackendType is Erasure.
		OnlineDisks  int // Online disks during server startup.
		OfflineDisks int // Offline disks during server startup.
		ReadQuorum   int // Minimum disks required for successful read operations.
@@ -145,7 +145,7 @@ type ListPartsInfo struct {
	IsTruncated bool

	// List of all parts.
-	Parts []partInfo
+	Parts []PartInfo

	EncodingType string // Not supported yet.
}
@@ -220,8 +220,8 @@ type ListObjectsInfo struct {
	Prefixes []string
}

-// partInfo - represents individual part metadata.
-type partInfo struct {
+// PartInfo - represents individual part metadata.
+type PartInfo struct {
	// Part number that identifies the part. This is a positive integer between
	// 1 and 10,000.
	PartNumber int
@@ -16,11 +16,7 @@

package cmd

-import (
-	"strings"
-
-	"github.com/skyrings/skyring-common/tools/uuid"
-)
+import "github.com/skyrings/skyring-common/tools/uuid"

// Checks on GetObject arguments, bucket and object.
func checkGetObjArgs(bucket, object string) error {

@@ -69,7 +65,7 @@ func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer
		})
	}
	// Verify if marker has prefix.
-	if marker != "" && !strings.HasPrefix(marker, prefix) {
+	if marker != "" && !hasPrefix(marker, prefix) {
		return traceError(InvalidMarkerPrefixCombination{
			Marker: marker,
			Prefix: prefix,

@@ -84,7 +80,7 @@ func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter
		return err
	}
	if uploadIDMarker != "" {
-		if strings.HasSuffix(keyMarker, slashSeparator) {
+		if hasSuffix(keyMarker, slashSeparator) {
			return traceError(InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,
				KeyMarker:      keyMarker,
@@ -41,7 +41,8 @@ type ObjectLayer interface {
	// Multipart operations.
	ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
	NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
-	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (md5 string, err error)
+	CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error)
+	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error)
	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
	AbortMultipartUpload(bucket, object, uploadID string) error
	CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error)
@@ -22,7 +22,6 @@ import (
	"io/ioutil"
	"path"
	"sort"
	"sync"
	"time"

	"github.com/minio/minio/pkg/lock"

@@ -76,26 +75,30 @@ func (u *uploadsV1) IsEmpty() bool {
	return len(u.Uploads) == 0
}

-func (u *uploadsV1) WriteTo(writer io.Writer) (n int64, err error) {
+func (u *uploadsV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
	// Serialize to prepare to write to disk.
	var uplBytes []byte
	uplBytes, err = json.Marshal(u)
	if err != nil {
		return 0, traceError(err)
	}
-	if err = writer.(*lock.LockedFile).Truncate(0); err != nil {
+	if err = lk.Truncate(0); err != nil {
		return 0, traceError(err)
	}
-	_, err = writer.Write(uplBytes)
+	_, err = lk.Write(uplBytes)
	if err != nil {
		return 0, traceError(err)
	}
	return int64(len(uplBytes)), nil
}

-func (u *uploadsV1) ReadFrom(reader io.Reader) (n int64, err error) {
+func (u *uploadsV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
	var uploadIDBytes []byte
-	uploadIDBytes, err = ioutil.ReadAll(reader)
+	fi, err := lk.Stat()
+	if err != nil {
+		return 0, traceError(err)
+	}
+	uploadIDBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
	if err != nil {
		return 0, traceError(err)
	}
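The rewritten ReadFrom no longer consumes the *lock.LockedFile as a plain io.Reader, because the handle's seek offset is shared with the writer side; instead it stats the file and reads through io.NewSectionReader, which needs only io.ReaderAt. A minimal sketch of that pattern against a plain *os.File, which also satisfies io.ReaderAt (the lock package itself is minio-internal, and the path below is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// readWhole reads an entire file through a section reader, leaving the
// file's own seek offset untouched - useful when the handle is shared.
func readWhole(f *os.File) ([]byte, error) {
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	// NewSectionReader only uses ReadAt, so other users of the same
	// handle never race on the Seek/Read cursor state.
	return ioutil.ReadAll(io.NewSectionReader(f, 0, fi.Size()))
}

func main() {
	f, err := os.Open("uploads.json") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	data, err := readWhole(f)
	fmt.Println(len(data), err)
}
```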
@@ -157,45 +160,6 @@ func writeUploadJSON(u *uploadsV1, uploadsPath, tmpPath string, disk StorageAPI)
	return nil
}

-// Wrapper which removes all the uploaded parts.
-func cleanupUploadedParts(bucket, object, uploadID string, storageDisks ...StorageAPI) error {
-	var errs = make([]error, len(storageDisks))
-	var wg = &sync.WaitGroup{}
-
-	// Construct uploadIDPath.
-	uploadIDPath := path.Join(bucket, object, uploadID)
-
-	// Cleanup uploadID for all disks.
-	for index, disk := range storageDisks {
-		if disk == nil {
-			errs[index] = traceError(errDiskNotFound)
-			continue
-		}
-		wg.Add(1)
-		// Cleanup each uploadID in a routine.
-		go func(index int, disk StorageAPI) {
-			defer wg.Done()
-			err := cleanupDir(disk, minioMetaMultipartBucket, uploadIDPath)
-			if err != nil {
-				errs[index] = err
-				return
-			}
-			errs[index] = nil
-		}(index, disk)
-	}
-
-	// Wait for all the cleanups to finish.
-	wg.Wait()
-
-	// Return first error.
-	for _, err := range errs {
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
// listMultipartUploadIDs - list all the upload ids from a marker up to 'count'.
func listMultipartUploadIDs(bucketName, objectName, uploadIDMarker string, count int, disk StorageAPI) ([]uploadMetadata, bool, error) {
	var uploads []uploadMetadata
@@ -346,7 +346,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH

	// Validate all the test cases.
	for i, testCase := range testCases {
-		actualMd5Hex, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, testCase.inputSHA256)
+		actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, testCase.inputSHA256)
		// All are test cases above are expected to fail.
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@@ -363,8 +363,8 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
		// Test passes as expected, but the output values are verified for correctness here.
		if actualErr == nil && testCase.shouldPass {
			// Asserting whether the md5 output is correct.
-			if testCase.inputMd5 != actualMd5Hex {
-				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualMd5Hex)
+			if testCase.inputMd5 != actualInfo.ETag {
+				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualInfo.ETag)
			}
		}
	}
@@ -1344,7 +1344,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
			Object:   objectNames[0],
			MaxParts: 10,
			UploadID: uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 1,
					Size:       4,
@@ -1375,7 +1375,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
			NextPartNumberMarker: 3,
			IsTruncated:          true,
			UploadID:             uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 1,
					Size:       4,
@@ -1400,7 +1400,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
			MaxParts:    2,
			IsTruncated: false,
			UploadID:    uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 4,
					Size:       4,
@@ -1581,7 +1581,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
			Object:   objectNames[0],
			MaxParts: 10,
			UploadID: uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 1,
					Size:       4,
@@ -1612,7 +1612,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
			NextPartNumberMarker: 3,
			IsTruncated:          true,
			UploadID:             uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 1,
					Size:       4,
@@ -1637,7 +1637,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
			MaxParts:    2,
			IsTruncated: false,
			UploadID:    uploadIDs[0],
-			Parts: []partInfo{
+			Parts: []PartInfo{
				{
					PartNumber: 4,
					Size:       4,
@@ -22,6 +22,7 @@ import (
	"io"
	"path"
	"regexp"
+	"runtime"
	"strings"
	"unicode/utf8"

@@ -91,10 +92,10 @@ func IsValidObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
-	if strings.HasSuffix(object, slashSeparator) {
+	if hasSuffix(object, slashSeparator) {
		return false
	}
-	if strings.HasPrefix(object, slashSeparator) {
+	if hasPrefix(object, slashSeparator) {
		return false
	}
	return IsValidObjectPrefix(object)
@@ -159,6 +160,26 @@ func getCompleteMultipartMD5(parts []completePart) (string, error) {
	return s3MD5, nil
}

+// Prefix matcher string matches prefix in a platform specific way.
+// For example on windows since its case insensitive we are supposed
+// to do case insensitive checks.
+func hasPrefix(s string, prefix string) bool {
+	if runtime.GOOS == "windows" {
+		return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
+	}
+	return strings.HasPrefix(s, prefix)
+}
+
+// Suffix matcher string matches suffix in a platform specific way.
+// For example on windows since its case insensitive we are supposed
+// to do case insensitive checks.
+func hasSuffix(s string, suffix string) bool {
+	if runtime.GOOS == "windows" {
+		return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
+	}
+	return strings.HasSuffix(s, suffix)
+}
+
// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo
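These two helpers are what the earlier hunks (filter rules, marker checks, IsValidObjectName) switched to. A runnable sketch of the prefix variant, reproduced from the hunk above so its platform behavior is easy to try out:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// hasPrefix is the matcher added above: Windows filesystems are case
// insensitive, so prefixes are compared after lower-casing there.
func hasPrefix(s string, prefix string) bool {
	if runtime.GOOS == "windows" {
		return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
	}
	return strings.HasPrefix(s, prefix)
}

func main() {
	// Prints true on Windows, false everywhere else.
	fmt.Println(hasPrefix("Photos/2017/jan.jpg", "photos/"))
}
```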
@@ -22,6 +22,16 @@ import (
	"time"
)

+// Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart
+// operation should not proceed. Preconditions supported are:
+//  x-amz-copy-source-if-modified-since
+//  x-amz-copy-source-if-unmodified-since
+//  x-amz-copy-source-if-match
+//  x-amz-copy-source-if-none-match
+func checkCopyObjectPartPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
+	return checkCopyObjectPreconditions(w, r, objInfo)
+}
+
// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
// Preconditions supported are:
//  x-amz-copy-source-if-modified-since
@@ -56,10 +56,9 @@ func setGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
func errAllowableObjectNotFound(bucket string, r *http.Request) APIErrorCode {
	if getRequestAuthType(r) == authTypeAnonymous {
		//we care about the bucket as a whole, not a particular resource
-		url := *r.URL
-		url.Path = "/" + bucket
-
-		if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", &url); s3Error != ErrNone {
+		resource := "/" + bucket
+		if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", resource,
+			r.Referer(), r.URL.Query()); s3Error != ErrNone {
			return ErrAccessDenied
		}
	}
@@ -440,7 +439,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
		return
	case authTypeAnonymous:
		// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
-		if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone {
+		if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path,
+			r.Referer(), r.URL.Query()); s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
@@ -531,7 +531,117 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

-// PutObjectPartHandler - Upload part
+// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
+func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	dstBucket := vars["bucket"]
+	dstObject := vars["object"]
+
+	objectAPI := api.ObjectAPI()
+	if objectAPI == nil {
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		return
+	}
+
+	if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone {
+		writeErrorResponse(w, s3Error, r.URL)
+		return
+	}
+
+	// Copy source path.
+	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
+	if err != nil {
+		// Save unescaped string as is.
+		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
+	}
+
+	srcBucket, srcObject := path2BucketAndObject(cpSrcPath)
+	// If source object is empty or bucket is empty, reply back invalid copy source.
+	if srcObject == "" || srcBucket == "" {
+		writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+		return
+	}
+
+	uploadID := r.URL.Query().Get("uploadId")
+	partIDString := r.URL.Query().Get("partNumber")
+
+	partID, err := strconv.Atoi(partIDString)
+	if err != nil {
+		writeErrorResponse(w, ErrInvalidPart, r.URL)
+		return
+	}
+
+	// check partID with maximum part ID for multipart objects
+	if isMaxPartID(partID) {
+		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
+		return
+	}
+
+	// Hold read locks on source object only if we are
+	// going to read data from source object.
+	objectSRLock := globalNSMutex.NewNSLock(srcBucket, srcObject)
+	objectSRLock.RLock()
+	defer objectSRLock.RUnlock()
+
+	objInfo, err := objectAPI.GetObjectInfo(srcBucket, srcObject)
+	if err != nil {
+		errorIf(err, "Unable to fetch object info.")
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		return
+	}
+
+	// Get request range.
+	var hrange *httpRange
+	rangeHeader := r.Header.Get("x-amz-copy-source-range")
+	if rangeHeader != "" {
+		if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil {
+			// Handle only errInvalidRange
+			// Ignore other parse error and treat it as regular Get request like Amazon S3.
+			if err == errInvalidRange {
+				writeErrorResponse(w, ErrInvalidRange, r.URL)
+				return
+			}
+
+			// log the error.
+			errorIf(err, "Invalid request range")
+		}
+	}
+
+	// Verify before x-amz-copy-source preconditions before continuing with CopyObject.
+	if checkCopyObjectPartPreconditions(w, r, objInfo) {
+		return
+	}
+
+	// Get the object.
+	startOffset := int64(0)
+	length := objInfo.Size
+	if hrange != nil {
+		startOffset = hrange.offsetBegin
+		length = hrange.getLength()
+	}
+
+	/// maximum copy size for multipart objects in a single operation
+	if isMaxObjectSize(length) {
+		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
+		return
+	}
+
+	// Copy source object to destination, if source and destination
+	// object is same then only metadata is updated.
+	partInfo, err := objectAPI.CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length)
+	if err != nil {
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		return
+	}
+
+	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
+	encodedSuccessResponse := encodeResponse(response)
+
+	// Write success response.
+	writeSuccessResponseXML(w, encodedSuccessResponse)
+}
+
+// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]
@@ -590,7 +700,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
		return
	}

-	var partMD5 string
+	var partInfo PartInfo
	incomingMD5 := hex.EncodeToString(md5Bytes)
	sha256sum := ""
	switch rAuthType {
@@ -600,12 +710,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
		return
	case authTypeAnonymous:
		// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
-		if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone {
+		if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path,
+			r.Referer(), r.URL.Query()); s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
		// No need to verify signature, anonymous request access is already allowed.
-		partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
+		partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Error := newSignV4ChunkedReader(r)
@@ -614,7 +725,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
-		partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5, sha256sum)
+		partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5, sha256sum)
	case authTypeSignedV2, authTypePresignedV2:
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
@@ -622,7 +733,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
-		partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
+		partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
	case authTypePresigned, authTypeSigned:
		if s3Error := reqSignatureV4Verify(r); s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
@@ -633,7 +744,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
		if !skipContentSha256Cksum(r) {
			sha256sum = r.Header.Get("X-Amz-Content-Sha256")
		}
-		partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
+		partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
	}
	if err != nil {
		errorIf(err, "Unable to create object part.")
@@ -641,8 +752,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
-	if partMD5 != "" {
-		w.Header().Set("ETag", "\""+partMD5+"\"")
+	if partInfo.ETag != "" {
+		w.Header().Set("ETag", "\""+partInfo.ETag+"\"")
	}

	writeSuccessResponseHeadersOnly(w)
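Together these hunks wire S3's UploadPartCopy operation into the server: a PUT to the destination key with partNumber/uploadId query parameters, an X-Amz-Copy-Source header naming the source as an escaped "/bucket/object" path, and an optional X-Amz-Copy-Source-Range. A hedged sketch of the raw request shape a client would build (endpoint, bucket names, and upload ID below are placeholders; a real request must additionally be signed with AWS Signature V2/V4):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Destination: bucket "dst", object "big.bin", an already-initiated multipart upload.
	endpoint := "http://localhost:9000" // hypothetical server address
	uploadID := "example-upload-id"     // as returned by NewMultipartUpload
	target := fmt.Sprintf("%s/dst/big.bin?partNumber=1&uploadId=%s",
		endpoint, url.QueryEscape(uploadID))

	req, err := http.NewRequest(http.MethodPut, target, nil)
	if err != nil {
		panic(err)
	}
	// Source object as an escaped "/bucket/object" path, plus an optional byte range.
	req.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/src/photos.tar"))
	req.Header.Set("X-Amz-Copy-Source-Range", "bytes=0-5242879")

	// A real client signs the request here before sending it.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 with a CopyPartResult XML body
}
```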
@@ -489,6 +489,20 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass:         true,
		},
+		// Test case - 3
+		// Empty data
+		{
+			bucketName:         bucketName,
+			objectName:         objectName,
+			data:               []byte{},
+			dataLen:            0,
+			chunkSize:          64 * humanize.KiByte,
+			expectedContent:    []byte{},
+			expectedRespStatus: http.StatusOK,
+			accessKey:          credentials.AccessKey,
+			secretKey:          credentials.SecretKey,
+			shouldPass:         true,
+		},
		// Test case - 4
		// Invalid access key id.
		{
			bucketName: bucketName,
@@ -502,7 +516,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			secretKey:  "",
			shouldPass: false,
		},
-		// Test case - 4
+		// Test case - 5
		// Wrong auth header returns as bad request.
		{
			bucketName: bucketName,
@@ -517,7 +531,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass:       false,
			removeAuthHeader: true,
		},
-		// Test case - 5
+		// Test case - 6
		// Large chunk size.. also passes.
		{
			bucketName: bucketName,
@@ -531,7 +545,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			secretKey:  credentials.SecretKey,
			shouldPass: false,
		},
-		// Test case - 6
+		// Test case - 7
		// Chunk with malformed encoding.
		{
			bucketName: bucketName,
@@ -546,7 +560,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass: false,
			fault:      malformedEncoding,
		},
-		// Test case - 7
+		// Test case - 8
		// Chunk with shorter than advertised chunk data.
		{
			bucketName: bucketName,
@@ -561,7 +575,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass: false,
			fault:      unexpectedEOF,
		},
-		// Test case - 8
+		// Test case - 9
		// Chunk with first chunk data byte tampered.
		{
			bucketName: bucketName,
@@ -576,7 +590,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass: false,
			fault:      signatureMismatch,
		},
-		// Test case - 9
+		// Test case - 10
		// Different date (timestamps) used in seed signature calculation
		// and chunks signature calculation.
		{
@@ -592,7 +606,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
			shouldPass: false,
			fault:      chunkDateMismatch,
		},
-		// Test case - 10
+		// Test case - 11
		// Set x-amz-decoded-content-length to a value too big to hold in int64.
		{
			bucketName: bucketName,
@@ -669,11 +683,11 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
	}

	buffer := new(bytes.Buffer)
-	err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(bytesDataLen), buffer)
+	err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer)
	if err != nil {
		t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
	}
-	if !bytes.Equal(bytesData, buffer.Bytes()) {
+	if !bytes.Equal(testCase.data, buffer.Bytes()) {
		t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType)
	}
	buffer.Reset()
@@ -918,6 +932,324 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a

}

+// Wrapper for calling Copy Object Part API handler tests for both XL multiple disks and single node setup.
+func TestAPICopyObjectPartHandler(t *testing.T) {
+	defer DetectTestLeak(t)()
+	ExecObjectLayerAPITest(t, testAPICopyObjectPartHandler, []string{"CopyObjectPart"})
+}
+
+func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
+	credentials credential, t *testing.T) {
+
+	objectName := "test-object"
+	// register event notifier.
+	err := initEventNotifier(obj)
+	if err != nil {
+		t.Fatalf("Initializing event notifiers failed")
+	}
+
+	// set of byte data for PutObject.
+	// object has to be created before running tests for Copy Object.
+	// this is required even to assert the copied object,
+	bytesData := []struct {
+		byteData []byte
+	}{
+		{generateBytesData(6 * humanize.KiByte)},
+	}
+
+	// set of inputs for uploading the objects before tests for downloading is done.
+	putObjectInputs := []struct {
+		bucketName    string
+		objectName    string
+		contentLength int64
+		textData      []byte
+		metaData      map[string]string
+	}{
+		// case - 1.
+		{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
+	}
+	sha256sum := ""
+	// iterate through the above set of inputs and upload the object.
+	for i, input := range putObjectInputs {
+		// uploading the object.
+		_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
+		// if object upload fails stop the test.
+		if err != nil {
+			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
+		}
+	}
+
+	// Initiate Multipart upload for testing PutObjectPartHandler.
+	testObject := "testobject"
+
+	// PutObjectPart API HTTP Handler has to be tested in isolation,
+	// that is without any other handler being registered,
+	// That's why NewMultipartUpload is initiated using ObjectLayer.
+	uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
+	if err != nil {
+		// Failed to create NewMultipartUpload, abort.
+		t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
+	}
+
+	// test cases with inputs and expected result for Copy Object.
+	testCases := []struct {
+		bucketName        string
+		copySourceHeader  string // data for "X-Amz-Copy-Source" header. Contains the object to be copied in the URL.
+		copySourceRange   string // data for "X-Amz-Copy-Source-Range" header, contains the byte range offsets of data to be copied.
+		uploadID          string // uploadID of the transaction.
+		invalidPartNumber bool   // Sets an invalid multipart.
+		maximumPartNumber bool   // Sets a maximum parts.
+		accessKey         string
+		secretKey         string
+		// expected output.
+		expectedRespStatus int
+	}{
+		// Test case - 1, copy part 1 from from newObject1, ignore request headers.
+		{
+			bucketName:         bucketName,
+			uploadID:           uploadID,
+			copySourceHeader:   url.QueryEscape("/" + bucketName + "/" + objectName),
+			accessKey:          credentials.AccessKey,
+			secretKey:          credentials.SecretKey,
+			expectedRespStatus: http.StatusOK,
+		},
+
+		// Test case - 2.
+		// Test case with invalid source object.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/"),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusBadRequest,
+		},
+
+		// Test case - 3.
+		// Test case with new object name is same as object to be copied.
+		// Fail with file not found.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + testObject),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusNotFound,
+		},
+
+		// Test case - 4.
+		// Test case with valid byte range.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceRange:  "bytes=500-4096",
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusOK,
+		},
+
+		// Test case - 5.
+		// Test case with invalid byte range.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceRange:  "bytes=6145-",
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusRequestedRangeNotSatisfiable,
+		},
+
+		// Test case - 6.
+		// Test case with object name missing from source.
+		// fail with BadRequest.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("//123"),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusBadRequest,
+		},
+
+		// Test case - 7.
+		// Test case with non-existent source file.
+		// Case for the purpose of failing `api.ObjectAPI.GetObjectInfo`.
+		// Expecting the response status code to http.StatusNotFound (404).
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusNotFound,
+		},
+
+		// Test case - 8.
+		// Test case with non-existent source file.
+		// Case for the purpose of failing `api.ObjectAPI.PutObjectPart`.
+		// Expecting the response status code to http.StatusNotFound (404).
+		{
+			bucketName:       "non-existent-destination-bucket",
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusNotFound,
+		},
+
+		// Test case - 9.
+		// Case with invalid AccessKey.
+		{
+			bucketName:       bucketName,
+			uploadID:         uploadID,
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			accessKey:        "Invalid-AccessID",
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusForbidden,
+		},
+
+		// Test case - 10.
+		// Case with non-existent upload id.
+		{
+			bucketName:       bucketName,
+			uploadID:         "-1",
+			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			accessKey:        credentials.AccessKey,
+			secretKey:        credentials.SecretKey,
+
+			expectedRespStatus: http.StatusNotFound,
+		},
+		// Test case - 11.
+		// invalid part number.
+		{
+			bucketName:         bucketName,
+			uploadID:           uploadID,
+			copySourceHeader:   url.QueryEscape("/" + bucketName + "/" + objectName),
+			invalidPartNumber:  true,
+			accessKey:          credentials.AccessKey,
+			secretKey:          credentials.SecretKey,
+			expectedRespStatus: http.StatusOK,
+		},
+		// Test case - 12.
+		// maximum part number.
+		{
+			bucketName:         bucketName,
+			uploadID:           uploadID,
+			copySourceHeader:   url.QueryEscape("/" + bucketName + "/" + objectName),
+			maximumPartNumber:  true,
+			accessKey:          credentials.AccessKey,
+			secretKey:          credentials.SecretKey,
+			expectedRespStatus: http.StatusOK,
+		},
+	}
+
+	for i, testCase := range testCases {
+		var req *http.Request
+		var reqV2 *http.Request
+		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
+		rec := httptest.NewRecorder()
+		if !testCase.invalidPartNumber || !testCase.maximumPartNumber {
+			// construct HTTP request for copy object.
+			req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "1"), 0, nil, testCase.accessKey, testCase.secretKey)
+		} else if testCase.invalidPartNumber {
+			req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "abc"), 0, nil, testCase.accessKey, testCase.secretKey)
+		} else if testCase.maximumPartNumber {
+			req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "99999"), 0, nil, testCase.accessKey, testCase.secretKey)
+		}
+		if err != nil {
+			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
+		}
+
+		// "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied.
+		if testCase.copySourceHeader != "" {
+			req.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader)
+		}
+		if testCase.copySourceRange != "" {
+			req.Header.Set("X-Amz-Copy-Source-Range", testCase.copySourceRange)
+		}
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
+		// Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) CopyObjectHandler` handles the request.
+		apiRouter.ServeHTTP(rec, req)
+		// Assert the response code with the expected status.
+		if rec.Code != testCase.expectedRespStatus {
+			t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
+		}
+		if rec.Code == http.StatusOK {
+			// See if the new part has been uploaded.
+			// testing whether the copy was successful.
+			var results ListPartsInfo
+			results, err = obj.ListObjectParts(testCase.bucketName, testObject, testCase.uploadID, 0, 1)
+			if err != nil {
+				t.Fatalf("Test %d: %s: Failed to look for copied object part: <ERROR> %s", i+1, instanceType, err)
+			}
+			if len(results.Parts) != 1 {
+				t.Fatalf("Test %d: %s: Expected only one entry returned %d entries", i+1, instanceType, len(results.Parts))
+			}
+		}
+
+		// Verify response of the V2 signed HTTP request.
+		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
+		recV2 := httptest.NewRecorder()
+
+		reqV2, err = newTestRequest("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "1"), 0, nil)
+		if err != nil {
+			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
+		}
+		// "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied.
+		if testCase.copySourceHeader != "" {
+			reqV2.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader)
+		}
+		if testCase.copySourceRange != "" {
+			reqV2.Header.Set("X-Amz-Copy-Source-Range", testCase.copySourceRange)
+		}
+
+		err = signRequestV2(reqV2, testCase.accessKey, testCase.secretKey)
+		if err != nil {
+			t.Fatalf("Failed to V2 Sign the HTTP request: %v.", err)
+		}
+
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
+		// Call the ServeHTTP to execute the handler.
+		apiRouter.ServeHTTP(recV2, reqV2)
+		if recV2.Code != testCase.expectedRespStatus {
+			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
+		}
+	}
+
+	// HTTP request for testing when `ObjectLayer` is set to `nil`.
+	// There is no need to use an existing bucket and valid input for creating the request
+	// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
+	// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
+	nilBucket := "dummy-bucket"
+	nilObject := "dummy-object"
+
+	nilReq, err := newTestSignedRequestV4("PUT", getCopyObjectPartURL("", nilBucket, nilObject, "0", "0"),
+		0, bytes.NewReader([]byte("testNilObjLayer")), "", "")
+	if err != nil {
+		t.Errorf("Minio %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType)
+	}
+
+	// Below is how CopyObjectPartHandler is registered.
+	// bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+	// Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler.
+	nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject))
+
+	// execute the object layer set to `nil` test.
+	// `ExecObjectLayerAPINilTest` manages the operation.
+	ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
+
+}
// Wrapper for calling Copy Object API handler tests for both XL multiple disks and single node setup.
func TestAPICopyObjectHandler(t *testing.T) {
	defer DetectTestLeak(t)()

@@ -106,15 +106,18 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
	for i := 1; i <= 10; i++ {
		expectedMD5Sumhex := getMD5Hash(data)

-		var calculatedMD5sum string
-		calculatedMD5sum, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedMD5Sumhex, "")
+		var calcPartInfo PartInfo
+		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedMD5Sumhex, "")
		if err != nil {
			c.Errorf("%s: <ERROR> %s", instanceType, err)
		}
-		if calculatedMD5sum != expectedMD5Sumhex {
+		if calcPartInfo.ETag != expectedMD5Sumhex {
			c.Errorf("MD5 Mismatch")
		}
-		completedParts.Parts = append(completedParts.Parts, completePart{PartNumber: i, ETag: calculatedMD5sum})
+		completedParts.Parts = append(completedParts.Parts, completePart{
+			PartNumber: i,
+			ETag:       calcPartInfo.ETag,
+		})
	}
	objInfo, err := obj.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
	if err != nil {

@@ -153,12 +156,12 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHan
		expectedMD5Sumhex := getMD5Hash([]byte(randomString))

		metadata["md5"] = expectedMD5Sumhex
-		var calculatedMD5sum string
-		calculatedMD5sum, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex, "")
+		var calcPartInfo PartInfo
+		calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex, "")
		if err != nil {
			c.Fatalf("%s: <ERROR> %s", instanceType, err)
		}
-		if calculatedMD5sum != expectedMD5Sumhex {
+		if calcPartInfo.ETag != expectedMD5Sumhex {
			c.Errorf("Md5 Mismatch")
		}
		parts[i] = expectedMD5Sumhex
@@ -1,4 +1,4 @@
-// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build linux darwin freebsd netbsd openbsd

/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.

@@ -68,10 +68,6 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
		if name == "." || name == ".." {
			continue
		}
-		// Skip special files.
-		if hasPosixReservedPrefix(name) {
-			continue
-		}

		switch dirent.Type {
		case syscall.DT_DIR:

@@ -1,4 +1,4 @@
-// +build !linux,!darwin,!openbsd,!freebsd,!netbsd,!dragonfly
+// +build !linux,!darwin,!openbsd,!freebsd,!netbsd

/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.

@@ -52,10 +52,6 @@ func readDir(dirPath string) (entries []string, err error) {
		return nil, err
	}
	for _, fi := range fis {
-		// Skip special files, if found.
-		if hasPosixReservedPrefix(fi.Name()) {
-			continue
-		}
		// Stat symbolic link and follow to get the final value.
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			var st os.FileInfo
@@ -69,26 +69,6 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) {
	return testResults
}

-// Test to read empty directory with only reserved names.
-func setupTestReadDirReserved(t *testing.T) (testResults []result) {
-	dir := mustSetupDir(t)
-	entries := []string{}
-	// Create a file with reserved name.
-	for _, reservedName := range posixReservedPrefix {
-		if err := ioutil.WriteFile(filepath.Join(dir, reservedName), []byte{}, os.ModePerm); err != nil {
-			// For cleanup, its required to add these entries into test results.
-			testResults = append(testResults, result{dir, entries})
-			t.Fatalf("Unable to create file, %s", err)
-		}
-		// entries = append(entries, reservedName) - reserved files are skipped.
-	}
-	sort.Strings(entries)
-
-	// Add entries slice for this test directory.
-	testResults = append(testResults, result{dir, entries})
-	return testResults
-}
-
// Test to read non-empty directory with only files.
func setupTestReadDirFiles(t *testing.T) (testResults []result) {
	dir := mustSetupDir(t)

@@ -198,8 +178,6 @@ func TestReadDir(t *testing.T) {

	// Setup and capture test results for empty directory.
	testResults = append(testResults, setupTestReadDirEmpty(t)...)
-	// Setup and capture test results for reserved files.
-	testResults = append(testResults, setupTestReadDirReserved(t)...)
	// Setup and capture test results for directory with only files.
	testResults = append(testResults, setupTestReadDirFiles(t)...)
	// Setup and capture test results for directory with files and directories.
cmd/posix.go
@@ -1,7 +1,7 @@
-// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build linux darwin dragonfly freebsd netbsd openbsd solaris

/*
- * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -153,11 +153,20 @@ func getDiskInfo(diskPath string) (di disk.Info, err error) {
	return di, err
}

+// List of operating systems where we ignore disk space
+// verification.
+var ignoreDiskFreeOS = []string{
+	globalWindowsOSName,
+	globalNetBSDOSName,
+	globalSolarisOSName,
+}
+
// checkDiskFree verifies if disk path has sufficient minimum free disk space and files.
func (s *posix) checkDiskFree() (err error) {
	// We don't validate disk space or inode utilization on windows.
	// Each windows calls to 'GetVolumeInformationW' takes around 3-5seconds.
-	if runtime.GOOS == globalWindowsOSName {
+	// And StatFS is not supported by Go for solaris and netbsd.
+	if contains(ignoreDiskFreeOS, runtime.GOOS) {
		return nil
	}
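The `contains` call above is one of minio's small internal helpers; the diff does not show its body, so the sketch below is an assumption inferred from the call site — a plain linear membership test (name and signature are hypothetical):

```go
package main

import (
	"fmt"
	"runtime"
)

// contains reports whether s is present in list - a linear scan, which is
// fine for a three-entry OS list checked once per disk initialization.
func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	ignoreDiskFreeOS := []string{"windows", "netbsd", "solaris"}
	fmt.Println(contains(ignoreDiskFreeOS, runtime.GOOS))
}
```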
@@ -342,46 +342,66 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
	}

	testCases2 := []struct {
-		objectName         string
-		data               []byte
-		expectedRespStatus int
-		accessKey          string
-		secretKey          string
-		malformedBody      bool
+		objectName          string
+		data                []byte
+		expectedRespStatus  int
+		accessKey           string
+		secretKey           string
+		malformedBody       bool
+		ignoreContentLength bool
	}{
		// Success case.
		{
-			objectName:         "test",
-			data:               bytes.Repeat([]byte("a"), 1025),
-			expectedRespStatus: http.StatusNoContent,
-			accessKey:          credentials.AccessKey,
-			secretKey:          credentials.SecretKey,
-			malformedBody:      false,
+			objectName:          "test",
+			data:                bytes.Repeat([]byte("a"), 1025),
+			expectedRespStatus:  http.StatusNoContent,
+			accessKey:           credentials.AccessKey,
+			secretKey:           credentials.SecretKey,
+			malformedBody:       false,
+			ignoreContentLength: false,
		},
+		// Failed with Content-Length not specified.
+		{
+			objectName:          "test",
+			data:                bytes.Repeat([]byte("a"), 1025),
+			expectedRespStatus:  http.StatusNoContent,
+			accessKey:           credentials.AccessKey,
+			secretKey:           credentials.SecretKey,
+			malformedBody:       false,
+			ignoreContentLength: true,
+		},
		// Failed with entity too small.
		{
-			objectName:         "test",
-			data:               bytes.Repeat([]byte("a"), 1023),
-			expectedRespStatus: http.StatusBadRequest,
-			accessKey:          credentials.AccessKey,
-			secretKey:          credentials.SecretKey,
-			malformedBody:      false,
+			objectName:          "test",
+			data:                bytes.Repeat([]byte("a"), 1023),
+			expectedRespStatus:  http.StatusBadRequest,
+			accessKey:           credentials.AccessKey,
+			secretKey:           credentials.SecretKey,
+			malformedBody:       false,
+			ignoreContentLength: false,
		},
		// Failed with entity too large.
		{
-			objectName:         "test",
-			data:               bytes.Repeat([]byte("a"), (1*humanize.MiByte)+1),
-			expectedRespStatus: http.StatusBadRequest,
-			accessKey:          credentials.AccessKey,
-			secretKey:          credentials.SecretKey,
-			malformedBody:      false,
+			objectName:          "test",
+			data:                bytes.Repeat([]byte("a"), (1*humanize.MiByte)+1),
+			expectedRespStatus:  http.StatusBadRequest,
+			accessKey:           credentials.AccessKey,
+			secretKey:           credentials.SecretKey,
+			malformedBody:       false,
+			ignoreContentLength: false,
		},
	}

	for i, testCase := range testCases2 {
		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
-		req, perr := newPostRequestV4WithContentLength("", bucketName, testCase.objectName, testCase.data, testCase.accessKey, testCase.secretKey)
+		var req *http.Request
+		var perr error
+		if testCase.ignoreContentLength {
+			req, perr = newPostRequestV4("", bucketName, testCase.objectName, testCase.data, testCase.accessKey, testCase.secretKey)
+		} else {
+			req, perr = newPostRequestV4WithContentLength("", bucketName, testCase.objectName, testCase.data, testCase.accessKey, testCase.secretKey)
+		}
		if perr != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
		}
@@ -198,10 +198,11 @@ func retryFormattingXLDisks(firstDisk bool, endpoints []*url.URL, storageDisks [
		return errInvalidArgument
	}

-	// Create a done channel to control 'ListObjects' go routine.
-	doneCh := make(chan struct{}, 1)
+	// Done channel is used to close any lingering retry routine, as soon
+	// as this function returns.
+	doneCh := make(chan struct{})

-	// Indicate to our routine to exit cleanly upon return.
+	// Indicate to our retry routine to exit cleanly, upon this function return.
	defer close(doneCh)

	// prepare getElapsedTime() to calculate elapsed time since we started trying formatting disks.
@@ -212,7 +213,7 @@ func retryFormattingXLDisks(firstDisk bool, endpoints []*url.URL, storageDisks [
	}

	// Wait on the jitter retry loop.
-	retryTimerCh := newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh)
+	retryTimerCh := newRetryTimerSimple(doneCh)
	for {
		select {
		case retryCount := <-retryTimerCh:
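newRetryTimerSimple is minio's own helper; its general shape — a goroutine that emits an incrementing attempt counter with backoff until the done channel closes — can be sketched as follows (the name, signature, and backoff policy below are assumptions for illustration, not the repository's exact implementation):

```go
package main

import (
	"fmt"
	"time"
)

// newRetryTimer emits 0, 1, 2, ... on the returned channel, sleeping with a
// capped exponential backoff in between, until doneCh is closed.
func newRetryTimer(unit, maxSleep time.Duration, doneCh chan struct{}) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for i := 0; ; i++ {
			select {
			case attemptCh <- i:
			case <-doneCh: // caller is done; exit without leaking
				return
			}
			sleep := unit << uint(i) // doubles each attempt (demo only; cap i for long runs)
			if sleep > maxSleep {
				sleep = maxSleep
			}
			select {
			case <-time.After(sleep):
			case <-doneCh:
				return
			}
		}
	}()
	return attemptCh
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh) // closing doneCh ends the retry goroutine cleanly
	for attempt := range newRetryTimer(time.Second, 30*time.Second, doneCh) {
		fmt.Println("attempt", attempt)
		if attempt == 2 {
			break // pretend the operation succeeded on the third try
		}
	}
}
```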
Some files were not shown because too many files have changed in this diff.