Compare commits


11 Commits

Author SHA1 Message Date
Harshavardhana
98a8a5cdec fix: initialize config once per startup (#9851) 2020-06-17 09:57:18 -07:00
Harshavardhana
8b641972a8 on darwin fallback to maximum possible rlimit (#9859)
fixes #9857
2020-06-17 09:57:11 -07:00
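A minimal sketch of the darwin fallback described above, assuming the fix first tries to raise `RLIMIT_NOFILE` to the hard limit and, when the kernel rejects that (macOS caps the value at `OPEN_MAX`, 10240), retries with that ceiling; the function name and constant are illustrative, not taken from the patch:

```go
package main

import (
	"fmt"
	"syscall"
)

// setMaxOpenFiles raises the soft open-file limit as high as the OS allows.
func setMaxOpenFiles() error {
	var rlim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
		return err
	}
	rlim.Cur = rlim.Max
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlim); err == nil {
		return nil
	}
	// darwin fallback: setrlimit fails when Cur exceeds OPEN_MAX (10240),
	// so retry with the maximum the kernel will actually accept.
	rlim.Cur = 10240
	return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlim)
}

func main() {
	fmt.Println(setMaxOpenFiles())
}
```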
Klaus Post
a649589d77 Always add existing cache when skipping (#9862) 2020-06-17 09:37:54 -07:00
Harshavardhana
b85b18da09 remove unnecessary log for setMaxResources (#9856)
fixes #9855
2020-06-16 19:09:01 -07:00
Harshavardhana
2daa34653e Allow etcd, cache setup to exit when starting gateway mode (#9842)
- Initialize etcd once per call
- Fail etcd, cache setup pro-actively for gateway setups
- Support deleting/updating bucket notification,
  tagging, lifecycle, sse-encryption
2020-06-16 09:00:57 -07:00
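The first two bullets suggest a once-per-process guard plus a fail-fast dial. A hedged sketch of that pattern using `sync.Once` and the etcd v3 client; the names, endpoints, and the 3-second timeout are assumptions, not taken from the patch:

```go
package main

import (
	"log"
	"sync"
	"time"

	clientv3 "go.etcd.io/etcd/clientv3"
)

var (
	etcdOnce sync.Once
	etcdClnt *clientv3.Client
	etcdErr  error
)

// newEtcdClient initializes the etcd client exactly once per process;
// every later caller receives the same client or the same startup error.
func newEtcdClient(endpoints []string) (*clientv3.Client, error) {
	etcdOnce.Do(func() {
		etcdClnt, etcdErr = clientv3.New(clientv3.Config{
			Endpoints:   endpoints,
			DialTimeout: 3 * time.Second, // fail pro-actively instead of hanging
		})
	})
	return etcdClnt, etcdErr
}

func main() {
	if _, err := newEtcdClient([]string{"http://localhost:2379"}); err != nil {
		// A gateway setup can exit here instead of blocking startup.
		log.Fatal(err)
	}
}
```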
Harshavardhana
49c8d36497 fix: export prometheus metrics for cache GC triggers (#9815)
Bonus change: use a channel to serialize triggers
instead of atomic variables, a more efficient
mechanism for synchronization.
2020-06-15 18:26:37 -07:00
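The channel-based trigger pattern that the message describes might look roughly like the sketch below: a buffered channel of size one coalesces GC requests so at most one run is ever pending, and a counter stands in for the value one would export to Prometheus. All names are illustrative:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// cacheGC serializes garbage-collection triggers through a channel.
type cacheGC struct {
	trigger chan struct{}
	runs    uint64 // completed GC runs, e.g. backing a Prometheus metric
}

func newCacheGC() *cacheGC {
	c := &cacheGC{trigger: make(chan struct{}, 1)}
	go c.loop()
	return c
}

// kick requests a GC run; if one is already queued, the request coalesces.
func (c *cacheGC) kick() {
	select {
	case c.trigger <- struct{}{}:
	default: // a run is already pending
	}
}

func (c *cacheGC) loop() {
	for range c.trigger {
		// ... evict expired cache entries here ...
		atomic.AddUint64(&c.runs, 1)
	}
}

func main() {
	gc := newCacheGC()
	for i := 0; i < 5; i++ {
		gc.kick() // bursts collapse into one or two runs
	}
	time.Sleep(100 * time.Millisecond)
	fmt.Println("GC runs:", atomic.LoadUint64(&gc.runs))
}
```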
Harshavardhana
a1e6a3ef64 Add logs when quorum is lost during readiness checks (#9839) 2020-06-15 13:03:16 -07:00
Minio Trusted
41a7938748 Update yaml files to latest version RELEASE.2020-06-14T18-32-17Z 2020-06-14 18:40:39 +00:00
Anis Elleuch
6105cc6ab4 fix: Avoid updating object tags on failed disks (#9819) 2020-06-14 10:50:40 -07:00
Harshavardhana
068ad33c9a preserve context per request for local locks (#9828)
The bug was that we were re-using the context
from previously granted lockers; this would
cause premature timeouts for existing valid
read or write locks.

This bug affects only local lockers in FS
or standalone erasure-coded mode. The issue
is historical as well; it was present in
lsync for some time, but we were lucky not
to hit it.

Similar changes are made in dsync as well
to keep the code consistent.

Fixes #9827
2020-06-14 08:07:11 -07:00
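An illustration of the bug class described above, with invented types and names: caching the first caller's `context.Context` inside the locker ties every later lock to that request's lifetime, while the fixed shape accepts a fresh context on each call:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type localLocker struct {
	mu   sync.Mutex
	held map[string]bool
	// The buggy variant stored a context.Context field here and reused it,
	// so one request's deadline could time out everyone else's locks.
}

// Lock takes the caller's own context: only this request's cancellation
// or deadline can abort the attempt.
func (l *localLocker) Lock(ctx context.Context, resource string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.held[resource] {
		return fmt.Errorf("resource %q already locked", resource)
	}
	l.held[resource] = true
	return nil
}

func main() {
	l := &localLocker{held: map[string]bool{}}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(l.Lock(ctx, "bucket/object"))
}
```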
ethan ho
2db5cbc7a5 Fix peer server update failure (#9824)
When updating all servers using `mc update`,
only the endpoint server was updated successfully;
all the other peer servers failed to update with the error below:
--------------------------------------------------------------------------
parsing time "2006-01-02T15:04:05Z07:00" as "<release version>": cannot parse "-01-02T15:04:05Z07:00" as "0-" 
--------------------------------------------------------------------------
2020-06-14 08:06:21 -07:00
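The error text is consistent with `time.Parse` being called with its layout and value arguments swapped, so Go tried to parse the RFC3339 layout constant as if it were the timestamp. A reconstruction under that assumption; the release-tag layout string is inferred from MinIO's tag format, not taken from the patch:

```go
package main

import (
	"fmt"
	"time"
)

// MinIO release tags look like RELEASE.2020-06-14T18-32-17Z: the time
// portion uses hyphens instead of colons.
const releaseTagTimeLayout = "2006-01-02T15-04-05Z"

func main() {
	tag := "2020-06-14T18-32-17Z"

	// Swapped order (the suspected bug): the value ends up where the
	// layout belongs, producing an error like the one quoted above:
	// parsing time "2006-01-02T15:04:05Z07:00" as "2020-06-14T18-32-17Z": ...
	if _, err := time.Parse(tag, time.RFC3339); err != nil {
		fmt.Println(err)
	}

	// Correct order: layout first, then the value to parse.
	t, err := time.Parse(releaseTagTimeLayout, tag)
	fmt.Println(t, err)
}
```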
938 changed files with 36509 additions and 140276 deletions


@@ -1,9 +1,2 @@
.git
.github
docs
default.etcd
browser
*.gz
*.tar.gz
*.bzip2
*.zip


@@ -1,12 +1,3 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage
assignees: ''
---
<!--- Provide a general summary of the issue in the Title above -->
## Expected Behavior
@@ -24,8 +15,6 @@ assignees: ''
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->
1.
2.
3.
@@ -41,6 +30,8 @@ assignees: ''
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio --version`):
* Server setup and configuration:
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Operating System and version (`uname -a`):
* Link to your project:


@@ -24,8 +24,6 @@ assignees: ''
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->
1.
2.
3.
@@ -41,6 +39,8 @@ assignees: ''
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio --version`):
* Server setup and configuration:
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Operating System and version (`uname -a`):
* Link to your project:


@@ -2,7 +2,7 @@ blank_issues_enabled: false
contact_links:
- name: MinIO Community Support
url: https://slack.min.io
about: Join here for Community Support
about: Please ask and answer questions here.
- name: MinIO SUBNET Support
url: https://min.io/pricing
about: Join here for Enterprise Support
about: Join this for Enterprise Support.


@@ -10,10 +10,10 @@
## Types of changes
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Optimization (provides speedup with no functional changes)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
## Checklist:
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation updated
- [ ] Unit tests added/updated
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )

.github/lock.yml vendored

@@ -11,7 +11,7 @@ skipCreatedBefore: false
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: true
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: >-

.github/stale.yml vendored

@@ -1,11 +1,11 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30
daysUntilStale: 90
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 15
daysUntilClose: 30
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
@@ -14,7 +14,6 @@ onlyLabels: []
exemptLabels:
- "security"
- "pending discussion"
- "do not close"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
@@ -31,7 +30,7 @@ staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed after 15 days if no further activity
recent activity. It will be closed after 21 days if no further activity
occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >

.github/workflows/codeql.yml vendored (new file)

@@ -0,0 +1,51 @@
name: "Code scanning - action"
on:
push:
pull_request:
schedule:
- cron: '0 19 * * 0'
jobs:
CodeQL-Build:
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: go, javascript
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1


@@ -4,6 +4,9 @@ on:
pull_request:
branches:
- master
push:
branches:
- master
jobs:
build:
@@ -11,8 +14,8 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.16.x]
os: [ubuntu-latest, windows-latest, macos-latest]
go-version: [1.13.x]
os: [ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
@@ -21,19 +24,12 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Build on ${{ matrix.os }}
if: matrix.os == 'macos-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
make
make test-race
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
MINIO_CI_CD: 1
run: |
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
@@ -42,14 +38,12 @@ jobs:
env:
CGO_ENABLED: 0
GO111MODULE: on
MINIO_CI_CD: 1
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
sudo apt-get install devscripts shellcheck
nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
go list -m all | ./nancy sleuth
make
diff -au <(gofmt -s -d cmd) <(printf "")
diff -au <(gofmt -s -d pkg) <(printf "")
make test-race
make crosscompile
make verify


@@ -17,19 +17,11 @@ linters:
- gosimple
- deadcode
- structcheck
- gomodguard
- gofmt
issues:
exclude-use-default: false
exclude:
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
run:
skip-dirs:
- pkg/rpc
- pkg/argon2
service:
golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly


@@ -1,167 +0,0 @@
project_name: minio
release:
name_template: "Version {{.MinIO.Version}}"
disable: true
github:
owner: minio
name: minio
env:
- CGO_ENABLED=0
- GO111MODULE=on
before:
hooks:
- make clean
- go generate ./...
- go mod tidy
- go mod download
builds:
-
goos:
- linux
- darwin
- windows
- freebsd
goarch:
- amd64
- arm64
- arm
- ppc64le
- s390x
goarm:
- 7
ignore:
- goos: darwin
goarch: arm64
- goos: darwin
goarch: arm
- goos: darwin
goarch: ppc64le
- goos: darwin
goarch: s390x
- goos: windows
goarch: arm64
- goos: windows
goarch: arm
- goos: windows
goarch: ppc64le
- goos: windows
goarch: s390x
- goos: freebsd
goarch: arm
- goos: freebsd
goarch: arm64
- goos: freebsd
goarch: ppc64le
- goos: freebsd
goarch: s390x
flags:
- -tags=kqueue
- -trimpath
ldflags:
- "-s -w -X github.com/minio/minio/cmd.Version={{.Version}} -X github.com/minio/minio/cmd.ReleaseTag={{.Tag}} -X github.com/minio/minio/cmd.CommitID={{.FullCommit}} -X github.com/minio/minio/cmd.ShortCommitID={{.ShortCommit}}"
archives:
-
format: binary
name_template: "{{ .Binary }}-release/{{ .Os }}-{{ .Arch }}/{{ .Binary }}.{{ .Version }}"
nfpms:
-
id: minio
package_name: minio
vendor: MinIO, Inc.
homepage: https://min.io/
maintainer: dev@min.io
description: MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
license: Apache 2.0
bindir: /usr/bin
formats:
- deb
- rpm
overrides:
deb:
file_name_template: "{{ .Binary }}-release/debs/{{ .ProjectName }}-{{ .Version }}_{{ .Arch }}"
replacements:
arm: armv7
files:
"NOTICE": "/usr/share/minio/NOTICE"
"CREDITS": "/usr/share/minio/CREDITS"
"LICENSE": "/usr/share/minio/LICENSE"
"README.md": "/usr/share/minio/README.md"
rpm:
file_name_template: "{{ .Binary }}-release/rpms/{{ .ProjectName }}-{{ .Version }}.{{ .Arch }}"
replacements:
amd64: x86_64
arm64: aarch64
arm: armv7
files:
"NOTICE": "/usr/share/minio/NOTICE"
"CREDITS": "/usr/share/minio/CREDITS"
"LICENSE": "/usr/share/minio/LICENSE"
"README.md": "/usr/share/minio/README.md"
checksum:
algorithm: sha256
signs:
-
signature: "${artifact}.minisig"
cmd: "sh"
args:
- '-c'
- 'minisign -s /media/${USER}/minio/minisign.key -qQSm ${artifact} < /media/${USER}/minio/minisign-passphrase'
artifacts: all
changelog:
sort: asc
filters:
exclude:
- '^Update yaml files'
dockers:
-
goos: linux
goarch: amd64
dockerfile: Dockerfile.release
image_templates:
- minio/minio:{{ .Tag }}
- minio/minio:latest
-
goos: linux
goarch: ppc64le
dockerfile: Dockerfile.ppc64le.release
image_templates:
- minio/minio:{{ .Tag }}-ppc64le
-
goos: linux
goarch: s390x
dockerfile: Dockerfile.s390x.release
image_templates:
- minio/minio:{{ .Tag }}-s390x
-
goos: linux
goarch: arm64
goarm: ''
dockerfile: Dockerfile.arm64.release
image_templates:
- minio/minio:{{ .Tag }}-arm64
-
goos: linux
goarch: arm
goarm: '7'
dockerfile: Dockerfile.arm.release
image_templates:
- minio/minio:{{ .Tag }}-arm


CREDITS

File diff suppressed because it is too large.


@@ -1,4 +1,4 @@
FROM golang:1.16-alpine as builder
FROM golang:1.13-alpine
LABEL maintainer="MinIO Inc <dev@min.io>"
@@ -9,29 +9,24 @@ ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
FROM alpine:3.10
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
EXPOSE 9000
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /go/minio/CREDITS /third_party/
COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
microdnf clean all && \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

Dockerfile.arm.release (new file)

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm32v7/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]

Dockerfile.arm64.release (new file)

@@ -0,0 +1,41 @@
FROM golang:1.13-alpine as builder
WORKDIR /home
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git 'curl>7.61.0' && \
git clone https://github.com/minio/minio && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
FROM arm64v8/alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]


@@ -1,42 +0,0 @@
FROM golang:1.16-alpine as builder
LABEL maintainer="MinIO Inc <dev@min.io>"
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ARG TARGETARCH
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
EXPOSE 9000
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
microdnf clean all
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio", "server", "/data"]


@@ -1,25 +1,22 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ARG TARGETARCH
FROM alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE=off \
MINIO_ACCESS_KEY_FILE=access_key \
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN microdnf update --nodocs
RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
RUN microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000


@@ -1,4 +1,4 @@
FROM ubuntu:20.04
FROM ubuntu
LABEL maintainer="MinIO Inc <dev@min.io>"
@@ -10,3 +10,4 @@ ENV PATH=$PATH:/root/go/bin
RUN go get github.com/go-bindata/go-bindata/go-bindata && \
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs


@@ -1,43 +1,32 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
FROM golang:1.13-alpine
ARG TARGETARCH
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
ARG RELEASE
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
FROM alpine:3.10
LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/verify-minio.sh && \
/usr/bin/verify-minio.sh
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000


@@ -17,28 +17,27 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps lint check-gen
verifiers: getdeps fmt lint
check-gen:
@go generate ./... >/dev/null
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
fmt:
@echo "Running $@ check"
@GO111MODULE=on gofmt -d cmd/
@GO111MODULE=on gofmt -d pkg/
lint:
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
@echo "Running unit tests"
@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
test-race: verifiers build
@echo "Running unit tests under -race"
@@ -47,7 +46,7 @@ test-race: verifiers build
# Verify minio binary
verify:
@echo "Verifying build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
# Verify healing of disks with minio binary
@@ -61,18 +60,7 @@ build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
hotfix: hotfix-vars install
docker-hotfix: hotfix checks
@echo "Building minio docker image '$(TAG)'"
@docker build -t $(TAG) . -f Dockerfile.dev
docker: build checks
@echo "Building minio docker image '$(TAG)'"
docker: build
@docker build -t $(TAG) . -f Dockerfile.dev
# Builds minio and installs it to $GOPATH/bin.

README.md

@@ -5,175 +5,76 @@
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
# Docker Installation
Use the following commands to run a standalone MinIO server on a Docker container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Stable
Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:
```sh
## Docker Container
### Stable
```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
## Edge
Run the following command to run the bleeding-edge image of MinIO on a Docker container using an ephemeral data volume:
### Edge
```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
# macOS
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Homebrew (recommended)
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew (recommended)
Install minio packages using [Homebrew](https://brew.sh/)
```sh
brew install minio/stable/minio
minio server /data
```
> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
```sh
brew uninstall minio
brew install minio/stable/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
## Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Apple macOS | 64-bit Intel | https://dl.min.io/server/minio/release/darwin-amd64/minio |
```sh
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
chmod 755 minio
./minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
# GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
## GNU/Linux
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
## Microsoft Windows
### Binary Download
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| Microsoft Windows | 64-bit | https://dl.min.io/server/minio/release/windows-amd64/minio.exe |
```sh
minio.exe server D:\
minio.exe server D:\Photos
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# FreeBSD
MinIO does not provide an official FreeBSD binary. However, FreeBSD maintains an [upstream release](https://www.freshports.org/www/minio) using [pkg](https://github.com/freebsd/pkg):
## FreeBSD
### Port
Install minio packages using [pkg](https://github.com/freebsd/pkg), MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
```sh
pkg install minio
@@ -182,36 +83,34 @@ sysrc minio_disks=/home/user/Photos
service minio start
```
# Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)
## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
```sh
GO111MODULE=on go get github.com/minio/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
# Deployment Recommendations
## Allow port access for Firewalls
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to enable all traffic coming to specific ports. Use the command below to allow
access to port 9000
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The command below enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
### ufw
For hosts with ufw enabled (Debian-based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000
@@ -246,58 +145,36 @@ Note that `permanent` makes sure the rules are persistent across firewall start,
firewall-cmd --reload
```
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to enable all traffic coming to specific ports. Use the command below to allow
access to port 9000
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The command below enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Pre-existing data
When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
The above statement is also valid for all gateway backends.
# Test MinIO Connectivity
## Test using MinIO Browser
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.
![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
# Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):
## Pre-existing data
When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
The above statement is also valid for all gateway backends.
## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster and restart them, as shown in the following command from the MinIO client (mc):
```
mc admin update <minio alias, e.g., myminio>
```
> NOTE: some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation, `mc admin update` is the recommended mechanism to upgrade all servers at once.
## Important things to remember during MinIO upgrades
**Important things to remember during upgrades**:
- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications would retry and continue their respective operations upon upgrade.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms to roll out updates.
- In the case of federated setups, `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` until all clusters have been updated.
- If you are updating the server it is always recommended (unless explicitly mentioned in MinIO server release notes), to update `mc` once all the servers have been upgraded using `mc update`.
- `mc admin update` is disabled in docker/container environments, container environments provide their own mechanisms for updating running containers.
- If you are using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If you are using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
# Explore Further
## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
@@ -305,8 +182,8 @@ mc admin update <minio alias, e.g., myminio>
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
# Contribute to MinIO Project
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
# License
Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fminio%2Fminio.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)


@@ -7,33 +7,29 @@ MinIO is a very lightweight service that can easily be combined with other applications
## Docker Container
### Stable
```
docker run -p 9000:9000 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio server /data
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
```
### Edge
```
docker run -p 9000:9000 \
-e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
-e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
minio/minio:edge server /data
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
> NOTE: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) argument. Generally, it is not recommended to use default keys with containers. For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
## macOS
### Homebrew (recommended)
### Homebrew
Install minio using [Homebrew](http://brew.sh/)
```sh
brew install minio/stable/minio
minio server /data
```
#### Note
If you previously installed minio using `brew install minio`, it is recommended that you reinstall it from the official `minio/stable/minio` repo instead. The homebrew version is not very stable due to a golang 1.8 bug.
> NOTE: If you previously installed minio using `brew install minio`, it is recommended that you reinstall it from the official `minio/stable/minio` repo instead. The homebrew version is not very stable due to a golang 1.8 bug.
```sh
```
brew uninstall minio
brew install minio/stable/minio
```
@@ -53,16 +49,6 @@ chmod 755 minio
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
| Platform | Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
@@ -78,7 +64,7 @@ minio.exe server D:\Photos
## FreeBSD
### Port
Install the minio package using [pkg](https://github.com/freebsd/pkg). MinIO does not officially build FreeBSD binaries; they are maintained by FreeBSD upstream, see [here](https://www.freshports.org/www/minio)
Install the minio package using [pkg](https://github.com/freebsd/pkg).
```sh
pkg install minio
@@ -89,68 +75,14 @@ service minio start
## Install from Source
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.16](https://golang.org/dl/#stable)
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install).
```sh
GO111MODULE=on go get github.com/minio/minio
```
## Allow port access for firewalls
By default, MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to it.
### ufw
For hosts with ufw enabled (Debian-based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000
```sh
ufw allow 9000
```
The command below allows all incoming traffic on ports 9000-9010.
```sh
ufw allow 9000:9010/tcp
```
### firewall-cmd
For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow traffic to specific ports. Use the command below to allow access to port 9000
```sh
firewall-cmd --get-active-zones
```
This command gets the currently active zone(s). Now, apply the port rules to the zone returned above. For example, if the zone is `public`, use the command below
```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
Note that `permanent` makes the rules persistent across firewall start, restart, and reload. Finally, reload the firewall for the changes to take effect.
```sh
firewall-cmd --reload
```
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to allow traffic to specific ports. Use the command below to allow access to port 9000
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The command below allows all incoming traffic on ports 9000-9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
go get -u github.com/minio/minio
```
## Test using MinIO Browser
MinIO Server comes with an embedded web-based object browser. After installation, point your browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio has been installed successfully.
After installation, point your browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio has been installed successfully.
![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)
@@ -162,25 +94,6 @@ MinIO Server comes with an embedded web-based object browser; after installation, point your browser
The above statement is also valid for all gateway backends.
## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all users upgrade from the client using the [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) command. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command:
```
mc admin update <minio alias, e.g., myminio>
```
> NOTE: some releases might not allow rolling upgrades; this is usually called out in the release notes, so it is advised to read the release notes before upgrading. In such a situation, `mc admin update` is the recommended upgrade mechanism to upgrade all servers at once.
### Important things to remember during MinIO upgrades
- `mc admin update` only works if the user running MinIO has write access to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations after the upgrade.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms to roll out updates.
- For federated setups, run `mc admin update` against each cluster individually. Do not update `mc` to any new release until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [`mc` Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
@@ -190,7 +103,4 @@ mc admin update <minio alias, e.g., myminio>
- [MinIO Documentation](https://docs.min.io)
## Contribute to MinIO Project
Please refer to the [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.
## License
Use of MinIO is governed by the Apache 2.0 License, which you can view at [LICENSE](./LICENSE).
Please refer to the [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.


@@ -1,39 +0,0 @@
## Vulnerability Management Policy
This document formally describes the process of addressing and managing a
reported vulnerability that has been found in the MinIO server code base,
any directly connected ecosystem component or a direct / indirect dependency
of the code base.
### Scope
The vulnerability management policy described in this document covers the
process of investigating, assessing and resolving a vulnerability report
opened by a MinIO employee or an external third party.
Therefore, it lists pre-conditions and actions that should be performed to
resolve and fix a reported vulnerability.
### Vulnerability Management Process
The vulnerability management process requires that the vulnerability report
contains the following information:
- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
reported vulnerability and how it might be exploited. Alternatively,
a well-established vulnerability identifier, e.g. CVE number, can be
used instead.
Based on the description mentioned above, a MinIO engineer or security team
member investigates:
- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.
In general, if the vulnerability exists in one of the MinIO code bases
itself - not in a code dependency - then MinIO will, if possible, fix
the vulnerability or implement reasonable countermeasures such that the
vulnerability cannot be exploited anymore.

browser/.gitignore vendored

@@ -17,3 +17,4 @@ release
*.syso
coverage.txt
node_modules
production


@@ -17,13 +17,24 @@ nvm install stable
npm install
```
### Install `go-bindata` and `go-bindata-assetfs`
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
```sh
go get github.com/go-bindata/go-bindata/go-bindata
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
```
## Generating Assets
### Generate ui-assets.go
```sh
npm run release
```
This generates `production` in the current directory.
This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
## Run MinIO Browser with live reload


@@ -57,6 +57,22 @@ export class BrowserDropdown extends React.Component {
const { fetchServerInfo } = this.props
fetchServerInfo()
}
fullScreen(e) {
e.preventDefault()
let el = document.documentElement
if (el.requestFullscreen) {
el.requestFullscreen()
}
if (el.mozRequestFullScreen) {
el.mozRequestFullScreen()
}
if (el.webkitRequestFullscreen) {
el.webkitRequestFullscreen()
}
if (el.msRequestFullscreen) {
el.msRequestFullscreen()
}
}
logout(e) {
e.preventDefault()
web.Logout()
@@ -71,30 +87,24 @@ export class BrowserDropdown extends React.Component {
<i className="fas fa-bars" />
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
<li>
<a href="" onClick={this.showChangePassword.bind(this)}>
Change Password <i className="fas fa-cog" />
</a>
{this.state.showChangePasswordModal && (
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={this.hideChangePassword.bind(this)}
/>
)}
</li>
<li>
<a target="_blank" href="https://docs.min.io/?ref=ob">
Documentation <i className="fas fa-book" />
</a>
</li>
<li>
<a target="_blank" href="https://github.com/minio/minio">
GitHub <i className="fab fa-github" />
</a>
</li>
<li>
<a target="_blank" href="https://min.io/pricing?ref=ob">
Get Support <i className="fas fa-question-circle" />
<a href="" onClick={this.fullScreen}>
Fullscreen <i className="fas fa-expand" />
</a>
</li>
<li>
<a target="_blank" href="https://docs.min.io/">
Documentation <i className="fas fa-book" />
</a>
</li>
<li>
<a target="_blank" href="https://slack.min.io">
Ask for help <i className="fas fa-question-circle" />
</a>
</li>
<li>
@@ -108,9 +118,20 @@ export class BrowserDropdown extends React.Component {
/>
)}
</li>
<li>
<a href="" onClick={this.showChangePassword.bind(this)}>
Change Password <i className="fas fa-cog" />
</a>
{this.state.showChangePasswordModal && (
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={this.hideChangePassword.bind(this)}
/>
)}
</li>
<li>
<a href="" id="logout" onClick={this.logout}>
Logout <i className="fas fa-sign-out-alt" />
Sign Out <i className="fas fa-sign-out-alt" />
</a>
</li>
</Dropdown.Menu>

View File

@@ -15,7 +15,6 @@
*/
import React from "react"
import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -28,7 +27,6 @@ export const Header = () => {
<header className="fe-header">
<Path />
{loggedIn && <StorageInfo />}
{loggedIn && <ObjectsSearch />}
<ul className="feh-actions">
{loggedIn ? (
<BrowserDropdown />

View File

@@ -24,6 +24,9 @@ jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))
jest.mock("../../web", () => ({
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (

View File

@@ -22,8 +22,7 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
bucketsSelector,
bucketsFilterSelector,
(buckets, filter) => buckets.filter(
bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
(buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
)
export const getCurrentBucket = state => state.buckets.currentBucket
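The selector hunk above toggles the bucket filter between a plain case-sensitive substring match and a lowercased, case-insensitive one. The same comparison expressed in Go (a sketch for reference only, since the original is JavaScript):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	buckets := []string{"Photos", "backups", "logs"}
	filter := "pho"
	var matched []string
	for _, b := range buckets {
		// Case-insensitive substring match, as in the lowercased variant above.
		if strings.Contains(strings.ToLower(b), strings.ToLower(filter)) {
			matched = append(matched, b)
		}
	}
	fmt.Println(matched) // [Photos]
}
```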

View File

@@ -54,11 +54,8 @@ export const ObjectItem = ({
href={getDataType(name, contentType) === "folder" ? name : "#"}
onClick={e => {
e.preventDefault()
// onclick function is passed only when we have a prefix
if (onClick) {
onClick()
} else {
checked ? uncheckObject(name) : checkObject(name)
}
}}
>

View File

@@ -18,7 +18,6 @@ import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
import { getFilteredObjects } from "./selectors"
export class ObjectsListContainer extends React.Component {
constructor(props) {
@@ -40,29 +39,22 @@ export class ObjectsListContainer extends React.Component {
})
}
}
componentDidUpdate(prevProps) {
if (this.props.filter !== prevProps.filter) {
this.setState({
page: 1
})
}
}
loadNextPage() {
this.setState(state => {
return { page: state.page + 1 }
})
}
render() {
const { filteredObjects, listLoading } = this.props
const { objects, listLoading } = this.props
const visibleObjects = filteredObjects.slice(0, this.state.page * 100)
const visibleObjects = objects.slice(0, this.state.page * 100)
return (
<div style={{ position: "relative" }}>
<InfiniteScroll
pageStart={0}
loadMore={this.loadNextPage}
hasMore={filteredObjects.length > visibleObjects.length}
hasMore={objects.length > visibleObjects.length}
useWindow={true}
initialLoad={false}
>
@@ -78,8 +70,7 @@ const mapStateToProps = state => {
return {
currentBucket: state.buckets.currentBucket,
currentPrefix: state.objects.currentPrefix,
filteredObjects: getFilteredObjects(state),
filter: state.objects.filter,
objects: state.objects.list,
sortBy: state.objects.sortBy,
sortOrder: state.objects.sortOrder,
listLoading: state.objects.listLoading

View File

@@ -1,43 +0,0 @@
/*
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import * as actionsObjects from "./actions"
export const ObjectsSearch = ({ onChange }) => (
<div
className="input-group ig-left ig-search-dark"
style={{ display: "block" }}
>
<input
className="ig-text"
type="input"
placeholder="Search Objects..."
onChange={e => onChange(e.target.value)}
/>
<i className="ig-helpers" />
</div>
)
const mapDispatchToProps = dispatch => {
return {
onChange: filter =>
dispatch(actionsObjects.setFilter(filter))
}
}
export default connect(undefined, mapDispatchToProps)(ObjectsSearch)

View File

@@ -46,11 +46,11 @@ class PreviewObjectModal extends React.Component {
<ModalBody>
<div className="input-group">
{this.state.url && (
<object data={this.state.url} style={{ display: "block", width: "100%" }}>
<h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
Do not have read permissions to preview "{this.props.object.name}"
</h3>
</object>
<img
alt="Image broken"
src={this.state.url}
style={{ display: "block", width: "100%" }}
/>
)}
</div>
</ModalBody>

View File

@@ -26,7 +26,6 @@ import {
SHARE_OBJECT_EXPIRY_HOURS,
SHARE_OBJECT_EXPIRY_MINUTES
} from "../constants"
import QRCode from "react-qr-code";
export class ShareObjectModal extends React.Component {
constructor(props) {
@@ -90,7 +89,6 @@ export class ShareObjectModal extends React.Component {
<ModalHeader>Share Object</ModalHeader>
<ModalBody>
<div className="input-group copy-text">
<QRCode value={url} size={128}/>
<label>Shareable Link</label>
<input
type="text"

View File

@@ -30,10 +30,7 @@ describe("ObjectItem", () => {
it("shouldn't call onClick when the object isclicked", () => {
const onClick = jest.fn()
const checkObject = jest.fn()
const wrapper = shallow(
<ObjectItem name={"test"} checkObject={checkObject} />
)
const wrapper = shallow(<ObjectItem name={"test"} />)
wrapper.find("a").simulate("click", { preventDefault: jest.fn() })
expect(onClick).not.toHaveBeenCalled()
})
@@ -60,15 +57,9 @@ describe("ObjectItem", () => {
})
it("should call uncheckObject when the object/prefix is unchecked", () => {
const checkObject = jest.fn()
const uncheckObject = jest.fn()
const wrapper = shallow(
<ObjectItem
name={"test"}
checked={true}
checkObject={checkObject}
uncheckObject={uncheckObject}
/>
<ObjectItem name={"test"} checked={true} uncheckObject={uncheckObject} />
)
wrapper.find("input[type='checkbox']").simulate("change")
expect(uncheckObject).toHaveBeenCalledWith("test")

View File

@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"
describe("ObjectsList", () => {
it("should render without crashing", () => {
shallow(<ObjectsListContainer filteredObjects={[]} />)
shallow(<ObjectsListContainer objects={[]} />)
})
it("should render ObjectsList with objects", () => {
const wrapper = shallow(
<ObjectsListContainer
filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
/>
)
expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
const wrapper = shallow(
<ObjectsListContainer
currentBucket="test1"
filteredObjects={[]}
objects={[]}
listLoading={true}
/>
)

View File

@@ -1,32 +0,0 @@
/*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow } from "enzyme"
import { ObjectsSearch } from "../ObjectsSearch"
describe("ObjectsSearch", () => {
it("should render without crashing", () => {
shallow(<ObjectsSearch />)
})
it("should call onChange with search text", () => {
const onChange = jest.fn()
const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
wrapper.find("input").simulate("change", { target: { value: "test" } })
expect(onChange).toHaveBeenCalledWith("test")
})
})

View File

@@ -23,7 +23,6 @@ describe("objects reducer", () => {
const initialState = reducer(undefined, {})
expect(initialState).toEqual({
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,

View File

@@ -36,7 +36,6 @@ import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
export const SET_FILTER = "objects/SET_FILTER"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -58,13 +57,6 @@ export const resetList = () => ({
type: RESET_LIST,
})
export const setFilter = filter => {
return {
type: SET_FILTER,
filter
}
}
export const setListLoading = (listLoading) => ({
type: SET_LIST_LOADING,
listLoading,

View File

@@ -28,7 +28,6 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
state = {
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,
@@ -54,11 +53,6 @@ export default (
...state,
list: []
}
case actionsObjects.SET_FILTER:
return {
...state,
filter: action.filter
}
case actionsObjects.SET_LIST_LOADING:
return {
...state,

View File

@@ -21,13 +21,3 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList
export const getPrefixWritable = state => state.objects.prefixWritable
const objectsSelector = state => state.objects.list
const objectsFilterSelector = state => state.objects.filter
export const getFilteredObjects = createSelector(
objectsSelector,
objectsFilterSelector,
(objects, filter) => objects.filter(
object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
)

View File

@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
// Overwrite the default styling from react-dropzone; otherwise it
// won't handle child elements correctly.
const style = {
flex: "1",
height: "100%",
borderWidth: "0",
borderStyle: "dashed",
borderColor: "#fff"

View File

@@ -138,10 +138,9 @@ describe("Uploads actions", () => {
objects: { currentPrefix: "pre1/" }
})
store.dispatch(uploadsActions.uploadFile(file))
const objectPath = encodeURIComponent("pre1/file1")
expect(open).toHaveBeenCalledWith(
"PUT",
"https://localhost:8080/upload/test1/" + objectPath,
"https://localhost:8080/upload/test1/pre1/file1",
true
)
expect(send).toHaveBeenCalledWith(file)

View File

@@ -94,7 +94,7 @@ export const uploadFile = file => {
_filePath = _filePath.substring(1)
}
const filePath = _filePath
const objectName = encodeURIComponent(`${currentPrefix}${filePath}`)
const objectName = `${currentPrefix}${filePath}`
const uploadUrl = `${
window.location.origin
}${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`

View File

@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'
class Web {
constructor(endpoint) {
const namespace = 'web'
const namespace = 'Web'
this.JSONrpc = new JSONrpc({
endpoint,
namespace

View File

@@ -20,8 +20,7 @@
@media(max-width: @screen-sm-max) {
padding: 75px 0 80px;
}
display: flex;
flex-direction: column;
min-height:100vh;
overflow: auto;
}

View File

@@ -169,24 +169,6 @@ select.form-control {
}
}
.ig-search-dark {
&:before {
font-family: @font-family-icon;
font-weight: 900;
content: '\f002';
font-size: 15px;
position: absolute;
left: 2px;
top: 8px;
color: rgba(0, 0, 0, 0.5);
}
.ig-text {
padding-left: 25px;
.placeholder(rgba(0, 0, 0, 0.5))
}
}
.ig-search {
&:before {
font-family: @font-family-icon;
@@ -288,4 +270,4 @@ select.form-control {
.set-expire-decrease {
bottom: -27px;
.rotate(-180deg);
}
}

View File

@@ -105,7 +105,7 @@ div.fesl-row {
.fesl-item-name {
a {
cursor: pointer;
cursor: default;
}
}
@@ -114,6 +114,12 @@ div.fesl-row {
----------------------------*/
&[data-type=folder] {
.list-type(#a1d6dd, '\f07b');
.fesl-item-name {
a {
cursor: pointer;
}
}
}
&[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
&[data-type=zip] { .list-type(#427089, '\f1c6'); }
@@ -349,7 +355,6 @@ div.fesl-row {
margin: 0;
height: 100%;
text-align: right;
white-space: nowrap;
}
.dropdown {
@@ -496,4 +501,4 @@ div.fesl-row {
.opacity(1);
right: 0;
}
}
}

View File

@@ -75,11 +75,6 @@
border-color: darken(@input-border, 5%);
}
}
svg {
display: block;
margin: 0 auto 5px;
}
}
/*--------------------------
@@ -155,4 +150,4 @@
100% {
transform: rotate(360deg);
}
}
}

View File

@@ -1,11 +0,0 @@
package browser
import "embed"
//go:embed production/*
var fs embed.FS
// GetStaticAssets returns assets
func GetStaticAssets() embed.FS {
return fs
}
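The helper deleted above exposed UI assets embedded at compile time via Go 1.16's `//go:embed` directive. For background, here is a minimal sketch - not code from this repository - of serving such an `embed.FS` over HTTP; the import path mirrors the package shown above and is assumed for illustration:
```go
package main

import (
	"io/fs"
	"log"
	"net/http"

	"github.com/minio/minio/browser" // assumed: the package deleted above
)

func main() {
	// Strip the "production/" prefix so index.html is reachable at "/".
	assets, err := fs.Sub(browser.GetStaticAssets(), "production")
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/", http.FileServer(http.FS(assets)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```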

View File

@@ -14,11 +14,19 @@
* limitations under the License.
*/
var moment = require('moment')
var async = require('async')
var exec = require('child_process').exec
var fs = require('fs')
var isProduction = process.env.NODE_ENV == 'production' ? true : false
var assetsFileName = ''
var commitId = ''
var date = moment.utc()
var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z'
var buildType = 'DEVELOPMENT'
if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD
rmDir = function(dirPath) {
try { var files = fs.readdirSync(dirPath); }
@@ -45,6 +53,74 @@ async.waterfall([
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (isProduction) {
fs.renameSync('production/index_bundle.js',
'production/index_bundle-' + releaseTag + '.js')
} else {
fs.renameSync('dev/index_bundle.js',
'dev/index_bundle-' + releaseTag + '.js')
}
var cmd = 'git log --format="%H" -n1'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (!stdout) throw new Error('commitId is empty')
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
var cmd = 'gofmt -s -w -l bindata_assetfs.go'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
fs.renameSync('bindata_assetfs.go', assetsFileName)
fs.appendFileSync(assetsFileName, '\n')
fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' +
releaseTag + '"\n')
fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n')
fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"')
fs.appendFileSync(assetsFileName, '\n')
var contents;
if (isProduction) {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_productionIndexHtml/g, '_productionIndexHTML')
.replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes')
.replace(/productionIndexHtml/g, 'productionIndexHTML')
.replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs')
.replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes')
.replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs')
.replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs')
.replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes')
.replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs');
} else {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_devIndexHtml/g, '_devIndexHTML')
.replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes')
.replace(/devIndexHtml/g, 'devIndexHTML')
.replace(/_devIndex_bundleJs/g, '_devIndexBundleJs')
.replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes')
.replace(/devIndex_bundleJs/g, 'devIndexBundleJs')
.replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs')
.replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes')
.replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs');
}
contents = contents.replace(/MINIO_UI_VERSION/g, version)
contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + releaseTag + '.js')
fs.writeFileSync(assetsFileName, contents, 'utf8')
console.log('UI assets file :', assetsFileName)
cb()
}
], function(err) {
if (err) return console.log(err)
})
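For reference, the trailer that the waterfall above appends to `ui-assets.go` comes out roughly as follows (a sketch with hypothetical values; `buildType` stays `DEVELOPMENT` unless `MINIO_UI_BUILD` overrides it):
```go
// Trailer appended by build.js (example values only):
var UIReleaseTag = "RELEASE.2020-06-01T12-30-00Z"
var UICommitID = "0123456789abcdef0123456789abcdef01234567"
var UIVersion = "2020-06-01T12:30:00Z"
```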

browser/package-lock.json (generated, 32456 lines changed)

File diff suppressed because it is too large.

View File

@@ -6,7 +6,7 @@
"test": "jest",
"dev": "NODE_ENV=dev webpack-dev-server --devtool cheap-module-eval-source-map --progress --colors --hot --content-base dev",
"build": "NODE_ENV=dev node build.js",
"release": "NODE_ENV=production node build.js",
"release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js",
"format": "esformatter -i 'app/**/*.js'"
},
"jest": {
@@ -84,7 +84,6 @@
"react-dropzone": "^11.0.1",
"react-infinite-scroller": "^1.2.4",
"react-onclickout": "^2.0.8",
"react-qr-code": "^1.1.1",
"react-redux": "^5.1.2",
"react-router-dom": "^5.2.0",
"redux": "^4.0.5",

Five binary image files not shown (previous sizes: 3.6 KiB, 15 KiB, 16 KiB, 17 KiB, 4.7 KiB).

View File

@@ -1,59 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>MinIO Browser</title>
<link rel="icon" type="image/png" sizes="32x32" href="/minio/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/minio/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/minio/favicon-16x16.png">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>
<body>
<div class="page-load">
<div class="pl-inner">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>
<!--[if lt IE 11]>
<div class="ie-warning">
<div class="iw-inner">
<i class="iwi-icon fas fa-exclamation-triangle"></i>
You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser
<ul>
<li>
<a href="http://www.google.com/chrome/">
<img src="chrome.png" alt="">
<div>Chrome</div>
</a>
</li>
<li>
<a href="https://www.mozilla.org/en-US/firefox/new/">
<img src="firefox.png" alt="">
<div>Firefox</div>
</a>
</li>
<li>
<a href="https://www.apple.com/safari/">
<img src="safari.png" alt="">
<div>Safari</div>
</a>
</li>
</ul>
<div class="iwi-skip">Skip & Continue</div>
</div>
</div>
<![endif]-->
<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@@ -1,98 +0,0 @@
.page-load {
position: fixed;
width: 100%;
height: 100%;
top: 0;
left: 0;
background: #002a37;
z-index: 100;
transition: opacity 200ms;
-webkit-transition: opacity 200ms;
}
.pl-0{
opacity: 0;
}
.pl-1 {
display: none;
}
.pl-inner {
position: absolute;
width: 100px;
height: 100px;
left: 50%;
margin-left: -50px;
top: 50%;
margin-top: -50px;
text-align: center;
-webkit-animation: fade-in 500ms;
animation: fade-in 500ms;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
animation-delay: 350ms;
-webkit-animation-delay: 350ms;
-webkit-backface-visibility: visible;
backface-visibility: visible;
}
.pl-inner:before {
content: '';
position: absolute;
width: 100%;
height: 100%;
left: 0;
top: 0;
display: block;
-webkit-animation: spin 1000ms infinite linear;
animation: spin 1000ms infinite linear;
border: 1px solid rgba(255, 255, 255, 0.2);;
border-left-color: #fff;
border-radius: 50%;
}
.pl-inner > img {
width: 30px;
margin-top: 21px;
}
@-webkit-keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@-webkit-keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
@keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}

View File

@@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="93px" height="187px" viewBox="0 0 93 187" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
<title>logo</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="logo" transform="translate(0.187500, -0.683594)" fill="#FFFFFF" fill-rule="nonzero">
<path d="M91.49,46.551 C86.7827023,38.7699609 82.062696,30.9966172 77.33,23.231 C74.87,19.231 72.33,15.231 69.88,11.231 C69.57,10.731 69.18,10.291 68.88,9.831 C64.35,2.931 55.44,-1.679 46.73,2.701 C42.9729806,4.51194908 40.0995718,7.75449451 38.7536428,11.7020516 C37.4077139,15.6496086 37.701799,19.9721186 39.57,23.701 C41.08,26.641 43.57,29.121 45.91,31.581 C53.03,39.141 60.38,46.491 67.45,54.111 C72.4175495,59.4492221 74.4526451,66.8835066 72.8965704,74.0075359 C71.3404956,81.1315653 66.390952,87.0402215 59.65,89.821 C59.4938176,89.83842 59.3361824,89.83842 59.18,89.821 L59.18,54.591 C46.6388051,61.0478363 35.3944735,69.759905 26.01,80.291 C11.32,96.671 2.64,117.141 0.01,132.071 L23.96,119.821 C31.96,115.771 39.86,111.821 48.14,107.581 L48.14,175.921 L59.14,187.131 L59.14,101.831 C59.14,101.831 59.39,101.711 60.22,101.261 C63.5480598,99.6738911 66.7772674,97.8873078 69.89,95.911 C77.7130888,90.4306687 82.7479457,81.8029342 83.6709542,72.295947 C84.5939627,62.7889599 81.3127806,53.3538429 74.69,46.471 C66.49,37.891 58.24,29.351 50.05,20.761 C47.67,18.261 47.72,15.101 50.05,12.881 C52.38,10.661 55.56,10.881 57.96,13.331 L61.38,16.781 C64.1,19.681 66.79,22.611 69.53,25.481 C76.4547149,32.7389629 83.3947303,39.9823123 90.35,47.211 C90.7,47.571 91.12,47.871 91.5,48.211 L91.93,47.951 C91.8351945,47.4695902 91.6876376,47.0000911 91.49,46.551 Z M48.11,94.931 C47.9883217,95.5022568 47.6230065,95.9917791 47.11,96.271 C42.72,98.601 38.29,100.871 33.87,103.141 L17.76,111.401 C24.771203,96.7435071 35.1132853,83.9289138 47.96,73.981 C48.08,74.221 48.16,74.301 48.16,74.381 C48.15,81.231 48.17,88.081 48.11,94.931 Z" id="Shape"></path>
</g>
</g>
</svg>

Binary image data not shown (previous sizes: 2.2 KiB and 4.9 KiB).

browser/ui-assets.go (new file, 638 lines)

File diff suppressed because one or more lines are too long

View File

@@ -21,7 +21,7 @@ _init() {
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.16"
GO_VERSION="1.13"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)

View File

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
}
function _build() {

buildscripts/gateway-tests.sh (new executable file, 69 lines)
View File

@@ -0,0 +1,69 @@
#!/bin/bash
#
# MinIO Cloud Storage, (C) 2019 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
set -E
set -o pipefail
function start_minio_server()
{
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
server_pid=$!
sleep 10
echo "$server_pid"
}
function start_minio_gateway_s3()
{
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
minio --quiet --json gateway s3 http://127.0.0.1:24242 \
--address 127.0.0.1:24240 > gateway.log 2>&1 &
gw_pid=$!
sleep 10
echo "$gw_pid"
}
function main()
{
sr_pid="$(start_minio_server)"
gw_pid="$(start_minio_gateway_s3)"
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
healthcheck mc minio-dotnet minio-js \
minio-py s3cmd s3select security
rv=$?
kill "$sr_pid"
kill "$gw_pid"
sleep 3
if [ "$rv" -ne 0 ]; then
echo "=========== Gateway ==========="
cat "gateway.log"
echo "=========== Server ==========="
cat "server.log"
fi
rm -f gateway.log server.log
}
main "$@"

View File

@@ -44,21 +44,10 @@ func releaseTag(version string) string {
relPrefix = prefix
}
relSuffix := ""
if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
relSuffix = hotfix
}
relTag := strings.Replace(version, " ", "-", -1)
relTag = strings.Replace(relTag, ":", "-", -1)
relTag = strings.Replace(relTag, ",", "", -1)
relTag = relPrefix + "." + relTag
if relSuffix != "" {
relTag += "." + relSuffix
}
return relTag
return relPrefix + "." + relTag
}
// commitID returns the abbreviated commit-id hash of the last commit.
@@ -79,12 +68,5 @@ func commitID() string {
}
func main() {
var version string
if len(os.Args) > 1 {
version = os.Args[1]
} else {
version = time.Now().UTC().Format(time.RFC3339)
}
fmt.Println(genLDFlags(version))
fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
}
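As context for the hunk above: the release tag is derived from an RFC 3339 timestamp by mapping spaces and colons to dashes and dropping commas, with an optional hotfix suffix appended. A standalone sketch of that transformation (not repository code):
```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	version := time.Date(2020, 6, 1, 12, 30, 0, 0, time.UTC).Format(time.RFC3339)
	relTag := strings.NewReplacer(" ", "-", ":", "-", ",", "").Replace(version)
	fmt.Println("RELEASE." + relTag) // RELEASE.2020-06-01T12-30-00Z
}
```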

View File

@@ -3,5 +3,5 @@
set -e
for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
done

View File

@@ -33,7 +33,6 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
@@ -46,64 +45,88 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
minio_pid=$!
sleep 10
echo "$minio_pid"
}
function start_minio_erasure()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
minio_pid=$!
sleep 15
echo "$minio_pid"
}
function start_minio_erasure_sets()
{
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
minio_pid=$!
sleep 15
echo "$minio_pid"
}
function start_minio_pool_erasure_sets()
function start_minio_zone_erasure_sets()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_pool_erasure_sets_ipv6()
function start_minio_zone_erasure_sets_ipv6()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_dist_erasure()
{
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
minio_pids[3]=$!
sleep 40
echo "${minio_pids[@]}"
}
function run_test_fs()
{
start_minio_fs
minio_pid="$(start_minio_fs)"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
kill "$minio_pid"
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -114,14 +137,13 @@ function run_test_fs()
return "$rv"
}
function run_test_erasure_sets()
{
start_minio_erasure_sets
function run_test_erasure_sets() {
minio_pid="$(start_minio_erasure_sets)"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
kill "$minio_pid"
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -132,51 +154,55 @@ function run_test_erasure_sets()
return "$rv"
}
function run_test_pool_erasure_sets()
function run_test_zone_erasure_sets()
{
start_minio_pool_erasure_sets
minio_pids=( $(start_minio_zone_erasure_sets) )
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-900$i.log"
cat "$WORK_DIR/zone-minio-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-900$i.log"
rm -f "$WORK_DIR/zone-minio-900$i.log"
done
return "$rv"
}
function run_test_pool_erasure_sets_ipv6()
function run_test_zone_erasure_sets_ipv6()
{
start_minio_pool_erasure_sets_ipv6
minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
done
return "$rv"
@@ -184,12 +210,12 @@ function run_test_pool_erasure_sets_ipv6()
function run_test_erasure()
{
start_minio_erasure
minio_pid="$(start_minio_erasure)"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
kill "$minio_pid"
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -202,12 +228,14 @@ function run_test_erasure()
function run_test_dist_erasure()
{
start_minio_dist_erasure
minio_pids=( $(start_minio_dist_erasure) )
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -223,7 +251,7 @@ function run_test_dist_erasure()
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
return "$rv"
return "$rv"
}
function purge()
@@ -296,14 +324,14 @@ function main()
fi
echo "Testing in Distributed Eraure expanded setup"
if ! run_test_pool_erasure_sets; then
if ! run_test_zone_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_pool_erasure_sets_ipv6; then
if ! run_test_zone_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1

View File

@@ -28,55 +28,44 @@ WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
export GOGC=25
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
declare -a minio_pids
declare -a ARGS
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
start_port=$(shuf -i 10000-65000 -n 1)
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
done
"${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
disown $!
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
minio_pids[0]=$!
disown "${minio_pids[0]}"
"${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
disown $!
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
minio_pids[1]=$!
disown "${minio_pids[1]}"
"${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
disown $!
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
minio_pids[2]=$!
disown "${minio_pids[2]}"
sleep "$1"
if [ "$(pgrep -c minio)" -ne 3 ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1;
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
fi
# forcibly killing, to proceed further properly.
kill -9 "$pid"
sleep 1 # wait 1sec per pid
done
}

View File

@@ -20,6 +20,7 @@ import (
"encoding/xml"
"io"
"net/http"
"net/url"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
@@ -60,7 +61,7 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -124,7 +125,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -175,11 +176,11 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -239,11 +240,11 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
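One visible change in this handler is decoding object names with the standard library's `url.PathUnescape` instead of a dedicated `unescapePath` helper. A small reminder of the standard-library semantics (a sketch, not repository code): `PathUnescape` keeps `+` literal, while `QueryUnescape` decodes it to a space.
```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	p, _ := url.PathUnescape("photos%2Fsummer+1.jpg")
	fmt.Println(p) // photos/summer+1.jpg ("+" stays literal in a path)

	q, _ := url.QueryUnescape("photos%2Fsummer+1.jpg")
	fmt.Println(q) // photos/summer 1.jpg ("+" becomes a space in a query)
}
```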

View File

@@ -1,294 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
const (
bucketQuotaConfigFile = "quota.json"
bucketTargetsFile = "bucket-targets.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
update := r.URL.Query().Get("update") == "true"
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var target madmin.BucketTarget
if err = json.Unmarshal(reqBytes, &target); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
if sameTarget && bucket == target.TargetBucket {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
return
}
target.SourceBucket = bucket
if !update {
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
}
if target.Arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
switch err.(type) {
case BucketRemoteConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketTargets")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arnType := vars["type"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if bucket != "" {
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
data, err := json.Marshal(targets)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arn := vars["arn"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessNoContent(w)
}

View File

@@ -42,14 +42,14 @@ import (
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return auth.Credentials{}, nil
}
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return cred, nil
@@ -62,7 +62,7 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteConfigKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -104,7 +104,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -131,13 +131,12 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}
dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -159,16 +158,6 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
if dynamic {
// Apply dynamic values.
if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
globalNotificationSys.SignalService(serviceReloadDynamic)
// If all values were dynamic, tell the client.
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}
writeSuccessResponseHeadersOnly(w)
}
@@ -176,7 +165,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -184,7 +173,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
cfg := globalServerConfig
if newObjectLayerFn() == nil {
if globalSafeMode {
var err error
cfg, err = getValidConfig(objectAPI)
if err != nil {
@@ -214,7 +203,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -251,7 +240,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -277,12 +266,12 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
return
}
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -299,7 +288,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -339,7 +328,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKV")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -367,7 +356,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
@@ -389,12 +378,12 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
}
cfg := newServerConfig()
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -424,7 +413,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {

View File

@@ -19,15 +19,11 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -39,14 +35,14 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
@@ -59,7 +55,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
@@ -69,7 +65,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, _, err := globalIAMSys.IsTempUser(accessKey)
ok, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -97,7 +93,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
@@ -131,44 +127,16 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
name := vars["accessKey"]
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}
implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -188,7 +156,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
@@ -233,7 +201,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
@@ -262,7 +230,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
@@ -288,7 +256,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
@@ -325,7 +293,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
@@ -336,7 +304,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
accessKey := vars["accessKey"]
status := vars["status"]
// This API is not allowed to lookup accessKey user status
// Custom IAM policies not allowed for admin user.
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -360,69 +328,22 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := path.Clean(vars["accessKey"])
accessKey := vars["accessKey"]
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
// Custom IAM policies not allowed for admin user.
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// If the incoming access key matches the parent user, we should
// reject password change requests.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
parentUser := cred.ParentUser
if parentUser == "" {
parentUser = cred.AccessKey
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
DenyOnly: true, // check if changing password is explicitly denied.
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -444,7 +365,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -462,11 +383,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -477,12 +398,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -496,12 +411,18 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -541,11 +462,11 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -596,11 +517,11 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -651,15 +572,15 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}
// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")
// AccountUsageInfoHandler returns usage
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountUsageInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -681,7 +602,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -694,7 +614,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -708,6 +627,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return rd, wr
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
@@ -715,56 +640,13 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
logger.LogIf(ctx, err)
}
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
accountName := cred.AccessKey
var policies []string
switch globalIAMSys.usersSysType {
case MinIOUsersSysType:
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
case LDAPUsersSysType:
parentUser := accountName
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
default:
err = errors.New("should not happen!")
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
if cred.ParentUser != "" {
accountName = cred.ParentUser
}
acctInfo := madmin.AccountInfo{
acctInfo := madmin.AccountUsageInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}
for _, bucket := range buckets {
@@ -800,7 +682,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -827,7 +709,7 @@ func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
@@ -840,10 +722,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
return
}
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(policy)
w.(http.Flusher).Flush()
}
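The hunk above carries a small behavioral fix along with the audit change: the newer code checks the error from json.NewEncoder(w).Encode before flushing, where the older code silently dropped it. A self-contained sketch of the checked write; writePolicy is a hypothetical stand-in, and note that once bytes have been written a clean error response is no longer possible, so surfacing the failure is best-effort.

package main

import (
    "encoding/json"
    "net/http"
)

func writePolicy(w http.ResponseWriter, policy interface{}) {
    if err := json.NewEncoder(w).Encode(policy); err != nil {
        // A marshal failure or client disconnect lands here; if the body
        // was partially written this error response may not reach the
        // client intact, but the failure is at least surfaced.
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    if f, ok := w.(http.Flusher); ok {
        f.Flush() // push buffered bytes to the client immediately
    }
}

func main() {
    http.HandleFunc("/policy", func(w http.ResponseWriter, r *http.Request) {
        writePolicy(w, map[string]string{"Version": "2012-10-17"})
    })
    http.ListenAndServe(":8080", nil)
}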
@@ -851,7 +730,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -885,7 +764,7 @@ func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -919,7 +798,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
@@ -947,7 +826,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
@@ -999,7 +878,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
@@ -1012,7 +891,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, _, err := globalIAMSys.IsTempUser(entityName)
ok, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return

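All the user and group handlers above lean on one guard shape: validateAdminUsersReq writes the error response itself and signals failure by returning a nil object layer, so every caller reduces to a two-line check. A self-contained sketch of that shape follows; requireAdmin and its header check are hypothetical stand-ins for the real signature and IAM-policy validation.

package main

import "net/http"

// requireAdmin mirrors validateAdminUsersReq: on failure it writes the
// error response itself and reports failure, so callers never duplicate
// the error plumbing.
func requireAdmin(w http.ResponseWriter, r *http.Request) bool {
    if r.Header.Get("X-Admin") != "true" { // placeholder for signature + policy checks
        http.Error(w, "access denied", http.StatusForbidden)
        return false
    }
    return true
}

func removeUser(w http.ResponseWriter, r *http.Request) {
    if !requireAdmin(w, r) {
        return // response already written, exactly like `if objectAPI == nil { return }`
    }
    w.Write([]byte("user removed\n"))
}

func main() {
    http.HandleFunc("/remove-user", removeUser)
    http.ListenAndServe(":8080", nil)
}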
File diff suppressed because it is too large

View File

@@ -33,27 +33,27 @@ import (
"github.com/minio/minio/pkg/madmin"
)
// adminErasureTestBed - encapsulates subsystems that need to be setup for
// adminXLTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
type adminXLTestBed struct {
xlDirs []string
objLayer ObjectLayer
router *mux.Router
}
// prepareAdminErasureTestBed - helper function that setups a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
// prepareAdminXLTestBed - helper function that setups a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
// Set globalIsErasure to indicate that the setup uses an erasure
// Set globalIsXL to indicate that the setup uses an erasure
// code backend.
globalIsErasure = true
globalIsXL = true
// Initializing objectLayer for HealFormatHandler.
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initialize boot time
globalBootTime = UTCNow()
globalEndpoints = mustGetPoolEndpoints(erasureDirs...)
globalEndpoints = mustGetZoneEndpoints(xlDirs...)
newAllSubsystems()
@@ -76,30 +76,36 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true)
return &adminErasureTestBed{
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
return &adminXLTestBed{
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}
// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminErasureTestBed) TearDown() {
removeRoots(atb.erasureDirs)
func (atb *adminXLTestBed) TearDown() {
removeRoots(atb.xlDirs)
resetTestGlobals()
}
// initTestObjLayer - Helper function to initialize an Erasure-based object
// initTestObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
erasureDirs, err := getRandomDisks(16)
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetPoolEndpoints(erasureDirs...)
endpoints := mustGetNewEndpoints(xlDirs...)
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
return nil, nil, err
}
globalPolicySys = NewPolicySys()
objLayer, err := newErasureServerPools(ctx, endpoints)
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
@@ -108,7 +114,7 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
return objLayer, erasureDirs, nil
return objLayer, xlDirs, nil
}
// cmdType - Represents different service subcommands like status, stop
@@ -177,9 +183,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
@@ -248,9 +254,9 @@ func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
@@ -292,7 +298,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
}{
// 1. Server not in quorum.
{
err: errErasureWriteQuorum,
err: errXLWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
@@ -320,13 +326,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(mgmtClientToken, clientToken)
v.Add(string(mgmtClientToken), clientToken)
}
if forceStart {
v.Add(mgmtForceStart, "")
v.Add(string(mgmtForceStart), "")
}
if forceStop {
v.Add(mgmtForceStop, "")
v.Add(string(mgmtForceStop), "")
}
return v
}
@@ -344,11 +350,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{mgmtPrefix: "objprefix"},
{string(mgmtPrefix): "objprefix"},
// Valid cases
{},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
}
// Body is always valid - we do not test JSON decoding.
@@ -357,7 +363,7 @@ func TestExtractHealInitParams(t *testing.T) {
// Test all combinations!
for pIdx, parms := range qParmsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true

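The string(...) wrappers in the older test code above exist because those mgmt* query keys were declared with a named string type, which url.Values.Add (taking a plain string) will not accept without an explicit conversion; the newer code declares them as untyped string constants and the conversions disappear. A self-contained illustration, with the *Typed/*Plain names invented for the sketch:

package main

import (
    "fmt"
    "net/url"
)

type mgmtQueryKey string

const (
    mgmtBucketTyped mgmtQueryKey = "bucket" // named type: needs string(...)
    mgmtBucketPlain              = "bucket" // untyped constant: usable directly
)

func main() {
    v := url.Values{}
    v.Add(string(mgmtBucketTyped), "photos") // conversion required
    v.Add(mgmtBucketPlain, "archive")        // untyped constant converts implicitly
    fmt.Println(v.Encode())
}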
View File

@@ -21,7 +21,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"
@@ -64,7 +64,7 @@ var (
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
apiErr := toAPIError(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
@@ -77,6 +77,9 @@ type healSequenceStatus struct {
FailureDetail string `json:"Detail,omitempty"`
StartTime time.Time `json:"StartTime"`
// disk information
NumDisks int `json:"NumDisks"`
// settings for the heal sequence
HealSettings madmin.HealOpts `json:"Settings"`
@@ -86,113 +89,29 @@ type healSequenceStatus struct {
// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.RWMutex
sync.Mutex
// map of heal path to heal sequence
healSeqMap map[string]*healSequence // Indexed by endpoint
healLocalDisks map[Endpoint]struct{}
healStatus map[string]healingTracker // Indexed by disk ID
healSeqMap map[string]*healSequence
}
// newHealState - initialize global heal state management
func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
}
return hstate
}
func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()
return len(ahs.healLocalDisks)
}
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
for id, disk := range ahs.healStatus {
for _, ep := range healLocalDisks {
if disk.Endpoint == ep.String() {
delete(ahs.healStatus, id)
}
}
}
}
// updateHealStatus will update the heal status.
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
ahs.Lock()
defer ahs.Unlock()
ahs.healStatus[tracker.ID] = *tracker
}
// Sort by zone, set and disk index
func sortDisks(disks []madmin.Disk) {
sort.Slice(disks, func(i, j int) bool {
a, b := &disks[i], &disks[j]
if a.PoolIndex != b.PoolIndex {
return a.PoolIndex < b.PoolIndex
}
if a.SetIndex != b.SetIndex {
return a.SetIndex < b.SetIndex
}
return a.DiskIndex < b.DiskIndex
})
}
// getLocalHealingDisks returns local healing disks indexed by endpoint.
func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
ahs.RLock()
defer ahs.RUnlock()
dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
for _, v := range ahs.healStatus {
dst[v.Endpoint] = v.toHealingDisk()
}
return dst
}
func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
ahs.RLock()
defer ahs.RUnlock()
var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
// initHealState - initialize healing apparatus
func initHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
}
go healState.periodicHealSeqsClean(GlobalContext)
return healState
}
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
periodicTimer := time.NewTimer(time.Minute * 5)
defer periodicTimer.Stop()
for {
select {
case <-periodicTimer.C:
periodicTimer.Reset(time.Minute * 5)
case <-time.After(time.Minute * 5):
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -236,17 +155,12 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "unknown",
ClientToken: "invalid",
StartTime: UTCNow(),
}
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
hsp = madmin.HealStopSuccess{
ClientToken: clientToken,
ClientToken: he.clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
@@ -275,17 +189,24 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
respBytes []byte, apiErr APIError, errMsg string) {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
if exists {
existsAndLive = !he.hasEnded()
}
if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
@@ -299,9 +220,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// Check if new heal sequence to be started overlaps with any
// existing, running sequence
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
@@ -309,24 +230,19 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
}
// Add heal state and start sequence
ahs.healSeqMap[hpath] = h
ahs.healSeqMap[h.path] = h
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
go h.healSequenceStart()
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
ClientToken: h.clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
return nil, toAPIError(h.ctx, err), ""
}
return b, noError, ""
}
@@ -335,17 +251,14 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
func (ahs *allHealState) PopHealStatusJSON(path string,
clientToken string) ([]byte, APIErrorCode) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
h, exists := ahs.getHealSequence(path)
if !exists {
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
}
// Check if client-token is valid
@@ -383,17 +296,18 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
// healSource denotes single entity and heal option.
type healSource struct {
bucket string
object string
versionID string
opts *madmin.HealOpts // optional heal option overrides default setting
path string // entity path (format, buckets, objects) to heal
opts *madmin.HealOpts // optional heal option overrides default setting
}
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
// bucket, and object on which heal seq. was initiated
bucket, object string
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string
// path is just pathJoin(bucket, objPrefix)
path string
// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource
@@ -454,27 +368,27 @@ type healSequence struct {
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
clientToken := mustGetUUID()
return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
object: objPrefix,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
reportProgress: true,
startTime: UTCNow(),
clientToken: clientToken,
clientToken: mustGetUUID(),
clientAddress: clientAddr,
forceStarted: forceStart,
settings: hs,
currentStatus: healSequenceStatus{
Summary: healNotStartedStatus,
HealSettings: hs,
NumDisks: numDisks,
},
traverseAndHealDoneCh: make(chan error),
cancelCtx: cancel,
@@ -570,12 +484,9 @@ func (h *healSequence) isQuitting() bool {
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
}
// stops the heal sequence - safe to call multiple times.
@@ -656,7 +567,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
func (h *healSequence) healSequenceStart() {
// Set status as running
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
@@ -664,7 +575,7 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
h.mutex.Unlock()
if h.sourceCh == nil {
go h.traverseAndHeal(objAPI)
go h.traverseAndHeal()
} else {
go h.healFromSourceCh()
}
@@ -689,7 +600,8 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.Summary = healFinishedStatus
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.mutex.Unlock()
// drain traverse channel so the traversal
@@ -703,41 +615,16 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
}
}
func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
globalHealConfigMu.Unlock()
// Send heal request
task := healTask{
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
path: source.path,
opts: h.settings,
responseCh: h.respCh,
}
if source.opts != nil {
task.opts = *source.opts
}
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
globalBackgroundHealRoutine.queueHealTask(task)
select {
@@ -745,11 +632,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if !h.reportProgress {
// Object might have been deleted by the time heal
// was attempted; we should ignore this object,
// return the error, and not count this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
// Return the error so that caller can handle it.
return res.err
// return success.
if isErrObjectNotFound(res.err) {
return nil
}
h.mutex.Lock()
@@ -776,7 +661,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
if isErrObjectNotFound(res.err) {
return nil
}
// Only report object error
@@ -792,37 +677,35 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
}
func (h *healSequence) healItemsFromSourceCh() error {
bucketsOnly := true // heal buckets only, not objects.
if err := h.healItems(bucketsOnly); err != nil {
logger.LogIf(h.ctx, err)
}
for {
select {
case source, ok := <-h.sourceCh:
if !ok {
return nil
}
var itemType madmin.HealItemType
switch source.bucket {
case nopHeal:
continue
case SlashSeparator:
itemType = madmin.HealItemMetadata
default:
if source.object == "" {
itemType = madmin.HealItemBucket
} else {
itemType = madmin.HealItemObject
}
}
switch {
case source.path == nopHeal:
continue
case source.path == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(source.path, SlashSeparator):
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
}
if err := h.queueHealTask(source, itemType); err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
logger.LogIf(h.ctx, err)
}
h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
@@ -833,28 +716,24 @@ func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
}
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Try to pro-actively heal backend-encrypted file.
if err := h.queueHealTask(healSource{
bucket: minioMetaBucket,
object: backendEncryptedFile,
}, madmin.HealItemBucketMetadata); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}
// Start healing the config prefix.
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
// Heal buckets and objects
return h.healBuckets(objAPI, bucketsOnly)
}
func (h *healSequence) healItems(bucketsOnly bool) error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
}
// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
return err
}
// Start healing the bucket config prefix.
if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
return err
}
// Heal buckets and objects
return h.healBuckets(bucketsOnly)
}
// traverseAndHeal - traverses on-disk data and performs healing
@@ -864,35 +743,37 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
func (h *healSequence) traverseAndHeal() {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
close(h.traverseAndHealDoneCh)
}
// healMinioSysMeta - heals all files under a given meta prefix; returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
// NOTE: Healing of metadata is run regardless
// of any bucket being selected; this ensures that
// metadata is always up to date and correct.
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
if h.isQuitting() {
return errHealStopSignalled
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
// Object might have been deleted by the time heal
// was attempted; we ignore this object and move on.
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
if isErrObjectNotFound(herr) {
return nil
}
return err
return herr
})
}
}
@@ -904,32 +785,39 @@ func (h *healSequence) healDiskFormat() error {
return errHealStopSignalled
}
return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
}
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) healBuckets(bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(objAPI, h.bucket, bucketsOnly)
return h.healBucket(h.bucket, bucketsOnly)
}
buckets, err := objAPI.ListBuckets(h.ctx)
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})
for _, bucket := range buckets {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -938,11 +826,15 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
}
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
return err
}
if bucketsOnly {
@@ -950,15 +842,12 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
}
if !h.settings.Recursive {
if h.object != "" {
if h.objPrefix != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
if err = h.healObject(bucket, h.objPrefix); err != nil {
return err
}
}
@@ -967,26 +856,23 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
return nil
}
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted by the time heal
// was attempted; we ignore this object and move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
}
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
func (h *healSequence) healObject(bucket, object string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
if h.isQuitting() {
return errHealStopSignalled
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
}

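One detail worth pulling out of LaunchNewHealSequence above: a new heal is rejected when either path is a prefix of the other (HasPrefix(k, h.path) || HasPrefix(h.path, k)), since healing bucket/ necessarily overlaps healing bucket/prefix. A self-contained approximation of that test using strings.HasPrefix; the production HasPrefix is MinIO's own helper, so treat this as a sketch.

package main

import (
    "fmt"
    "strings"
)

// pathsOverlap reports whether two heal paths conflict: a heal
// sequence subsumes another whenever either path prefixes the other.
func pathsOverlap(a, b string) bool {
    return strings.HasPrefix(a, b) || strings.HasPrefix(b, a)
}

func main() {
    running := []string{"mybucket/photos"}
    requested := "mybucket"

    for _, p := range running {
        if pathsOverlap(p, requested) {
            fmt.Printf("heal on %q overlaps running heal on %q\n", requested, p)
        }
    }
}

Raw prefix matching like this would also pair mybucket with mybucket2; the real sequence keys are slash-joined paths, and a production version would want slash-aware matching to avoid such collisions.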
cmd/admin-quota-handlers.go (new file, 118 lines)
View File

@@ -0,0 +1,118 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}

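The new quota handlers above persist the request body verbatim as quota.json once parseBucketQuota accepts it. A minimal sketch of that decode-then-validate step, assuming a hard-quota document shaped like {"quota": N, "quotatype": "hard"}; the field names and the bucketQuota struct here are illustrative, and the authoritative shape lives in parseBucketQuota and madmin.

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

// bucketQuota is an illustrative stand-in for the structure that
// parseBucketQuota decodes from quota.json.
type bucketQuota struct {
    Quota uint64 `json:"quota"`
    Type  string `json:"quotatype"`
}

func parseQuota(data []byte) (*bucketQuota, error) {
    var q bucketQuota
    if err := json.Unmarshal(data, &q); err != nil {
        return nil, err
    }
    if q.Quota > 0 && q.Type != "hard" && q.Type != "fifo" {
        return nil, errors.New("unsupported quota type")
    }
    return &q, nil
}

func main() {
    body := []byte(`{"quota": 1073741824, "quotatype": "hard"}`)
    q, err := parseQuota(body)
    if err != nil {
        fmt.Println("invalid quota config:", err)
        return
    }
    fmt.Printf("enforcing %s quota of %d bytes\n", q.Type, q.Quota)
}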
View File

@@ -20,6 +20,8 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
@@ -62,7 +64,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
if globalIsDistErasure || globalIsErasure {
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
@@ -110,10 +112,11 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
@@ -168,32 +171,22 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}
if globalIsDistErasure || globalIsErasure {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
// Quota operations
if globalIsXL || globalIsDistXL {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
}
if globalIsDistErasure {
// Top locks
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
// Force unlocks paths
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
Queries("paths", "{paths:.*}").HandlerFunc(httpTraceHdrs(adminAPI.ForceUnlockHandler))
}
// HTTP Trace
@@ -204,22 +197,26 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- KMS APIs --
//
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(httpTraceAll(adminAPI.KMSCreateKeyHandler)).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
if !globalIsGateway {
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
}
}
// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
}

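The router hunks above all follow one gorilla/mux idiom: match on method plus path, then require query parameters via Queries, whose captured values surface through mux.Vars alongside path variables. A self-contained sketch of the same registration shape (the adminVersion value is copied from this era's v3 admin prefix; the handler is invented for the example):

package main

import (
    "fmt"
    "net/http"

    "github.com/gorilla/mux"
)

func getBucketQuota(w http.ResponseWriter, r *http.Request) {
    // Queries("bucket", "{bucket:.*}") below makes ?bucket=... mandatory
    // and exposes the captured value through mux.Vars.
    vars := mux.Vars(r)
    fmt.Fprintf(w, "quota for bucket %q\n", vars["bucket"])
}

func main() {
    router := mux.NewRouter()
    adminVersion := "/minio/admin/v3"

    // Requests lacking the bucket query parameter fall through to the
    // router's NotFoundHandler rather than reaching the handler.
    router.Methods(http.MethodGet).
        Path(adminVersion + "/get-bucket-quota").
        HandlerFunc(getBucketQuota).
        Queries("bucket", "{bucket:.*}")

    http.ListenAndServe(":8080", router)
}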
View File

@@ -17,24 +17,23 @@
package cmd
import (
"context"
"net/http"
"time"
"os"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk
addr := r.Host
if globalIsDistErasure {
addr = GetLocalPeer(endpointServerPools)
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
network := make(map[string]string)
for _, ep := range endpointServerPools {
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
@@ -42,39 +41,46 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}
disks = append(disks, di)
} else {
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
}
}
props := madmin.ServerProperties{
State: string(madmin.ItemInitializing),
return madmin.ServerProperties{
State: "ok",
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
Disks: disks,
}
objLayer := newObjectLayerFn()
if objLayer != nil && !globalIsGateway {
// only need Disks information in server mode.
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
}
return props
}
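
The utilization line above is easy to get wrong: dividing two uint64 values truncates to an integer before any float conversion, so the conversions have to come first. A standalone sketch of both forms:

package main

import "fmt"

func main() {
    var total, free uint64 = 1000, 250

    // Wrong: unsigned integer division happens first, so the result
    // is almost always 0 (or 100 only when free == 0).
    wrong := float64((total - free) / total * 100)

    // Right: convert to float64 before dividing.
    right := float64(total-free) / float64(total) * 100

    fmt.Println(wrong, right) // 0 75
}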


@@ -18,53 +18,11 @@ package cmd
import (
"encoding/xml"
"time"
)
// DeletedObject objects deleted
type DeletedObject struct {
DeleteMarker bool `xml:"DeleteMarker,omitempty"`
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`
// MinIO extensions to support delete marker replication
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
}
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}
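
A runnable sketch of the omit-when-zero behavior the MarshalXML above implements, using trimmed-down stand-ins for the types (MTime and Deleted are illustrative names):

package main

import (
    "encoding/xml"
    "fmt"
    "os"
    "time"
)

// MTime mirrors DeleteMarkerMTime: encode RFC3339, omit when zero.
type MTime struct{ time.Time }

func (t MTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
    if t.Time.IsZero() {
        return nil // write nothing: the element is omitted entirely
    }
    return e.EncodeElement(t.Time.Format(time.RFC3339), start)
}

type Deleted struct {
    Key   string `xml:"Key"`
    MTime MTime  `xml:"DeleteMarkerMTime"`
}

func main() {
    enc := xml.NewEncoder(os.Stdout)
    enc.Indent("", "  ")
    enc.Encode(Deleted{Key: "a"}) // no DeleteMarkerMTime element
    fmt.Println()
    enc.Encode(Deleted{Key: "b", MTime: MTime{time.Now()}}) // element present
    fmt.Println()
}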
// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// Version ID of delete marker
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned"`
}
// createBucketConfiguration container for bucket configuration request from client.
@@ -79,5 +37,5 @@ type DeleteObjectsRequest struct {
// Element to enable quiet mode for the request
Quiet bool
// List of objects to be deleted
Objects []ObjectToDelete `xml:"Object"`
Objects []ObjectIdentifier `xml:"Object"`
}
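
For reference, a sketch of the wire format DeleteObjectsRequest parses, using trimmed-down copies of the two structs above and a plain xml.Unmarshal:

package main

import (
    "encoding/xml"
    "fmt"
)

// Reduced copies of the types above, enough to parse a
// multi-object Delete body.
type ObjectToDelete struct {
    ObjectName string `xml:"Key"`
    VersionID  string `xml:"VersionId"`
}

type DeleteObjectsRequest struct {
    Quiet   bool
    Objects []ObjectToDelete `xml:"Object"`
}

func main() {
    body := `<Delete>
  <Quiet>true</Quiet>
  <Object><Key>photos/a.jpg</Key></Object>
  <Object><Key>photos/b.jpg</Key><VersionId>v2</VersionId></Object>
</Delete>`

    var req DeleteObjectsRequest
    if err := xml.Unmarshal([]byte(body), &req); err != nil {
        panic(err)
    }
    fmt.Printf("quiet=%v objects=%+v\n", req.Quiet, req.Objects)
}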


@@ -27,18 +27,15 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config/dns"
minio "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/cmd/config/etcd/dns"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
)
@@ -87,7 +84,6 @@ const (
ErrInvalidMaxUploads
ErrInvalidMaxParts
ErrInvalidPartNumberMarker
ErrInvalidPartNumber
ErrInvalidRequestBody
ErrInvalidCopySource
ErrInvalidMetadataDirective
@@ -107,24 +103,8 @@ const (
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrRemoteDestinationNotFoundError
ErrReplicationDestinationMissingLock
ErrRemoteTargetNotFoundError
ErrReplicationRemoteConnectionError
ErrBucketRemoteIdenticalToSource
ErrBucketRemoteAlreadyExists
ErrBucketRemoteLabelInUse
ErrBucketRemoteArnTypeInvalid
ErrBucketRemoteArnInvalid
ErrBucketRemoteRemoveDisallowed
ErrRemoteTargetNotVersionedError
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
ErrNoSuchUpload
ErrInvalidVersionID
ErrNoSuchVersion
ErrNotImplemented
ErrPreconditionFailed
@@ -235,7 +215,6 @@ const (
ErrInvalidResourceName
ErrServerNotInitialized
ErrOperationTimedOut
ErrClientDisconnected
ErrOperationMaxedOut
ErrInvalidRequest
// MinIO storage class error codes
@@ -263,6 +242,7 @@ const (
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled
ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -363,7 +343,6 @@ const (
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
ErrServiceAccountNotFound
ErrPostPolicyConditionInvalidFormat
)
@@ -438,11 +417,6 @@ var errorCodes = errorCodeMap{
Description: "Argument partNumberMarker must be an integer.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidPartNumber: {
Code: "InvalidPartNumber",
Description: "The requested partnumber is not satisfiable",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrInvalidPolicyDocument: {
Code: "InvalidPolicyDocument",
Description: "The content of the form does not meet the conditions specified in the policy document.",
@@ -563,14 +537,9 @@ var errorCodes = errorCodeMap{
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
HTTPStatusCode: http.StatusNotFound,
},
ErrInvalidVersionID: {
Code: "InvalidArgument",
Description: "Invalid version id specified",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchVersion: {
Code: "NoSuchVersion",
Description: "The specified version does not exist.",
Description: "Indicates that the version ID specified in the request does not match an existing version.",
HTTPStatusCode: http.StatusNotFound,
},
ErrNotImplemented: {
@@ -678,9 +647,20 @@ var errorCodes = errorCodeMap{
Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Correct Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialDate: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Correct Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
// Need changes to make sure variable messages can be constructed.
ErrMalformedCredentialRegion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; the region is wrong;",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRegion: {
@@ -688,6 +668,9 @@ var errorCodes = errorCodeMap{
Description: "Region does not match.",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Correct Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
// Need changes to make sure variable messages can be constructed.
ErrInvalidServiceS3: {
Code: "AuthorizationParametersError",
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
@@ -698,6 +681,9 @@ var errorCodes = errorCodeMap{
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
// Need changes to make sure variable messages can be constructed.
ErrInvalidRequestVersion: {
Code: "AuthorizationQueryParametersError",
Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
@@ -768,6 +754,8 @@ var errorCodes = errorCodeMap{
Description: "Your key is too long",
HTTPStatusCode: http.StatusBadRequest,
},
// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
ErrUnsignedHeaders: {
Code: "AccessDenied",
Description: "There were headers present in the request which were not signed",
@@ -823,76 +811,6 @@ var errorCodes = errorCodeMap{
Description: "The replication configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrRemoteDestinationNotFoundError: {
Code: "RemoteDestinationNotFoundError",
Description: "The remote destination bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationDestinationMissingLock: {
Code: "ReplicationDestinationMissingLockError",
Description: "The replication destination bucket does not have object locking enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetNotFoundError: {
Code: "XMinioAdminRemoteTargetNotFoundError",
Description: "The remote target does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service connection error - please check remote service credentials and target bucket",
HTTPStatusCode: http.StatusNotFound,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XMinioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteAlreadyExists: {
Code: "XMinioAdminBucketRemoteAlreadyExists",
Description: "The remote target already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteLabelInUse: {
Code: "XMinioAdminBucketRemoteLabelInUse",
Description: "The remote target with this label already exists",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteRemoveDisallowed: {
Code: "XMinioAdminRemoteRemoveDisallowed",
Description: "This ARN is in use by an existing configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnTypeInvalid: {
Code: "XMinioAdminRemoteARNTypeInvalid",
Description: "The bucket remote ARN type is not valid",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketRemoteArnInvalid: {
Code: "XMinioAdminRemoteArnInvalid",
Description: "The bucket remote ARN does not have correct format",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetNotVersionedError: {
Code: "RemoteTargetNotVersionedError",
Description: "The remote target does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationSourceNotVersionedError: {
Code: "ReplicationSourceNotVersionedError",
Description: "The replication source does not have versioning enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationNeedsVersioningError: {
Code: "InvalidRequest",
Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationBucketNeedsVersioningError: {
Code: "InvalidRequest",
Description: "Versioning must be 'Enabled' on the bucket to add a replication target",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
@@ -923,11 +841,6 @@ var errorCodes = errorCodeMap{
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
HTTPStatusCode: http.StatusBadRequest,
},
ErrObjectRestoreAlreadyInProgress: {
Code: "RestoreAlreadyInProgress",
Description: "Object restore is already in progress",
HTTPStatusCode: http.StatusConflict,
},
/// Bucket notification related errors.
ErrEventNotification: {
Code: "InvalidArgument",
@@ -990,7 +903,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusBadRequest,
},
ErrMetadataTooLarge: {
Code: "MetadataTooLarge",
Code: "InvalidArgument",
Description: "Your metadata headers exceed the maximum allowed metadata size.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -1214,6 +1127,11 @@ var errorCodes = errorCodeMap{
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
@@ -1224,11 +1142,6 @@ var errorCodes = errorCodeMap{
Description: "A timeout occurred while trying to lock a resource, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrClientDisconnected: {
Code: "ClientDisconnected",
Description: "Client disconnected before response was ready",
HTTPStatusCode: 499, // No official code, use nginx value.
},
ErrOperationMaxedOut: {
Code: "SlowDown",
Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
@@ -1727,17 +1640,12 @@ var errorCodes = errorCodeMap{
ErrAddUserInvalidArgument: {
Code: "XMinioInvalidIAMCredentials",
Description: "User is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusForbidden,
HTTPStatusCode: http.StatusConflict,
},
ErrAdminAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The administrator key is not eligible for this operation",
HTTPStatusCode: http.StatusForbidden,
},
ErrAccountNotEligible: {
Code: "XMinioInvalidIAMCredentials",
Description: "The account key is not eligible for this operation",
HTTPStatusCode: http.StatusForbidden,
HTTPStatusCode: http.StatusConflict,
},
ErrServiceAccountNotFound: {
Code: "XMinioInvalidIAMCredentials",
@@ -1760,16 +1668,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
return ErrNone
}
// Only return ErrClientDisconnected if the provided context is actually canceled.
// This way downstream context.Canceled will still report ErrOperationTimedOut
select {
case <-ctx.Done():
if ctx.Err() == context.Canceled {
return ErrClientDisconnected
}
default:
}
switch err {
case errInvalidArgument:
apiErr = ErrAdminInvalidArgument
@@ -1884,12 +1782,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBucketAlreadyOwnedByYou
case ObjectNotFound:
apiErr = ErrNoSuchKey
case MethodNotAllowed:
apiErr = ErrMethodNotAllowed
case InvalidVersionID:
apiErr = ErrInvalidVersionID
case VersionNotFound:
apiErr = ErrNoSuchVersion
case ObjectAlreadyExists:
apiErr = ErrMethodNotAllowed
case ObjectNameInvalid:
@@ -1940,30 +1832,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrObjectLockConfigurationNotFound
case BucketQuotaConfigNotFound:
apiErr = ErrAdminNoSuchQuotaConfiguration
case BucketReplicationConfigNotFound:
apiErr = ErrReplicationConfigurationNotFoundError
case BucketRemoteDestinationNotFound:
apiErr = ErrRemoteDestinationNotFoundError
case BucketReplicationDestinationMissingLock:
apiErr = ErrReplicationDestinationMissingLock
case BucketRemoteTargetNotFound:
apiErr = ErrRemoteTargetNotFoundError
case BucketRemoteConnectionErr:
apiErr = ErrReplicationRemoteConnectionError
case BucketRemoteAlreadyExists:
apiErr = ErrBucketRemoteAlreadyExists
case BucketRemoteLabelInUse:
apiErr = ErrBucketRemoteLabelInUse
case BucketRemoteArnTypeInvalid:
apiErr = ErrBucketRemoteArnTypeInvalid
case BucketRemoteArnInvalid:
apiErr = ErrBucketRemoteArnInvalid
case BucketRemoteRemoveDisallowed:
apiErr = ErrBucketRemoteRemoveDisallowed
case BucketRemoteTargetNotVersioned:
apiErr = ErrRemoteTargetNotVersionedError
case BucketReplicationSourceNotVersioned:
apiErr = ErrReplicationSourceNotVersionedError
case BucketQuotaExceeded:
apiErr = ErrAdminBucketQuotaExceeded
case *event.ErrInvalidEventName:
@@ -1994,8 +1862,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrBackendDown
case ObjectNameTooLong:
apiErr = ErrKeyTooLongError
case dns.ErrInvalidBucketName:
apiErr = ErrInvalidBucketName
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
@@ -2032,24 +1898,12 @@ func toAPIError(ctx context.Context, err error) APIError {
}
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
apiErr = errorCodes.ToAPIErrWithErr(code, e)
}
if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
// their internal error types. This code is only
// useful with gateway implementations.
switch e := err.(type) {
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
Description: e.Error(),
HTTPStatusCode: errorCodes[ErrInvalidRequest].HTTPStatusCode,
}
case *xml.SyntaxError:
apiErr = APIError{
Code: "MalformedXML",
@@ -2064,24 +1918,12 @@ func toAPIError(ctx context.Context, err error) APIError {
e.Error()),
HTTPStatusCode: http.StatusBadRequest,
}
case versioning.Error:
apiErr = APIError{
Code: "IllegalVersioningConfigurationException",
Description: fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
HTTPStatusCode: http.StatusBadRequest,
}
case lifecycle.Error:
apiErr = APIError{
Code: "InvalidRequest",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case replication.Error:
apiErr = APIError{
Code: "MalformedXML",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case tags.Error:
apiErr = APIError{
Code: e.Code(),
@@ -2125,12 +1967,6 @@ func toAPIError(ctx context.Context, err error) APIError {
HTTPStatusCode: e.Response().StatusCode,
}
// Add more Gateway SDKs here if any in future.
default:
apiErr = APIError{
Code: apiErr.Code,
Description: fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
}
}
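
The error plumbing above follows a simple pattern: a typed code indexes a map of canned APIError values, with a generic fallback for unknown codes. A minimal standalone version (the map contents here are illustrative, not the full table above):

package main

import (
    "fmt"
    "net/http"
)

type APIErrorCode int

const (
    ErrNone APIErrorCode = iota
    ErrNoSuchKey
)

type APIError struct {
    Code           string
    Description    string
    HTTPStatusCode int
}

// A tiny stand-in for the errorCodes map above.
var errorCodes = map[APIErrorCode]APIError{
    ErrNoSuchKey: {
        Code:           "NoSuchKey",
        Description:    "The specified key does not exist.",
        HTTPStatusCode: http.StatusNotFound,
    },
}

func toAPIErr(code APIErrorCode) APIError {
    if e, ok := errorCodes[code]; ok {
        return e
    }
    // Unknown codes fall back to a generic internal error.
    return APIError{"InternalError", "We encountered an internal error.", http.StatusInternalServerError}
}

func main() {
    fmt.Printf("%+v\n", toAPIErr(ErrNoSuchKey))
}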


@@ -29,7 +29,6 @@ import (
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/bucket/lifecycle"
)
// Returns a hexadecimal representation of time at the
@@ -38,18 +37,9 @@ func mustGetRequestID(t time.Time) string {
return fmt.Sprintf("%X", t.UnixNano())
}
// setEventStreamHeaders sets headers that tell proxies not to buffer event-stream responses
func setEventStreamHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ContentType, "text/event-stream")
w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
w.Header().Set("X-Accel-Buffering", "no") // nginx to turn off buffering
}
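
Those three headers are what keep nginx-style proxies from buffering a long-lived event stream; a self-contained handler sketch that pairs them with an http.Flusher:

package main

import (
    "fmt"
    "net/http"
    "time"
)

func events(w http.ResponseWriter, r *http.Request) {
    // Same trio as setEventStreamHeaders above.
    w.Header().Set("Content-Type", "text/event-stream")
    w.Header().Set("Cache-Control", "no-cache")
    w.Header().Set("X-Accel-Buffering", "no") // nginx: do not buffer

    f, ok := w.(http.Flusher)
    if !ok {
        http.Error(w, "streaming unsupported", http.StatusInternalServerError)
        return
    }
    for i := 0; i < 3; i++ {
        fmt.Fprintf(w, "data: tick %d\n\n", i)
        f.Flush() // push each event to the client immediately
        time.Sleep(time.Second)
    }
}

func main() {
    http.HandleFunc("/events", events)
    http.ListenAndServe(":8080", nil)
}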
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
// Set the "Server" http header.
w.Header().Set(xhttp.ServerInfo, "MinIO")
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerRegion; region != "" {
@@ -86,7 +76,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
// set common headers
setCommonHeaders(w)
@@ -117,11 +107,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := url.ParseQuery(objInfo.UserTags)
if len(tags) > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
}
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
}
// Set all other user defined metadata.
@@ -131,43 +120,31 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
if !strings.HasPrefix(k, userMetadataPrefix) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}
if !isSet {
w.Header().Set(k, v)
}
}
var start, rangeLen int64
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}
// For providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
}
// Set content length.
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
if rs != nil {
@@ -175,35 +152,5 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
w.Header().Set(xhttp.ContentRange, contentRange)
}
// Set the relevant version ID as part of the response header.
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
if objInfo.ReplicationStatus.String() != "" {
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
if opts.VersionID == "" {
if ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
SuccessorModTime: objInfo.SuccessorModTime,
}); !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
}
}
}
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
}
}
return nil
}
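
The rs != nil branch above ends up emitting an RFC 7233 Content-Range header. HTTPRangeSpec itself is internal, so this sketch re-derives the offset/length arithmetic for a bytes=start-end request under that assumption:

package main

import "fmt"

// rangeToOffsetLength resolves an inclusive byte range against an
// object of the given size, the same arithmetic GetOffsetLength
// performs for a "bytes=start-end" request.
func rangeToOffsetLength(start, end, size int64) (off, length int64) {
    if end >= size {
        end = size - 1 // clamp to the last byte
    }
    return start, end - start + 1
}

func main() {
    var start, end, size int64 = 100, 199, 1000
    off, length := rangeToOffsetLength(start, end, size)

    // RFC 7233 Content-Range: bytes first-last/complete-length
    contentRange := fmt.Sprintf("bytes %d-%d/%d", off, off+length-1, size)
    fmt.Println(off, length, contentRange) // 100 100 bytes 100-199/1000
}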


@@ -36,8 +36,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("marker"))
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
@@ -56,8 +56,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("key-marker"))
prefix = values.Get("prefix")
marker = values.Get("key-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
@@ -86,8 +86,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
maxkeys = maxObjectList
}
prefix = trimLeadingSlash(values.Get("prefix"))
startAfter = trimLeadingSlash(values.Get("start-after"))
prefix = values.Get("prefix")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
@@ -117,8 +117,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
maxUploads = maxUploadsList
}
prefix = trimLeadingSlash(values.Get("prefix"))
keyMarker = trimLeadingSlash(values.Get("key-marker"))
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
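
Each of the changes above routes user-supplied prefixes and markers through trimLeadingSlash. A standalone sketch, assuming the helper simply strips one leading "/":

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// Assumed behavior of the helper used above: drop a leading slash so
// "?prefix=/photos/" and "?prefix=photos/" list the same keys.
func trimLeadingSlash(s string) string {
    return strings.TrimPrefix(s, "/")
}

func main() {
    values, _ := url.ParseQuery("prefix=/photos/&marker=/photos/z.jpg")
    prefix := trimLeadingSlash(values.Get("prefix"))
    marker := trimLeadingSlash(values.Get("marker"))
    fmt.Println(prefix, marker) // photos/ photos/z.jpg
}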


@@ -20,7 +20,6 @@ import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"path"
@@ -35,11 +34,11 @@ import (
const (
// RFC3339 is a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
maxObjectList = 50000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
)
// LocationResponse - format for location response.
@@ -48,12 +47,6 @@ type LocationResponse struct {
Location string `xml:",chardata"`
}
// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
type PolicyStatus struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
IsPublic string
}
// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
@@ -242,23 +235,10 @@ type Bucket struct {
// ObjectVersion container for object version metadata
type ObjectVersion struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
Object
IsLatest bool
VersionID string `xml:"VersionId"`
isDeleteMarker bool
}
// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if o.isDeleteMarker {
start.Name.Local = "DeleteMarker"
} else {
start.Name.Local = "Version"
}
type objectVersionWrapper ObjectVersion
return e.EncodeElement(objectVersionWrapper(o), start)
IsLatest bool
}
// StringMap is a map[string]string.
@@ -353,10 +333,9 @@ type CompleteMultipartUploadResponse struct {
// DeleteError structure.
type DeleteError struct {
Code string
Message string
Key string
VersionID string `xml:"VersionId"`
Code string
Message string
Key string
}
// DeleteObjectsResponse container for multiple object deletes.
@@ -364,7 +343,7 @@ type DeleteObjectsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
// Collection of all deleted objects
DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`
DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
// Collection of errors deleting certain objects.
Errors []DeleteError `xml:"Error,omitempty"`
@@ -394,7 +373,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
proto := handlers.GetSourceScheme(r)
if proto == "" {
proto = getURLScheme(globalIsTLS)
proto = getURLScheme(globalIsSSL)
}
u := &url.URL{
Host: r.Host,
@@ -403,7 +382,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.HasPrefix(r.Host, bucket+"."+domain) {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
u.Path = path.Join(SlashSeparator, object)
break
}
@@ -414,13 +394,11 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
listbuckets := make([]Bucket, 0, len(buckets))
var listbuckets []Bucket
var data = ListBucketsResponse{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var owner = Owner{}
owner.ID = globalMinioDefaultOwnerID
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
@@ -435,14 +413,13 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
}
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
var owner = Owner{}
var data = ListVersionsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = ObjectVersion{}
if object.Name == "" {
@@ -459,13 +436,10 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
content.VersionID = object.VersionID
if content.VersionID == "" {
content.VersionID = nullVersionID
}
content.IsLatest = object.IsLatest
content.isDeleteMarker = object.DeleteMarker
content.VersionID = "null"
content.IsLatest = true
versions = append(versions, content)
}
@@ -478,11 +452,8 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
data.MaxKeys = maxKeys
data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
data.NextVersionIDMarker = resp.NextVersionIDMarker
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -494,13 +465,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var contents []Object
var prefixes []CommonPrefix
var owner = Owner{}
var data = ListObjectsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = Object{}
if object.Name == "" {
@@ -528,10 +498,9 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -543,13 +512,15 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var contents []Object
var commonPrefixes []CommonPrefix
var owner = Owner{}
var data = ListObjectsV2Response{}
if fetchOwner {
owner.ID = globalMinioDefaultOwnerID
}
for _, object := range objects {
var content = Object{}
if object.Name == "" {
@@ -575,10 +546,6 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
}
}
@@ -595,8 +562,6 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -650,16 +615,8 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
// Placeholder values, not meaningful
listPartsResponse.Initiator = Initiator{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
@@ -709,14 +666,11 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
}
// generate multi objects delete response.
func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
deleteResp := DeleteObjectsResponse{}
if !quiet {
deleteResp.DeletedObjects = deletedObjects
}
if len(errs) == len(deletedObjects) {
deleteResp.DeletedObjects = nil
}
deleteResp.Errors = errs
return deleteResp
}
@@ -780,10 +734,6 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
@@ -838,3 +788,38 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated).
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// The request is from browser and also if browser
// is enabled we need to redirect.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}


@@ -77,7 +77,7 @@ func TestObjectLocation(t *testing.T) {
// Server with virtual domain name.
{
request: &http.Request{
Host: "mybucket.mys3.bucket.org",
Host: "mys3.bucket.org",
Header: map[string][]string{},
},
domains: []string{"mys3.bucket.org"},
@@ -87,7 +87,7 @@ func TestObjectLocation(t *testing.T) {
},
{
request: &http.Request{
Host: "mybucket.mys3.bucket.org",
Host: "mys3.bucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
@@ -98,14 +98,11 @@ func TestObjectLocation(t *testing.T) {
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
}
})
for i, testCase := range testCases {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
}
}
}
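
The rewritten loop above switches to t.Run subtests and re-declares testCase to capture the range variable inside the closure. The pattern in isolation (double is a placeholder function; this would live in a _test.go file):

package main

import "testing"

func double(n int) int { return n * 2 }

func TestDouble(t *testing.T) {
    testCases := []struct{ in, want int }{
        {1, 2},
        {3, 6},
    }
    for _, testCase := range testCases {
        testCase := testCase // capture the range variable for the closure
        t.Run("", func(t *testing.T) {
            if got := double(testCase.in); got != testCase.want {
                t.Errorf("expected %d, got %d", testCase.want, got)
            }
        })
    }
}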


@@ -17,102 +17,73 @@
package cmd
import (
"net"
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/wildcard"
"github.com/rs/cors"
)
func newHTTPServerFn() *xhttp.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalHTTPServer
}
func setHTTPServer(h *xhttp.Server) {
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
globalObjLayerMutex.Lock()
globalHTTPServer = h
globalObjLayerMutex.Unlock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
}
func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalObjectAPI
}
func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalCacheObjectAPI
}
func setCacheObjectLayer(c CacheObjectLayer) {
globalObjLayerMutex.Lock()
globalCacheObjectAPI = c
globalObjLayerMutex.Unlock()
}
func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
globalObjLayerMutex.Unlock()
}
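
The accessors above move from Lock to RLock so concurrent readers no longer serialize behind one another. The getter/setter split over a sync.RWMutex, reduced to a sketch:

package main

import (
    "fmt"
    "sync"
)

type Layer struct{ name string }

var (
    mu    sync.RWMutex
    layer *Layer
)

// Readers take the shared lock: many goroutines may hold it at once.
func getLayer() *Layer {
    mu.RLock()
    defer mu.RUnlock()
    return layer
}

// The writer takes the exclusive lock.
func setLayer(l *Layer) {
    mu.Lock()
    layer = l
    mu.Unlock()
}

func main() {
    setLayer(&Layer{name: "erasure"})
    fmt.Println(getLayer().name)
}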
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}
// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching minio.<domain>` this is
// specifically meant for operator/k8s deployment
// The reason we need to skip this is for a special
// usecase where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by the bucketDNS style to ensure that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// makes sure that buckets are routed through this matcher
// to match for `<bucket>`
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
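
The MatcherFunc above carves the reserved minio.<domain> host out of the wildcard {bucket}.<domain> route so path-style requests still work there. The same idea in a standalone sketch (example.com and the handlers are hypothetical):

package main

import (
    "fmt"
    "net"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    const domain = "example.com"
    r := mux.NewRouter()

    // Match "<bucket>.example.com" but let "minio.example.com" fall
    // through to the path-style route, as the matcher above does.
    r.MatcherFunc(func(req *http.Request, _ *mux.RouteMatch) bool {
        host, _, err := net.SplitHostPort(req.Host)
        if err != nil {
            host = req.Host
        }
        return host != "minio."+domain
    }).Host("{bucket:.+}." + domain).
        HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintln(w, "bucket:", mux.Vars(req)["bucket"])
        })

    r.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        fmt.Fprintln(w, "path-style")
    })

    http.ListenAndServe(":8080", r)
}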
@@ -120,280 +91,207 @@ func registerAPIRouter(router *mux.Router) {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketReplicationConfig
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenNotification
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
// DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListObjectVersions
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// GetBucketPolicyStatus
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketReplicationConfig
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
// GetObjectRetention
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(
collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(
collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketReplication
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
}
/// Root operation
// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
}
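
Throughout the hunks above, the two sides differ only in wrapper order: one nests maxClients inside collectAPIStats, the other moves maxClients outermost. The order decides whether throttled-away requests are counted at all. A minimal, self-contained sketch with stand-in wrappers (not MinIO's actual maxClients/collectAPIStats implementations):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// maxClients is a stand-in throttler: it admits at most n concurrent
// requests and rejects the overflow with 503 before the inner handler
// ever runs.
func maxClients(n int, h http.HandlerFunc) http.HandlerFunc {
	sem := make(chan struct{}, n)
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			h(w, r)
		default:
			w.WriteHeader(http.StatusServiceUnavailable)
		}
	}
}

// collectAPIStats is a stand-in stats wrapper: it counts each request
// that reaches it before delegating.
func collectAPIStats(name string, calls *int, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		*calls++
		h(w, r)
	}
}

func main() {
	var calls int
	final := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }

	// maxClients outermost: a throttled request is rejected before the
	// stats wrapper sees it, so rejected requests are never counted.
	// With collectAPIStats outermost, the request would be counted first
	// and throttled afterwards; swapping the order changes what "calls" means.
	h := maxClients(1, collectAPIStats("putbucket", &calls, final))

	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodPut, "/bucket", nil))
	fmt.Println("status:", rec.Code, "counted calls:", calls) // 200, 1
}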
// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
xhttp.ServerInfo,
xhttp.Connection,
xhttp.AcceptRanges,
xhttp.ContentRange,
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
xhttp.ContentDisposition,
xhttp.LastModified,
xhttp.ContentLanguage,
xhttp.CacheControl,
xhttp.RetryAfter,
xhttp.AmzBucketRegion,
xhttp.Expires,
"X-Amz*",
"x-amz*",
"*",
}
return cors.New(cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
return true
}
}
return false
},
AllowedMethods: []string{
http.MethodGet,
http.MethodPut,
http.MethodHead,
http.MethodPost,
http.MethodDelete,
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}).Handler(handler)
}
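
AllowOriginFunc above accepts an origin when it matches any configured pattern via wildcard.MatchSimple. The following is a rough, independent illustration of '*'-only pattern matching, not the minio wildcard package's actual code:

package main

import (
	"fmt"
	"strings"
)

// matchSimple reports whether name matches pattern, where '*' in the
// pattern matches any (possibly empty) run of characters.
func matchSimple(pattern, name string) bool {
	if pattern == "*" {
		return true
	}
	parts := strings.Split(pattern, "*")
	if len(parts) == 1 {
		return pattern == name // no wildcard: exact match
	}
	// First fragment must anchor at the start, last at the end.
	if !strings.HasPrefix(name, parts[0]) {
		return false
	}
	name = name[len(parts[0]):]
	last := parts[len(parts)-1]
	if !strings.HasSuffix(name, last) {
		return false
	}
	name = name[:len(name)-len(last)]
	// Middle fragments must appear in order.
	for _, p := range parts[1 : len(parts)-1] {
		i := strings.Index(name, p)
		if i < 0 {
			return false
		}
		name = name[i+len(p):]
	}
	return true
}

func main() {
	fmt.Println(matchSimple("https://*.example.com", "https://app.example.com")) // true
	fmt.Println(matchSimple("https://*.example.com", "https://evil.com"))        // false
}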

View File

@@ -36,7 +36,6 @@ import (
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/etag"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
@@ -152,17 +151,15 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
return cred, claims, owner, ErrNone
}
// checkAdminRequestAuth checks for authentication and authorization for the incoming
// request. It only accepts V2 and V4 requests. Presigned, JWT and anonymous requests
// are automatically rejected.
func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned, JWT, or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
@@ -187,12 +184,12 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client; doesn't return
// errors. Upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(getSessionToken(r))
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
@@ -216,15 +213,15 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.MapClaims[ldapUser]; ok {
if _, ok := claims.Lookup(ldapUser); ok {
return claims.Map(), nil
}
// If OPA is not set, the session token
// must carry a policy claim; reject
// requests that lack one.
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
@@ -239,7 +236,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
if err != nil {
// Base64 decoding failed; log to indicate that
// something malformed the request sent by the client.
logger.LogIf(GlobalContext, err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
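
The tail of this hunk base64-decodes the session policy embedded in the token and stores the decoded JSON back under the policy claim key; a decode failure is treated as a malformed request. A minimal sketch of that step (the claim key name here is purely illustrative):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	claims := map[string]interface{}{
		// A base64-encoded policy document, as it would arrive inside
		// the session token; this value is just `{"Version":"2012-10-17"}`.
		"sessionPolicy": "eyJWZXJzaW9uIjoiMjAxMi0xMC0xNyJ9",
	}

	enc, ok := claims["sessionPolicy"].(string)
	if !ok {
		fmt.Println("no session policy claim")
		return
	}
	spBytes, err := base64.StdEncoding.DecodeString(enc)
	if err != nil {
		// Decoding failure means a malformed request: reject it,
		// mirroring the errAuthentication path above.
		fmt.Println("malformed session policy:", err)
		return
	}
	// Store the decoded JSON back under the policy claim key.
	claims["sessionPolicy"] = string(spBytes)
	fmt.Println(claims["sessionPolicy"])
}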
@@ -260,7 +257,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(token)
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
@@ -273,7 +270,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
return s3Err
}
@@ -283,13 +280,14 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, ErrSignatureVersionNotSupported
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, s3Err
return accessKey, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
@@ -299,18 +297,18 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
return accessKey, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, s3Err
return accessKey, owner, s3Err
}
var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, s3Err
return accessKey, owner, s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
@@ -320,7 +318,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return cred, owner, ErrMalformedXML
return accessKey, owner, ErrMalformedXML
}
// Populate payload to extract location constraint.
@@ -329,18 +327,14 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return cred, owner, s3Error
return accessKey, owner, s3Error
}
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
// Anonymous checks are not meant for ListBuckets action
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
@@ -350,31 +344,12 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
}
}
return cred, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -383,28 +358,9 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
return cred.AccessKey, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred, owner, ErrNone
}
}
return cred, owner, ErrAccessDenied
return cred.AccessKey, owner, ErrAccessDenied
}
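
The blocks removed above implement a fallback: AWS treats s3:ListBucketVersions as covered by s3:ListBucket, so a denied versions listing is re-checked against the broader action before access is finally denied. The shape of that pattern, with a stand-in policy checker:

package main

import "fmt"

// isAllowed is a stand-in for the policy engine: here it only grants
// the broad "s3:ListBucket" action.
func isAllowed(action string) bool {
	return action == "s3:ListBucket"
}

// checkWithFallback first checks the requested action; if that is
// denied and the action was ListBucketVersions, it retries with
// ListBucket, mirroring the fallback removed in the hunk above.
func checkWithFallback(action string) bool {
	if isAllowed(action) {
		return true
	}
	if action == "s3:ListBucketVersions" {
		return isAllowed("s3:ListBucket")
	}
	return false
}

func main() {
	fmt.Println(checkWithFallback("s3:ListBucketVersions")) // true via fallback
	fmt.Println(checkWithFallback("s3:DeleteBucket"))       // false
}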
// Verify if request has valid AWS Signature Version '2'.
@@ -433,14 +389,19 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return errCode
}
clientETag, err := etag.FromContentMD5(r.Header)
var (
err error
contentMD5, contentSHA256 []byte
)
// Extract 'Content-Md5' if present.
contentMD5, err = checkValidMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
var contentSHA256 []byte
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
@@ -457,7 +418,8 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
if err != nil {
return toAPIErrorCode(ctx, err)
}
@@ -465,6 +427,16 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return ErrNone
}
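
Both sides of the hunk feed the optional Content-Md5 / X-Amz-Content-Sha256 values into a hashing reader, so verification happens implicitly while the body is read. An independent standard-library sketch of that idea (the hash.NewReader signature in the diff is MinIO's own; this is not it):

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
)

// verifyingReader hashes everything read through it and checks the
// digest against the expected hex value when the stream hits EOF,
// so verification happens implicitly during reading.
type verifyingReader struct {
	r        io.Reader
	h        hash.Hash
	expected string
}

func (v *verifyingReader) Read(p []byte) (int, error) {
	n, err := v.r.Read(p)
	v.h.Write(p[:n]) // writes to a hash.Hash never fail
	if err == io.EOF && hex.EncodeToString(v.h.Sum(nil)) != v.expected {
		return n, fmt.Errorf("content sha256 mismatch")
	}
	return n, err
}

func main() {
	body := []byte("hello")
	sum := sha256.Sum256(body)

	vr := &verifyingReader{
		r:        bytes.NewReader(body),
		h:        sha256.New(),
		expected: hex.EncodeToString(sum[:]),
	}
	// Draining the reader performs the verification as a side effect.
	if _, err := io.Copy(io.Discard, vr); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("body verified while reading")
}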
// authHandler - handles all the incoming authorization headers and validates them if possible.
type authHandler struct {
handler http.Handler
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
return authHandler{h}
}
// List of all supported S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
@@ -482,30 +454,26 @@ func isSupportedS3AuthType(aType authType) bool {
return ok
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
h.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate the Authorization header for JWT requests.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
return
}
h.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
h.ServeHTTP(w, r)
// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate the Authorization header for JWT requests.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
})
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
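
This hunk swaps between the two standard ways of writing middleware in Go: a named type with a ServeHTTP method, and a closure wrapped in http.HandlerFunc. Both satisfy http.Handler identically, as this minimal sketch shows:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Struct form: state lives in fields, behavior in a ServeHTTP method.
type authHandler struct {
	next http.Handler
}

func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// ... validate the Authorization header here ...
	a.next.ServeHTTP(w, r)
}

// Closure form: the wrapped handler is captured by the closure instead.
func setAuthHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ... validate the Authorization header here ...
		next.ServeHTTP(w, r)
	})
}

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})

	for _, h := range []http.Handler{authHandler{next: inner}, setAuthHandler(inner)} {
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/", nil))
		fmt.Println(rec.Body.String()) // "ok" from both forms
	}
}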
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
@@ -551,8 +519,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.BypassGovernanceRetentionAction,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
@@ -561,8 +528,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectRetentionAction,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
@@ -585,8 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.BypassGovernanceRetentionAction,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
@@ -596,8 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectRetentionAction,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
@@ -615,7 +579,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
// isPutActionAllowed - check if PUT operation is allowed on the resource;
// this call verifies bucket policies and IAM policies, and supports
// multi-user checks etc.
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
@@ -636,10 +600,6 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
return s3Err
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}
// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set
@@ -652,7 +612,6 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
@@ -666,7 +625,6 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),

View File

@@ -52,7 +52,7 @@ func TestGetRequestAuthType(t *testing.T) {
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
Method: http.MethodPut,
Method: "PUT",
},
authT: authTypeStreamingSigned,
},
@@ -111,7 +111,7 @@ func TestGetRequestAuthType(t *testing.T) {
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},
},
Method: http.MethodPost,
Method: "POST",
},
authT: authTypePostPolicy,
},
@@ -212,7 +212,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -246,7 +246,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -369,15 +369,15 @@ func TestIsReqAuthenticated(t *testing.T) {
s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
ctx := context.Background()
@@ -413,15 +413,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
Request *http.Request
ErrCode APIErrorCode
}{
{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
@@ -461,7 +461,7 @@ func TestValidateAdminSignature(t *testing.T) {
}
for i, testCase := range testCases {
req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}

View File

@@ -29,10 +29,8 @@ import (
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
bucket string
object string
versionID string
opts madmin.HealOpts
path string
opts madmin.HealOpts
// Healing response will be sent here
responseCh chan healResult
}
@@ -54,39 +52,15 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}
func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
// No need to wait; run at full speed.
if maxIO <= 0 {
return
}
// Wait in 100 millisecond ticks, up to maxWait in total, before proceeding
waitTick := 100 * time.Millisecond
// Bucket notification and http trace are not costly, so it is okay to ignore them
// while counting the number of concurrent connections
maxIOFn := func() int {
return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalHTTPTrace.NumSubscribers())
}
tmpMaxWait := maxWait
func waitForLowHTTPReq(tolerance int32) {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for an in-progress request before proceeding to heal
waitCount := 600
// If any requests are in progress, delay the heal.
for httpServer.GetRequestCount() >= maxIOFn() {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
} else {
time.Sleep(waitTick)
}
tmpMaxWait = tmpMaxWait - waitTick
}
if tmpMaxWait <= 0 {
if intDataUpdateTracker.debug {
logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
}
break
}
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
}
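
The two variants above bound the wait differently: the tick-based one sleeps in 100 ms increments up to maxWait in total, while the other polls once per second for at most 600 iterations. A self-contained sketch of the tick-based bound, with an atomic counter standing in for httpServer.GetRequestCount():

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var inFlight int32 // stand-in for httpServer.GetRequestCount()

// waitForLowLoad blocks in 100ms ticks, up to maxWait in total, while
// the in-flight request count stays at or above maxIO; it never waits
// longer than maxWait, mirroring the bounded wait in the hunk above.
func waitForLowLoad(maxIO int32, maxWait time.Duration) {
	const waitTick = 100 * time.Millisecond
	remaining := maxWait
	for atomic.LoadInt32(&inFlight) >= maxIO {
		if remaining <= 0 {
			fmt.Printf("waited max %s, resuming\n", maxWait)
			return
		}
		sleep := waitTick
		if remaining < waitTick {
			sleep = remaining
		}
		time.Sleep(sleep)
		remaining -= sleep
	}
}

func main() {
	atomic.StoreInt32(&inFlight, 5)
	start := time.Now()
	waitForLowLoad(2, 300*time.Millisecond) // load never drops, so resume after ~300ms
	fmt.Println("resumed after", time.Since(start).Round(50*time.Millisecond))
}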
@@ -97,25 +71,27 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
select {
case task, ok := <-h.tasks:
if !ok {
return
break
}
// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
var res madmin.HealResultItem
var err error
switch task.bucket {
case nopHeal:
continue
case SlashSeparator:
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = healDiskFormat(ctx, objAPI, task.opts)
default:
if task.object == "" {
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
} else {
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
case bucket != "" && object == "":
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
if task.path != slashSeparator && task.path != nopHeal {
ObjectPathUpdated(task.path)
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-ctx.Done():
@@ -124,7 +100,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
}
}
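
In the variant that carries a single path, the task is routed by splitting the path back into bucket and object: both empty heals the disk format, bucket only heals the bucket, and both set heals the object. A rough sketch of that dispatch (the split helper illustrates what path2BucketObject does; it is not its exact code):

package main

import (
	"fmt"
	"strings"
)

// splitHealPath splits "bucket/object..." into its two halves;
// "" or "/" yields two empty strings (heal the disk format),
// "bucket/" yields only a bucket (heal the bucket).
func splitHealPath(p string) (bucket, object string) {
	p = strings.Trim(p, "/")
	if i := strings.Index(p, "/"); i >= 0 {
		return p[:i], p[i+1:]
	}
	return p, ""
}

func dispatch(path string) string {
	bucket, object := splitHealPath(path)
	switch {
	case bucket == "" && object == "":
		return "heal disk format"
	case object == "":
		return "heal bucket " + bucket
	default:
		return fmt.Sprintf("heal object %s/%s", bucket, object)
	}
}

func main() {
	for _, p := range []string{"/", "bucket/", "bucket/object"} {
		fmt.Printf("%-13q => %s\n", p, dispatch(p))
	}
}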
func newHealRoutine() *healRoutine {
func initHealRoutine() *healRoutine {
return &healRoutine{
tasks: make(chan healTask),
doneCh: make(chan struct{}),
@@ -132,6 +108,24 @@ func newHealRoutine() *healRoutine {
}
func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = initHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)
// Launch the background healer sequence to track
// background healing operations; ignore errors, as
// they are already handled as offline disks.
info, _ := objAPI.StorageInfo(ctx, false)
numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
nh := newBgHealSequence(numDisks)
globalBackgroundHealState.LaunchNewHealSequence(nh)
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
go startBackgroundHealing(ctx, objAPI)
}
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -143,5 +137,16 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
return madmin.HealResultItem{}, err
}
// Healing succeeded; notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
return res, nil
}

View File

@@ -17,430 +17,110 @@
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/madmin"
)
const (
defaultMonitorNewDiskInterval = time.Second * 10
healingTrackerFilename = ".healing.bin"
)
const defaultMonitorNewDiskInterval = time.Minute * 10
//go:generate msgp -file $GOFILE -unexported
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`
ID string
PoolIndex int
SetIndex int
DiskIndex int
Path string
Endpoint string
Started time.Time
LastUpdate time.Time
ObjectsHealed uint64
ObjectsFailed uint64
BytesDone uint64
BytesFailed uint64
// Last object scanned.
Bucket string `json:"-"`
Object string `json:"-"`
// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeObjectsHealed uint64 `json:"-"`
ResumeObjectsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
// Filled on startup/restarts.
QueuedBuckets []string
// Filled during heal.
HealedBuckets []string
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
// loadHealingTracker will load the healing tracker from the supplied disk.
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil disk given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
if err != nil {
return nil, err
}
var h healingTracker
_, err = h.UnmarshalMsg(b)
if err != nil {
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
return &h, nil
}
// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
}
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return &h
}
// update will update the tracker on the disk.
// If the tracker has been deleted, an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
}
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
return h.save(ctx)
}
// save will unconditionally save the tracker, creating it if it does not exist.
func (h *healingTracker) save(ctx context.Context) error {
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
if ep, ok := api.(*erasureServerPools); ok {
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
}
}
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
if err != nil {
return err
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
htrackerBytes)
}
// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
false)
}
func (h *healingTracker) isHealed(bucket string) bool {
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}
// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.ObjectsHealed = h.ResumeObjectsHealed
h.ObjectsFailed = h.ResumeObjectsFailed
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
}
// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.ResumeObjectsHealed = h.ObjectsHealed
h.ResumeObjectsFailed = h.ObjectsFailed
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
// Delete...
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
}
}
}
// setQueuedBuckets will add buckets, but exclude any that are already in h.HealedBuckets.
// Order is preserved.
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
s := set.CreateStringSet(h.HealedBuckets...)
h.QueuedBuckets = make([]string, 0, len(buckets))
for _, b := range buckets {
if !s.Contains(b.Name) {
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
}
}
}
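
setQueuedBuckets computes "all buckets minus the already-healed ones" while preserving the incoming order, so a restarted heal resumes where it left off. The same pattern with a plain map standing in for set.CreateStringSet:

package main

import "fmt"

// queueRemaining returns the buckets not yet healed, preserving the
// order of the input slice (a map stands in for set.CreateStringSet).
func queueRemaining(buckets, healed []string) []string {
	done := make(map[string]struct{}, len(healed))
	for _, b := range healed {
		done[b] = struct{}{}
	}
	queued := make([]string, 0, len(buckets))
	for _, b := range buckets {
		if _, ok := done[b]; !ok {
			queued = append(queued, b)
		}
	}
	return queued
}

func main() {
	buckets := []string{"alpha", "beta", "gamma"}
	healed := []string{"beta"}
	fmt.Println(queueRemaining(buckets, healed)) // [alpha gamma]
}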
func (h *healingTracker) printTo(writer io.Writer) {
b, err := json.MarshalIndent(h, "", " ")
if err != nil {
writer.Write([]byte(err.Error()))
}
writer.Write(b)
}
// toHealingDisk converts the information to madmin.HealingDisk
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
return madmin.HealingDisk{
ID: h.ID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
ObjectsHealed: h.ObjectsHealed,
ObjectsFailed: h.ObjectsFailed,
BytesDone: h.BytesDone,
BytesFailed: h.BytesFailed,
Bucket: h.Bucket,
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
}
}
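
The tracker is persisted as MessagePack through the generated MarshalMsg/UnmarshalMsg methods shown in the deleted file further down. A standalone sketch of the same key-by-key encode/decode pattern, using the msgp primitives those generated methods call (field names here are a two-field subset for illustration):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a two-field map the way the generated MarshalMsg does:
	// a map header, then key/value pairs with typed appenders.
	var b []byte
	b = msgp.AppendMapHeader(b, 2)
	b = msgp.AppendString(b, "ID")
	b = msgp.AppendString(b, "disk-1234")
	b = msgp.AppendString(b, "ObjectsHealed")
	b = msgp.AppendUint64(b, 42)

	// Decode it back, mirroring the generated UnmarshalMsg loop.
	n, bts, err := msgp.ReadMapHeaderBytes(b)
	if err != nil {
		panic(err)
	}
	for ; n > 0; n-- {
		key, rest, err := msgp.ReadMapKeyZC(bts)
		if err != nil {
			panic(err)
		}
		bts = rest
		switch string(key) {
		case "ID":
			var id string
			id, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				panic(err)
			}
			fmt.Println("ID =", id)
		case "ObjectsHealed":
			var healed uint64
			healed, bts, err = msgp.ReadUint64Bytes(bts)
			if err != nil {
				panic(err)
			}
			fmt.Println("ObjectsHealed =", healed)
		}
	}
}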
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureServerPools)
if !ok {
return
}
initBackgroundHealing(ctx, objAPI) // start quick background healing
bgSeq := mustGetHealSequence(ctx)
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
drivesToHeal, defaultMonitorNewDiskInterval))
// Heal any disk format and metadata early, if possible.
// Start with format healing
if err := bgSeq.healDiskFormat(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations when the object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
}
if err := bgSeq.healDiskMeta(objAPI); err != nil {
if newObjectLayerFn() != nil {
// log only in situations when the object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}
func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
}
return disksToHeal
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible for performing the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*xlZones)
if !ok {
return
}
var bgSeq *healSequence
var found bool
for {
bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if found {
break
}
time.Sleep(time.Second)
}
// Perform automatic disk healing when a disk is replaced locally.
for {
select {
case <-ctx.Done():
return
case <-diskCheckTimer.C:
// Reset to next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))
erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
for i := range z.serverPools {
erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
}
}
if serverDebugLog {
console.Debugf(color.Green("healDisk:")+" disk check timer fired, attempting to heal %d drives\n", len(healDisks))
}
// heal only if new disks found.
for _, endpoint := range healDisks {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
continue
}
// Calculate the set index where the current endpoint belongs
z.serverPools[poolIdx].erasureDisksMu.RLock()
// Protect reading reference format.
setIndex, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
printEndpointError(endpoint, err, false)
continue
}
erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
}
buckets, _ := z.ListBuckets(ctx)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})
// Bucket data is dispersed across multiple zones/sets; make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})
// TODO(klauspost): This will block until all heals are done,
// in the future this should be able to start healing other sets at once.
var wg sync.WaitGroup
for i, setMap := range erasureSetInPoolDisksToHeal {
i := i
for setIndex, disks := range setMap {
if len(disks) == 0 {
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal as the server starts up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
wg.Add(1)
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
// Someone changed the drives underneath; the healing tracker is missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
tracker = newHealingTracker(disk)
}
// Reformat disks only if needed.
if !healNewDisks {
continue
}
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
logger.LogIf(ctx, err)
// Unable to write the healing tracker: permission denied or some
// other unexpected error occurred. We cannot proceed further;
// return and let the next cycle look for disks to heal again.
return
}
// Reformat disks
bgSeq.sourceCh <- healSource{path: SlashSeparator}
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{path: nopHeal}
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
logger.LogIf(ctx, tracker.delete(ctx))
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure sets to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}(setIndex, disks)
erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
}
}
}
wg.Wait()
}
}
}

View File

@@ -1,664 +0,0 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 20
// write "ID"
err = en.Append(0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "PoolIndex"
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "SetIndex"
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "DiskIndex"
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "Path"
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "Endpoint"
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ObjectsHealed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
// write "ObjectsFailed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "BytesDone"
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "BytesFailed"
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ResumeObjectsHealed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
// write "ResumeObjectsFailed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesDone)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
// write "ResumeBytesFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "HealedBuckets"
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 20
// string "ID"
o = append(o, 0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "SetIndex"
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "DiskIndex"
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "Path"
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ObjectsHealed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsHealed)
// string "ObjectsFailed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsFailed)
// string "BytesDone"
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "BytesFailed"
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "ResumeObjectsHealed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsHealed)
// string "ResumeObjectsFailed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsFailed)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "HealedBuckets"
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 14 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
return
}
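For orientation, a minimal sketch (not part of the diff) of how the generated Marshaler/Unmarshaler pair round-trips a value; it assumes it runs inside package cmd, where healingTracker is declared, and the helper name exampleHealingTrackerRoundTrip is hypothetical:

func exampleHealingTrackerRoundTrip() error {
	src := healingTracker{
		ID:            "disk-uuid",
		QueuedBuckets: []string{"bucket-a", "bucket-b"},
	}
	// MarshalMsg appends the MessagePack encoding to the passed slice;
	// with nil it allocates a buffer sized from Msgsize().
	buf, err := src.MarshalMsg(nil)
	if err != nil {
		return err
	}
	// UnmarshalMsg decodes one value and returns any leftover bytes.
	var dst healingTracker
	if _, err := dst.UnmarshalMsg(buf); err != nil {
		return err
	}
	return nil
}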


@@ -1,123 +0,0 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"bytes"
"testing"

"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}
vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}

func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
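These generated tests and benchmarks run with the standard Go tooling; for example, from the repository root, something like the following exercises just the healingTracker codecs with allocation counts (package path assumed):

go test -run NONE -bench healingTracker -benchmem ./cmd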


@@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
@@ -34,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -54,7 +55,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -75,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -113,7 +114,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -126,9 +127,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.StopTimer()
}
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -142,9 +143,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
runPutObjectPartBenchmark(b, objLayer, objSize)
}
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -158,9 +159,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
runPutObjectBenchmark(b, objLayer, objSize)
}
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -174,6 +175,56 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup (MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
textData := generateBytesData(objSize)
// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain the number of allocations and bytes allocated per op.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -189,13 +240,45 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup (MakeBucket) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
@@ -218,7 +301,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -232,3 +315,58 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup (MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain the number of allocations and bytes allocated per op.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
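runGetObjectBenchmarkParallel above drives reads through testing.B.RunParallel, each worker cycling over the ten pre-created objects. A minimal standalone sketch of that pattern, independent of the object layer (benchmark name hypothetical; assumes the usual testing import):

// Each parallel worker owns its own index and cycles through a fixed
// set of ten pre-created items, mirroring the loop above.
func BenchmarkRunParallelPattern(b *testing.B) {
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() {
			_ = i // stand-in for obj.GetObject(..., "object"+strconv.Itoa(i), ...)
			i++
			if i == 10 {
				i = 0
			}
		}
	})
}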


@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
@@ -37,7 +36,7 @@ func (err *errHashMismatch) Error() string {
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser
iow *io.PipeWriter
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
@@ -71,10 +70,9 @@ func (b *streamingBitrotWriter) Close() error {
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
@@ -82,7 +80,8 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
err := disk.CreateFile(volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
return bw
@@ -91,8 +90,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
data []byte
rc io.Reader
rc io.ReadCloser
volume string
filePath string
tillOffset int64
@@ -106,10 +104,7 @@ func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
if closer, ok := b.rc.(io.Closer); ok {
return closer.Close()
}
return nil
return b.rc.Close()
}
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
@@ -123,16 +118,11 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
if len(b.data) == 0 {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
}
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
return 0, err
}
}
if offset != b.currOffset {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
@@ -149,20 +139,20 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
return 0, errFileCorrupt
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))
return len(buf), nil
}
// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
data,
nil,
volume,
filePath,
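(The struct literal above is truncated by the diff view.) The writer goroutine earlier sizes the stream as ceilFrac(length, shardSize) * h.Size() checksum bytes plus the data itself, because the streaming format stores one hash ahead of every shard. A minimal sketch of that arithmetic (helper name hypothetical):

// On-disk size of the streaming bitrot format: every shardSize-byte
// chunk of data is preceded by a hashSize-byte checksum. A negative
// length (unknown, e.g. compressed objects) stays -1, as above.
func streamingBitrotFileSize(length, shardSize, hashSize int64) int64 {
	if length < 0 {
		return -1
	}
	numShards := (length + shardSize - 1) / shardSize // ceilFrac
	return numShards*hashSize + length
}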


@@ -17,8 +17,6 @@
package cmd
import (
"context"
"fmt"
"hash"
"io"
@@ -35,14 +33,14 @@ type wholeBitrotWriter struct {
}
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
@@ -70,13 +68,15 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
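(This hunk is also truncated by the diff view.) Unlike the streaming reader, wholeBitrotReader verifies in one shot: the first ReadAt buffers the entire range, with the attached verifier checking the hash as ReadFile runs, and later calls copy from that buffer. A minimal standalone sketch of the buffer-once pattern, storage and verification details elided (type name hypothetical):

// Buffer-once ReadAt: the first call reads everything from src, later
// calls copy from the cache. Simplified - the io.ReaderAt contract for
// short reads is not fully honored here.
type bufferOnceReaderAt struct {
	src io.Reader // assumes "io" is imported
	buf []byte
}

func (r *bufferOnceReaderAt) ReadAt(p []byte, off int64) (int, error) {
	if r.buf == nil {
		b, err := io.ReadAll(r.src)
		if err != nil {
			return 0, err
		}
		r.buf = b
	}
	if off >= int64(len(r.buf)) {
		return 0, io.EOF
	}
	return copy(p, r.buf[off:]), nil
}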


@@ -17,19 +17,38 @@
package cmd
import (
"crypto/sha256"
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)
// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint
const (
// SHA256 represents the SHA-256 hash function
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256S
)
var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
@@ -96,16 +115,16 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return
}
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}
func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, data, bucket, filePath, tillOffset, algo, shardSize)
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}
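The HighwayHash-256 variants are keyed with magicHighwayHash256Key declared above. As a minimal sketch of hashing one shard with that key (the wiring through BitrotAlgorithm's New is outside this hunk; github.com/minio/highwayhash is already imported, and the function name is hypothetical):

// Hash one shard with HighwayHash-256 keyed by the magic key.
// highwayhash.New errors only on a bad key size; the key above is 32 bytes.
func hashShardSketch(shard []byte) []byte {
	h, err := highwayhash.New(magicHighwayHash256Key)
	if err != nil {
		return nil // unreachable with a 32-byte key
	}
	h.Write(shard) // hash.Hash.Write never returns an error
	return h.Sum(nil)
}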


@@ -17,9 +17,9 @@
package cmd
import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -27,53 +27,53 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
defer os.RemoveAll(tmpDir)
volume := "testvol"
filePath := "testfile"
disk, err := newLocalXLStorage(tmpDir)
disk, err := newPosix(tmpDir, "")
if err != nil {
t.Fatal(err)
}
disk.MakeVol(context.Background(), volume)
disk.MakeVol(volume)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
t.Fatal(err)
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
t.Fatal(err)
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
t.Fatal(err)
log.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
t.Fatal(err)
log.Fatal(err)
}
}
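As a concrete size check: the test writes 35 bytes with a shard size of 10, i.e. four shards. For the streaming algorithm (HighwayHash256S, 32-byte sums) the hypothetical streamingBitrotFileSize sketch earlier gives 4*32 + 35 = 163 bytes on disk, while the whole-file algorithms store the 35 bytes unmodified and return the single checksum via bitrotWriterSum for out-of-band storage.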
