Compare commits
255 Commits
RELEASE.20... → release
| Author | SHA1 | Date |
|---|---|---|
|  | 404d2ebe3f |  |
|  | 46964eb764 |  |
|  | bfab990c33 |  |
|  | 94018588fe |  |
|  | 8b76ba8d5d |  |
|  | 7eb7f65e48 |  |
|  | c608c0688a |  |
|  | 41a9d1d778 |  |
|  | e21e80841e |  |
|  | 98c792bbeb |  |
|  | f687ba53bc |  |
|  | e3da59c923 |  |
|  | 781b9b051c |  |
|  | 438becfde8 |  |
|  | 16ef338649 |  |
|  | 3242847ec0 |  |
|  | cf87303094 |  |
|  | 90d8ec6310 |  |
|  | b383522743 |  |
|  | 6d42036dd4 |  |
|  | b4d8bcf644 |  |
|  | d7f32ad649 |  |
|  | 906d68c356 |  |
|  | 749e9c5771 |  |
|  | 410e84d273 |  |
|  | 75741dbf4a |  |
|  | fad7b27f15 |  |
|  | 79564656eb |  |
|  | 21cfc4aa49 |  |
|  | e80239a661 |  |
|  | 6a2ed44095 |  |
|  | 8adfeb0d84 |  |
|  | d23485e571 |  |
|  | da70e6ddf6 |  |
|  | 922c7b57f5 |  |
|  | 726d80dbb7 |  |
|  | 23b03dadb8 |  |
|  | 7b3719c17b |  |
|  | 9a6487319a |  |
|  | 94ff624242 |  |
|  | 98ff91b484 |  |
|  | 4d86384dc7 |  |
|  | 27eb4ae3bc |  |
|  | b5dcaaccb4 |  |
|  | 0843280dc3 |  |
|  | 61a1ea60c2 |  |
|  | b92a220db1 |  |
|  | be5910b87e |  |
|  | 51a8619a79 |  |
|  | d46c3c07a8 |  |
|  | 14d89eaae4 |  |
|  | 32b088a2ff |  |
|  | eed3b66d98 |  |
|  | add3cd4e44 |  |
|  | 60b0f2324e |  |
|  | 0eb146e1b2 |  |
|  | b379ca3bb0 |  |
|  | e197800f90 |  |
|  | 980311fdfd |  |
|  | 771dea175c |  |
|  | fa94682e83 |  |
|  | 6160188bf3 |  |
|  | e5a1a2a974 |  |
|  | 642ba3f2d6 |  |
|  | 4d80de899a |  |
|  | afbd3e41eb |  |
|  | 5e003549cc |  |
|  | 7fa3e4106b |  |
|  | 75db500e85 |  |
|  | 910097bbbc |  |
|  | afd346417d |  |
|  | feafccf007 |  |
|  | 9b54fcdf12 |  |
|  | f92b7a5621 |  |
|  | c25e75f0b5 |  |
|  | 3ffe520643 |  |
|  | 952b0f111d |  |
|  | 777344a594 |  |
|  | 9d118b372e |  |
|  | 878bc6c72b |  |
|  | fdc2f69218 |  |
|  | 0d124095ea |  |
|  | 691035832a |  |
|  | eac66e67ec |  |
|  | 57f3ed22d4 |  |
|  | 2f29719e6b |  |
|  | 209fe61dcc |  |
|  | 8ecffdb7a7 |  |
|  | 806df164b2 |  |
|  | 4ac9ed4248 |  |
|  | 3d8c512bba |  |
|  | 3ff5f55dcb |  |
|  | 097e5eba9f |  |
|  | ba6930bb13 |  |
|  | 64662a49ff |  |
|  | 78e867e145 |  |
|  | 9ccc483df6 |  |
|  | abce040088 |  |
|  | 558762bdf6 |  |
|  | d971061305 |  |
|  | 509bcc01ad |  |
|  | 7ea95fcec8 |  |
|  | 79b0d056a2 |  |
|  | ec547c0fa8 |  |
|  | bcf9825082 |  |
|  | 651487507a |  |
|  | 124816f6a6 |  |
|  | fa9cf1251b |  |
|  | 97e7a902d0 |  |
|  | d73d756a80 |  |
|  | 7488c77e7c |  |
|  | 786585009e |  |
|  | 7be7109471 |  |
|  | 464fa08f2e |  |
|  | c3217bd6eb |  |
|  | f14cc6c943 |  |
|  | 2c198ae7b6 |  |
|  | 690434514d |  |
|  | 039f59b552 |  |
|  | c6a120df0e |  |
|  | cd9e30c0f4 |  |
|  | f96d4cf7d3 |  |
|  | 879599b0cf |  |
|  | b1bb3f7016 |  |
|  | e8d8dfa3ae |  |
|  | bbd1244a88 |  |
|  | 10bdb78699 |  |
|  | 289b22d911 |  |
|  | 0b9c17443e |  |
|  | 23f7ab40b3 |  |
|  | e3f8830ab7 |  |
|  | 2f4af09c01 |  |
|  | 37960cbc2f |  |
|  | c67d1bf120 |  |
|  | c5b3a675fa |  |
|  | b690304eed |  |
|  | 9171d6ef65 |  |
|  | 6386b45c08 |  |
|  | 1f659204a2 |  |
|  | f9f6fd0421 |  |
|  | 85620dfe93 |  |
|  | a8e4f64ff3 |  |
|  | ca5c6e3160 |  |
|  | b23659927c |  |
|  | b912e9ab41 |  |
|  | c1a49be639 |  |
|  | 03172b89e2 |  |
|  | b517c791e9 |  |
|  | 14aef52004 |  |
|  | 67b20125e4 |  |
|  | d4b822d697 |  |
|  | 1b63291ee2 |  |
|  | aa7244a9a4 |  |
|  | 2a79ea0332 |  |
|  | 6e5c61d917 |  |
|  | 02e7de6367 |  |
|  | cec12f4c76 |  |
|  | da676ac298 |  |
|  | 18ec933085 |  |
|  | c31d2c3fdc |  |
|  | 8778828a03 |  |
|  | 48b212dd8e |  |
|  | be7de911c4 |  |
|  | 8cad407e0b |  |
|  | 85d2187c20 |  |
|  | 98d3f94996 |  |
|  | 173284903b |  |
|  | c70240b893 |  |
|  | 8ba2136e06 |  |
|  | 2dce5d9442 |  |
|  | f28b063091 |  |
|  | 90abea5b7a |  |
|  | c5b2a8441b |  |
|  | 0f5ca83418 |  |
|  | 8a6b13c239 |  |
|  | 8e8a792d9d |  |
|  | 95e0acbb26 |  |
|  | 55037e6e54 |  |
|  | 289e1d8b2a |  |
|  | e07918abe3 |  |
|  | ffea6fcf09 |  |
|  | 11b2220696 |  |
|  | aa8450a2a1 |  |
|  | 87cce344f6 |  |
|  | 7d4a2d2b68 |  |
|  | cfc8b92dff |  |
|  | c4e12dc846 |  |
|  | a94a9c37fa |  |
|  | 79b6a43467 |  |
|  | 928de04f7a |  |
|  | 93fd248b52 |  |
|  | 2a7b123895 |  |
|  | b3c56b53fb |  |
|  | 0ef3e359d8 |  |
|  | f24d8127ab |  |
|  | 7875d472bc |  |
|  | 711adb9652 |  |
|  | e6b4ea7618 |  |
|  | 466e95bb59 |  |
|  | 881f98e511 |  |
|  | cbf4bb62e0 |  |
|  | 682482459d |  |
|  | b87fae0049 |  |
|  | b8b44c879f |  |
|  | f53d1de87f |  |
|  | 5a18d437ce |  |
|  | 93eb549a83 |  |
|  | fe3c39b583 |  |
|  | 84d400487f |  |
|  | 3afa499885 |  |
|  | 13d015cf93 |  |
|  | 876b79b8d8 |  |
|  | 3d74efa6b1 |  |
|  | 68d299e719 |  |
|  | f9c5636c2d |  |
|  | 9b10118d34 |  |
|  | 0e3211f4ad |  |
|  | 2e4d9124ad |  |
|  | 6fef4c21b9 |  |
|  | 8e1bbd989a |  |
|  | 152d7cd95b |  |
|  | 74080bf108 |  |
|  | 647a209c73 |  |
|  | 0d057c777a |  |
|  | 275f7a63e8 |  |
|  | 97fe57bba9 |  |
|  | 88c1bb0720 |  |
|  | 1fdafaf72f |  |
|  | 5fe4bb6b36 |  |
|  | 99b733d44c |  |
|  | b4ac05523b |  |
|  | c7eacba41c |  |
|  | 1887c25279 |  |
|  | c9b0f595b9 |  |
|  | 8bb580abfc |  |
|  | af9cb5f5f2 |  |
|  | 9497dfd804 |  |
|  | da55a05587 |  |
|  | 3fc4d6f620 |  |
|  | 67a8f37df0 |  |
|  | 075c429021 |  |
|  | df0c678167 |  |
|  | f108873c48 |  |
|  | 871b450dbd |  |
|  | f71e192343 |  |
|  | b3f81e75f6 |  |
|  | a71e0483c9 |  |
|  | f2d49ec21a |  |
|  | 4a9d9c8585 |  |
|  | c885777ac6 |  |
|  | fe3aca70c3 |  |
|  | c4848f9b4f |  |
|  | 838d4dafbd |  |
|  | 8c663f93f7 |  |
|  | b4cb7edf85 |  |
```diff
@@ -1,2 +1,9 @@
 .git
 .github
+docs
+default.etcd
+browser
+*.gz
+*.tar.gz
+*.bzip2
+*.zip
```
**.github/workflows/go.yml** (14 changes, vendored)

```diff
@@ -11,8 +11,8 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        go-version: [1.14.x, 1.15.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.16.x]
+        os: [ubuntu-latest, windows-latest, macos-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v1
@@ -21,6 +21,14 @@ jobs:
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
+      - name: Build on ${{ matrix.os }}
+        if: matrix.os == 'macos-latest'
+        env:
+          CGO_ENABLED: 0
+          GO111MODULE: on
+        run: |
+          make
+          make test-race
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        env:
@@ -42,8 +50,6 @@ jobs:
          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
          go list -m all | ./nancy sleuth
          make
-          diff -au <(gofmt -s -d cmd) <(printf "")
-          diff -au <(gofmt -s -d pkg) <(printf "")
          make test-race
          make crosscompile
          make verify
```
```diff
@@ -17,6 +17,8 @@ linters:
   - gosimple
   - deadcode
   - structcheck
+  - gomodguard
+  - gofmt

 issues:
   exclude-use-default: false
```
```diff
@@ -1,4 +1,4 @@
-FROM golang:1.15-alpine as builder
+FROM golang:1.16-alpine as builder

 LABEL maintainer="MinIO Inc <dev@min.io>"

```
```diff
@@ -1,4 +1,4 @@
-FROM golang:1.15-alpine as builder
+FROM golang:1.16-alpine as builder

 LABEL maintainer="MinIO Inc <dev@min.io>"

```
```diff
@@ -6,8 +6,6 @@ LABEL maintainer="MinIO Inc <dev@min.io>"

 COPY dockerscripts/docker-entrypoint.sh /usr/bin/
 COPY minio /usr/bin/
-COPY CREDITS /licenses/CREDITS
-COPY LICENSE /licenses/LICENSE

 ENV MINIO_UPDATE=off \
     MINIO_ACCESS_KEY_FILE=access_key \
@@ -17,10 +15,9 @@ ENV MINIO_UPDATE=off \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
     MINIO_SSE_MASTER_KEY_FILE=sse_master_key

-RUN \
-    microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
-    microdnf clean all && \
+RUN microdnf update --nodocs
+RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
+RUN microdnf clean all && \
     chmod +x /usr/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh
```
```diff
@@ -2,11 +2,13 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

 ARG TARGETARCH

+ARG RELEASE
+
 LABEL name="MinIO" \
       vendor="MinIO Inc <dev@min.io>" \
       maintainer="MinIO Inc <dev@min.io>" \
-      version="RELEASE.2021-01-30T00-20-58Z" \
-      release="RELEASE.2021-01-30T00-20-58Z" \
+      version="${RELEASE}" \
+      release="${RELEASE}" \
       summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
       description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

@@ -28,9 +30,9 @@ RUN \
     microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
     microdnf clean all && \
     chmod +x /usr/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
```
**Makefile** (28 changes)

```diff
@@ -17,38 +17,28 @@ checks:
 getdeps:
 	@mkdir -p ${GOPATH}/bin
 	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
-	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && go get github.com/quasilyte/go-ruleguard/cmd/ruleguard@v0.2.1)
-	@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)

 crosscompile:
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

-verifiers: getdeps fmt lint ruleguard check-gen
+verifiers: getdeps lint check-gen

 check-gen:
 	@go generate ./... >/dev/null
 	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

-fmt:
-	@echo "Running $@ check"
-	@GO111MODULE=on gofmt -d cmd/
-	@GO111MODULE=on gofmt -d pkg/
-
 lint:
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml
-
-ruleguard:
-	@echo "Running $@ check"
-	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml

 # Builds minio, runs the verifiers then runs the tests.
 check: test
 test: verifiers build
 	@echo "Running unit tests"
-	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

 test-race: verifiers build
 	@echo "Running unit tests under -race"
@@ -71,18 +61,18 @@ build: checks
 	@echo "Building minio binary to './minio'"
 	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

-hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
-    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
-TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)"
-hotfix: install
+hotfix-vars:
+	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
+    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
+	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
+hotfix: hotfix-vars install

 docker-hotfix: hotfix checks
 	@echo "Building minio docker image '$(TAG)'"
 	@docker build -t $(TAG) . -f Dockerfile.dev

-docker: checks
+docker: build checks
 	@echo "Building minio docker image '$(TAG)'"
-	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@docker build -t $(TAG) . -f Dockerfile.dev

 # Builds minio and installs it to $GOPATH/bin.
```
**README.md** (100 changes)

````diff
@@ -5,32 +5,32 @@

 MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

-This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
-use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
+This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
+use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

 # Docker Installation

-Use the following commands to run a standalone MinIO server on a Docker container.
+Use the following commands to run a standalone MinIO server on a Docker container.

-Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
-require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
 for more complete documentation.

 ## Stable

-Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:
+Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:

 ```sh
 docker run -p 9000:9000 minio/minio server /data
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

@@ -45,12 +45,12 @@ Run the following command to run the bleeding-edge image of MinIO on a Docker co
 docker run -p 9000:9000 minio/minio:edge server /data
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

@@ -61,9 +61,9 @@ see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view

 Use the following commands to run a standalone MinIO server on macOS.

-Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
-require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
 for more complete documentation.

 ## Homebrew (recommended)
@@ -82,12 +82,12 @@ brew uninstall minio
 brew install minio/stable/minio
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

 ## Binary Download
@@ -100,12 +100,12 @@ chmod +x minio
 ./minio server /data
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

@@ -130,18 +130,18 @@ The following table lists supported architectures. Replace the `wget` URL with t
 | 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
 | IBM Z-Series (S390X)        | https://dl.min.io/server/minio/release/linux-s390x/minio   |

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

-> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
-require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
 for more complete documentation.

 # Microsoft Windows
@@ -158,17 +158,17 @@ Use the following command to run a standalone MinIO server on the Windows host.
 minio.exe server D:\
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

-> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
-require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
 for more complete documentation.

 # FreeBSD
@@ -184,27 +184,27 @@ service minio start

 # Install from Source

-Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
+Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
 ```

-The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
-web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
 root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
-[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
 see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

-> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
-require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
-for more complete documentation.
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.

-MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
+MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

 # Deployment Recommendations
````
````diff
@@ -89,7 +89,7 @@ service minio start

 ## 使用源码安装

-采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.14](https://golang.org/dl/#stable)
+采用源码安装仅供开发人员和高级用户使用,如果你还没有Golang环境, 请参考 [How to install Golang](https://golang.org/doc/install)。最低需要Golang版本为 [go1.16](https://golang.org/dl/#stable)

 ```sh
 GO111MODULE=on go get github.com/minio/minio
````
**browser/.gitignore** (1 change, vendored)

```diff
@@ -17,4 +17,3 @@ release
 *.syso
 coverage.txt
 node_modules
-production
```
````diff
@@ -17,24 +17,13 @@ nvm install stable
 npm install
 ```

-### Install `go-bindata` and `go-bindata-assetfs`
-
-If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
-
-```sh
-go get github.com/go-bindata/go-bindata/go-bindata
-go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
-```
-
 ## Generating Assets

-### Generate ui-assets.go
-
 ```sh
 npm run release
 ```

-This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
+This generates `production` in the current directory.

 ## Run MinIO Browser with live reload
````
```diff
@@ -24,9 +24,6 @@ jest.mock("jwt-decode")
 jwtDecode.mockImplementation(() => ({ sub: "minio" }))

 jest.mock("../../web", () => ({
-  GenerateAuth: jest.fn(() => {
-    return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
-  }),
   SetAuth: jest.fn(
     ({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
       if (
```
```diff
@@ -26,6 +26,7 @@ import {
   SHARE_OBJECT_EXPIRY_HOURS,
   SHARE_OBJECT_EXPIRY_MINUTES
 } from "../constants"
+import QRCode from "react-qr-code";

 export class ShareObjectModal extends React.Component {
   constructor(props) {
@@ -89,6 +90,7 @@ export class ShareObjectModal extends React.Component {
         <ModalHeader>Share Object</ModalHeader>
         <ModalBody>
           <div className="input-group copy-text">
+            <QRCode value={url} size={128}/>
             <label>Shareable Link</label>
             <input
               type="text"
```
```diff
@@ -75,6 +75,11 @@
       border-color: darken(@input-border, 5%);
     }
   }

+  svg {
+    display: block;
+    margin: 0 auto 5px;
+  }
 }

 /*--------------------------
@@ -150,4 +155,4 @@
   100% {
     transform: rotate(360deg);
   }
-}
+}
```
**browser/assets.go** (11 changes, new file)

```diff
@@ -0,0 +1,11 @@
+package browser
+
+import "embed"
+
+//go:embed production/*
+var fs embed.FS
+
+// GetStaticAssets returns assets
+func GetStaticAssets() embed.FS {
+	return fs
+}
```
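The new `browser/assets.go` relies on the `embed` package introduced in Go 1.16, which is what the toolchain bumps elsewhere in this compare (CI matrix, `golang:1.16-alpine` Dockerfiles, `GO_VERSION`) enable; it is also why `production` was dropped from `browser/.gitignore` above, since the compiled assets must now be committed for `//go:embed` to find them. As a minimal standalone sketch of the mechanism (illustrative only, not MinIO's actual handler wiring), an embedded `production/` tree can be served like this:

```go
package main

import (
	"embed"
	"io/fs"
	"log"
	"net/http"
)

// The directive embeds every file under production/ into the binary at
// build time; the directory must exist when `go build` runs.
//
//go:embed production/*
var assets embed.FS

func main() {
	// Re-root the FS so that /index.html resolves to production/index.html.
	sub, err := fs.Sub(assets, "production")
	if err != nil {
		log.Fatal(err)
	}
	// http.FS adapts an fs.FS to the http.FileSystem the file server expects.
	http.Handle("/", http.FileServer(http.FS(sub)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```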
```diff
@@ -14,19 +14,11 @@
  * limitations under the License.
  */

 var moment = require('moment')
 var async = require('async')
 var exec = require('child_process').exec
 var fs = require('fs')

 var isProduction = process.env.NODE_ENV == 'production' ? true : false
-var assetsFileName = ''
-var commitId = ''
-var date = moment.utc()
-var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
-var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z'
-var buildType = 'DEVELOPMENT'
-if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD
-
 rmDir = function(dirPath) {
   try { var files = fs.readdirSync(dirPath); }
@@ -53,74 +45,6 @@ async.waterfall([
     console.log('Running', cmd)
     exec(cmd, cb)
   },
-  function(stdout, stderr, cb) {
-    if (isProduction) {
-      fs.renameSync('production/index_bundle.js',
-        'production/index_bundle-' + releaseTag + '.js')
-    } else {
-      fs.renameSync('dev/index_bundle.js',
-        'dev/index_bundle-' + releaseTag + '.js')
-    }
-    var cmd = 'git log --format="%H" -n1'
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    if (!stdout) throw new Error('commitId is empty')
-    commitId = stdout.replace('\n', '')
-    if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
-    assetsFileName = 'ui-assets.go';
-    var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
-    if (!isProduction) {
-      cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
-    }
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    var cmd = 'gofmt -s -w -l bindata_assetfs.go'
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    fs.renameSync('bindata_assetfs.go', assetsFileName)
-    fs.appendFileSync(assetsFileName, '\n')
-    fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' +
-      releaseTag + '"\n')
-    fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n')
-    fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"')
-    fs.appendFileSync(assetsFileName, '\n')
-    var contents;
-    if (isProduction) {
-      contents = fs.readFileSync(assetsFileName, 'utf8')
-        .replace(/_productionIndexHtml/g, '_productionIndexHTML')
-        .replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes')
-        .replace(/productionIndexHtml/g, 'productionIndexHTML')
-        .replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs')
-        .replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes')
-        .replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs')
-        .replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs')
-        .replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes')
-        .replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs');
-    } else {
-      contents = fs.readFileSync(assetsFileName, 'utf8')
-        .replace(/_devIndexHtml/g, '_devIndexHTML')
-        .replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes')
-        .replace(/devIndexHtml/g, 'devIndexHTML')
-        .replace(/_devIndex_bundleJs/g, '_devIndexBundleJs')
-        .replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes')
-        .replace(/devIndex_bundleJs/g, 'devIndexBundleJs')
-        .replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs')
-        .replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes')
-        .replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs');
-    }
-    contents = contents.replace(/MINIO_UI_VERSION/g, version)
-    contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + releaseTag + '.js')
-
-    fs.writeFileSync(assetsFileName, contents, 'utf8')
-    console.log('UI assets file :', assetsFileName)
-    cb()
-  }
 ], function(err) {
   if (err) return console.log(err)
 })
```
**browser/package-lock.json** (32008 changes, generated)

```diff
@@ -6,7 +6,7 @@
     "test": "jest",
     "dev": "NODE_ENV=dev webpack-dev-server --devtool cheap-module-eval-source-map --progress --colors --hot --content-base dev",
     "build": "NODE_ENV=dev node build.js",
-    "release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js",
+    "release": "NODE_ENV=production node build.js",
     "format": "esformatter -i 'app/**/*.js'"
   },
   "jest": {
@@ -84,6 +84,7 @@
     "react-dropzone": "^11.0.1",
     "react-infinite-scroller": "^1.2.4",
     "react-onclickout": "^2.0.8",
+    "react-qr-code": "^1.1.1",
     "react-redux": "^5.1.2",
     "react-router-dom": "^5.2.0",
     "redux": "^4.0.5",
```
**browser/production/chrome.png** (binary, new file, 3.6 KiB)

**browser/production/favicon-16x16.png** (binary, new file, 15 KiB)

**browser/production/favicon-32x32.png** (binary, new file, 16 KiB)

**browser/production/favicon-96x96.png** (binary, new file, 17 KiB)

**browser/production/firefox.png** (binary, new file, 4.7 KiB)
**browser/production/index.html** (59 changes, new file)

```diff
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+  <title>MinIO Browser</title>
+
+  <link rel="icon" type="image/png" sizes="32x32" href="/minio/favicon-32x32.png">
+  <link rel="icon" type="image/png" sizes="96x96" href="/minio/favicon-96x96.png">
+  <link rel="icon" type="image/png" sizes="16x16" href="/minio/favicon-16x16.png">
+
+  <link rel="stylesheet" href="/minio/loader.css" type="text/css">
+</head>
+
+<body>
+  <div class="page-load">
+    <div class="pl-inner">
+      <img src="/minio/logo.svg" alt="">
+    </div>
+  </div>
+  <div id="root"></div>
+
+  <!--[if lt IE 11]>
+    <div class="ie-warning">
+      <div class="iw-inner">
+        <i class="iwi-icon fas fa-exclamation-triangle"></i>
+
+        You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser
+
+        <ul>
+          <li>
+            <a href="http://www.google.com/chrome/">
+              <img src="chrome.png" alt="">
+              <div>Chrome</div>
+            </a>
+          </li>
+          <li>
+            <a href="https://www.mozilla.org/en-US/firefox/new/">
+              <img src="firefox.png" alt="">
+              <div>Firefox</div>
+            </a>
+          </li>
+          <li>
+            <a href="https://www.apple.com/safari/">
+              <img src="safari.png" alt="">
+              <div>Safari</div>
+            </a>
+          </li>
+        </ul>
+
+        <div class="iwi-skip">Skip & Continue</div>
+      </div>
+    </div>
+  <![endif]-->
+
+  <script>currentUiVersion = 'MINIO_UI_VERSION'</script>
+  <script src="/minio/index_bundle.js"></script>
+</body>
+</html>
```
**browser/production/index_bundle.js** (67 changes, new file)
**browser/production/loader.css** (98 changes, new file)

```diff
@@ -0,0 +1,98 @@
+.page-load {
+  position: fixed;
+  width: 100%;
+  height: 100%;
+  top: 0;
+  left: 0;
+  background: #002a37;
+  z-index: 100;
+  transition: opacity 200ms;
+  -webkit-transition: opacity 200ms;
+}
+
+.pl-0{
+  opacity: 0;
+}
+
+.pl-1 {
+  display: none;
+}
+
+.pl-inner {
+  position: absolute;
+  width: 100px;
+  height: 100px;
+  left: 50%;
+  margin-left: -50px;
+  top: 50%;
+  margin-top: -50px;
+  text-align: center;
+  -webkit-animation: fade-in 500ms;
+  animation: fade-in 500ms;
+  -webkit-animation-fill-mode: both;
+  animation-fill-mode: both;
+  animation-delay: 350ms;
+  -webkit-animation-delay: 350ms;
+  -webkit-backface-visibility: visible;
+  backface-visibility: visible;
+}
+
+.pl-inner:before {
+  content: '';
+  position: absolute;
+  width: 100%;
+  height: 100%;
+  left: 0;
+  top: 0;
+  display: block;
+  -webkit-animation: spin 1000ms infinite linear;
+  animation: spin 1000ms infinite linear;
+  border: 1px solid rgba(255, 255, 255, 0.2);;
+  border-left-color: #fff;
+  border-radius: 50%;
+}
+
+.pl-inner > img {
+  width: 30px;
+  margin-top: 21px;
+}
+
+@-webkit-keyframes fade-in {
+  0% {
+    opacity: 0;
+  }
+  100% {
+    opacity: 1;
+  }
+}
+
+@keyframes fade-in {
+  0% {
+    opacity: 0;
+  }
+  100% {
+    opacity: 1;
+  }
+}
+
+@-webkit-keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+    transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(360deg);
+    transform: rotate(360deg);
+  }
+}
+
+@keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+    transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(360deg);
+    transform: rotate(360deg);
+  }
+}
```
**browser/production/logo.svg** (12 changes, new file, 2.2 KiB)

```diff
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="93px" height="187px" viewBox="0 0 93 187" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
+    <title>logo</title>
+    <desc>Created with Sketch.</desc>
+    <defs></defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="logo" transform="translate(0.187500, -0.683594)" fill="#FFFFFF" fill-rule="nonzero">
+            <path d="M91.49,46.551 C86.7827023,38.7699609 82.062696,30.9966172 77.33,23.231 C74.87,19.231 72.33,15.231 69.88,11.231 C69.57,10.731 69.18,10.291 68.88,9.831 C64.35,2.931 55.44,-1.679 46.73,2.701 C42.9729806,4.51194908 40.0995718,7.75449451 38.7536428,11.7020516 C37.4077139,15.6496086 37.701799,19.9721186 39.57,23.701 C41.08,26.641 43.57,29.121 45.91,31.581 C53.03,39.141 60.38,46.491 67.45,54.111 C72.4175495,59.4492221 74.4526451,66.8835066 72.8965704,74.0075359 C71.3404956,81.1315653 66.390952,87.0402215 59.65,89.821 C59.4938176,89.83842 59.3361824,89.83842 59.18,89.821 L59.18,54.591 C46.6388051,61.0478363 35.3944735,69.759905 26.01,80.291 C11.32,96.671 2.64,117.141 0.01,132.071 L23.96,119.821 C31.96,115.771 39.86,111.821 48.14,107.581 L48.14,175.921 L59.14,187.131 L59.14,101.831 C59.14,101.831 59.39,101.711 60.22,101.261 C63.5480598,99.6738911 66.7772674,97.8873078 69.89,95.911 C77.7130888,90.4306687 82.7479457,81.8029342 83.6709542,72.295947 C84.5939627,62.7889599 81.3127806,53.3538429 74.69,46.471 C66.49,37.891 58.24,29.351 50.05,20.761 C47.67,18.261 47.72,15.101 50.05,12.881 C52.38,10.661 55.56,10.881 57.96,13.331 L61.38,16.781 C64.1,19.681 66.79,22.611 69.53,25.481 C76.4547149,32.7389629 83.3947303,39.9823123 90.35,47.211 C90.7,47.571 91.12,47.871 91.5,48.211 L91.93,47.951 C91.8351945,47.4695902 91.6876376,47.0000911 91.49,46.551 Z M48.11,94.931 C47.9883217,95.5022568 47.6230065,95.9917791 47.11,96.271 C42.72,98.601 38.29,100.871 33.87,103.141 L17.76,111.401 C24.771203,96.7435071 35.1132853,83.9289138 47.96,73.981 C48.08,74.221 48.16,74.301 48.16,74.381 C48.15,81.231 48.17,88.081 48.11,94.931 Z" id="Shape"></path>
+        </g>
+    </g>
+</svg>
```

**browser/production/safari.png** (binary, new file, 4.9 KiB)
```diff
@@ -21,7 +21,7 @@ _init() {

 ## Minimum required versions for build dependencies
 GIT_VERSION="1.0"
-GO_VERSION="1.13"
+GO_VERSION="1.16"
 OSX_VERSION="10.8"
 KNAME=$(uname -s)
 ARCH=$(uname -m)
```
```diff
@@ -9,7 +9,7 @@ function _init() {
     export CGO_ENABLED=0

     ## List of architectures and OS to test coss compilation.
-    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
+    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
 }

 function _build() {
```
```diff
@@ -3,5 +3,5 @@
 set -e

 for d in $(go list ./... | grep -v browser); do
-    CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
+    CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
 done
```
```diff
@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
 export SECRET_KEY="minio123"
 export ENABLE_HTTPS=0
 export GO111MODULE=on
+export GOGC=25

 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
```
```diff
@@ -28,6 +28,8 @@ WORK_DIR="$PWD/.verify-$RANDOM"
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

+export GOGC=25
+
 function start_minio_3_node() {
     export MINIO_ROOT_USER=minio
     export MINIO_ROOT_PASSWORD=minio123
```
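Both test scripts (and the Makefile's `test` target above) now export `GOGC=25`, which lowers the Go garbage collector's target heap-growth percentage from the default 100 so the test processes keep a smaller heap at the cost of more frequent collections. The same knob is available programmatically; a minimal illustrative program, not part of this changeset:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Setting GOGC=25 in the environment is equivalent to calling
	// SetGCPercent(25) at startup; the call returns the previous value.
	previous := debug.SetGCPercent(25)
	fmt.Printf("GC target percentage: %d -> 25\n", previous)
}
```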
```diff
@@ -20,7 +20,6 @@ import (
 	"encoding/xml"
 	"io"
 	"net/http"
-	"net/url"

 	"github.com/gorilla/mux"
 	xhttp "github.com/minio/minio/cmd/http"
@@ -180,7 +179,7 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.

 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
-	object, err := url.PathUnescape(vars["object"])
+	object, err := unescapePath(vars["object"])
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -244,7 +243,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.

 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
-	object, err := url.PathUnescape(vars["object"])
+	object, err := unescapePath(vars["object"])
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
```
```diff
@@ -172,7 +172,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	}

 	if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
-		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+		switch err.(type) {
+		case BucketRemoteConnectionErr:
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
+		default:
+			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+		}
 		return
 	}
 	targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
```
```diff
@@ -19,12 +19,15 @@ package cmd
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"path"
+	"sort"

 	"github.com/gorilla/mux"
+	"github.com/minio/minio/cmd/config/dns"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
 	iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -66,7 +69,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	accessKey := vars["accessKey"]

-	ok, err := globalIAMSys.IsTempUser(accessKey)
+	ok, _, err := globalIAMSys.IsTempUser(accessKey)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -155,6 +158,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
 	if !implicitPerm {
 		if !globalIAMSys.IsAllowed(iampolicy.Args{
 			AccountName:     accessKey,
+			Groups:          cred.Groups,
 			Action:          iampolicy.GetUserAdminAction,
 			ConditionValues: getConditionValues(r, "", accessKey, claims),
 			IsOwner:         owner,
@@ -395,6 +399,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
 	}
 	if !globalIAMSys.IsAllowed(iampolicy.Args{
 		AccountName:     parentUser,
+		Groups:          cred.Groups,
 		Action:          iampolicy.CreateUserAdminAction,
 		ConditionValues: getConditionValues(r, "", parentUser, claims),
 		IsOwner:         owner,
@@ -405,6 +410,19 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
 		}
 	}

+	if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
+		AccountName:     accessKey,
+		Groups:          cred.Groups,
+		Action:          iampolicy.CreateUserAdminAction,
+		ConditionValues: getConditionValues(r, "", accessKey, claims),
+		IsOwner:         owner,
+		Claims:          claims,
+		DenyOnly:        true, // check if changing password is explicitly denied.
+	}) {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+		return
+	}
+
 	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
 		// More than maxConfigSize bytes were available
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -483,7 +501,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		parentUser = cred.ParentUser
 	}

-	newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
+	newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -663,6 +681,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 		// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
 		if globalIAMSys.IsAllowed(iampolicy.Args{
 			AccountName:     cred.AccessKey,
+			Groups:          cred.Groups,
 			Action:          iampolicy.ListBucketAction,
 			BucketName:      bucketName,
 			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -675,6 +694,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

 		if globalIAMSys.IsAllowed(iampolicy.Args{
 			AccountName:     cred.AccessKey,
+			Groups:          cred.Groups,
 			Action:          iampolicy.PutObjectAction,
 			BucketName:      bucketName,
 			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -688,12 +708,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 		return rd, wr
 	}

-	buckets, err := objectAPI.ListBuckets(ctx)
-	if err != nil {
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-		return
-	}
-
 	// Load the latest calculated data usage
 	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
 	if err != nil {
@@ -701,12 +715,47 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 		logger.LogIf(ctx, err)
 	}

-	accountName := cred.AccessKey
-	if cred.ParentUser != "" {
-		accountName = cred.ParentUser
-	}
+	// If etcd, dns federation configured list buckets from etcd.
+	var buckets []BucketInfo
+	if globalDNSConfig != nil && globalBucketFederation {
+		dnsBuckets, err := globalDNSConfig.List()
+		if err != nil && !IsErrIgnored(err,
+			dns.ErrNoEntriesFound,
+			dns.ErrDomainMissing) {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+		for _, dnsRecords := range dnsBuckets {
+			buckets = append(buckets, BucketInfo{
+				Name:    dnsRecords[0].Key,
+				Created: dnsRecords[0].CreationDate,
+			})
+		}
+		sort.Slice(buckets, func(i, j int) bool {
+			return buckets[i].Name < buckets[j].Name
+		})
+	} else {
+		buckets, err = objectAPI.ListBuckets(ctx)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+	}

-	policies, err := globalIAMSys.PolicyDBGet(accountName, false)
+	accountName := cred.AccessKey
+	var policies []string
+	switch globalIAMSys.usersSysType {
+	case MinIOUsersSysType:
+		policies, err = globalIAMSys.PolicyDBGet(accountName, false)
+	case LDAPUsersSysType:
+		parentUser := accountName
+		if cred.ParentUser != "" {
+			parentUser = cred.ParentUser
+		}
+		policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
+	default:
+		err = errors.New("should not happen!")
+	}
 	if err != nil {
 		logger.LogIf(ctx, err)
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -963,7 +1012,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
 	isGroup := vars["isGroup"] == "true"

 	if !isGroup {
-		ok, err := globalIAMSys.IsTempUser(entityName)
+		ok, _, err := globalIAMSys.IsTempUser(entityName)
 		if err != nil && err != errNoSuchUser {
 			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 			return
```
@@ -24,6 +24,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -259,9 +260,11 @@ type ServerHTTPAPIStats struct {
|
||||
// ServerHTTPStats holds all type of http operations performed to/from the server
|
||||
// including their average execution time.
|
||||
type ServerHTTPStats struct {
|
||||
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
|
||||
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
|
||||
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
|
||||
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
|
||||
TotalS3Canceled ServerHTTPAPIStats `json:"totalS3Canceled"`
|
||||
}
|
||||
|
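The new S3RequestsInQueue field rides along in the same JSON document the admin API already emits. A minimal sketch of the wire shape, using a trimmed stand-in struct (the real type embeds the ServerHTTPAPIStats maps shown above):

package main

import (
	"encoding/json"
	"fmt"
)

// serverHTTPStats is a trimmed, illustrative copy of ServerHTTPStats.
type serverHTTPStats struct {
	S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
}

func main() {
	b, _ := json.Marshal(serverHTTPStats{S3RequestsInQueue: 7})
	fmt.Println(string(b)) // {"s3RequestsInQueue":7}
}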
// ServerInfoData holds storage, connections and other
@@ -296,7 +299,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
storageInfo, _ := objectAPI.StorageInfo(ctx)

// Collect any disk healing.
healing, _ := getAggregatedBackgroundHealState(ctx)
healing, _ := getAggregatedBackgroundHealState(ctx, nil)
healDisks := make(map[string]struct{}, len(healing.HealDisks))
for _, disk := range healing.HealDisks {
healDisks[disk] = struct{}{}
@@ -860,16 +863,14 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
keepConnLive(w, r, respCh)
}

func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
var bgHealStates []madmin.BgHealState

localHealState, ok := getLocalBackgroundHealStatus()
if !ok {
return madmin.BgHealState{}, errServerNotInitialized
}

// getAggregatedBackgroundHealState returns the heal state of disks.
// If no ObjectLayer is provided no set status is returned.
func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
// Get local heal status first
bgHealStates = append(bgHealStates, localHealState)
bgHealStates, ok := getBackgroundHealStatus(ctx, o)
if !ok {
return bgHealStates, errServerNotInitialized
}

if globalIsDistErasure {
// Get heal status from other peers
@@ -884,33 +885,10 @@ func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState,
if errCount == len(nerrs) {
return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
}
bgHealStates = append(bgHealStates, peersHealStates...)
bgHealStates.Merge(peersHealStates...)
}

// Aggregate healing result
var aggregatedHealStateResult = madmin.BgHealState{
ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
LastHealActivity: bgHealStates[0].LastHealActivity,
NextHealRound: bgHealStates[0].NextHealRound,
HealDisks: bgHealStates[0].HealDisks,
}

bgHealStates = bgHealStates[1:]

for _, state := range bgHealStates {
aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
// The node which has the last heal activity means its
// is the node that is orchestrating self healing operations,
// which also means it is the same node which decides when
// the next self healing operation will be done.
aggregatedHealStateResult.NextHealRound = state.NextHealRound
}
}

return aggregatedHealStateResult, nil
return bgHealStates, nil
}
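The rewritten aggregator delegates the fold to a Merge method on madmin.BgHealState, which is not part of this diff. A minimal sketch of what such a merge could look like, using a local stand-in type whose fields follow the manual aggregation removed above (the real madmin implementation may differ):

package sketch

import "time"

// bgHealState is a stand-in for madmin.BgHealState, for illustration only.
type bgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
	NextHealRound     time.Time
	HealDisks         []string
}

// Merge folds peer states into s: counts are summed, disk lists appended,
// and the most recent heal activity wins together with its NextHealRound,
// since the node with the latest activity orchestrates the next round.
func (s *bgHealState) Merge(peers ...bgHealState) {
	for _, p := range peers {
		s.ScannedItemsCount += p.ScannedItemsCount
		s.HealDisks = append(s.HealDisks, p.HealDisks...)
		if !p.LastHealActivity.IsZero() && s.LastHealActivity.Before(p.LastHealActivity) {
			s.LastHealActivity = p.LastHealActivity
			s.NextHealRound = p.NextHealRound
		}
	}
}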
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
@@ -929,7 +907,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
return
}

aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1347,8 +1325,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
defer cancel()

var err error
nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
ctx, err = nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
if err != nil { // returns a locked lock
errResp(err)
return
}
@@ -1491,30 +1471,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
return
}

rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

setEventStreamHeaders(w)
reportCh := make(chan bandwidth.Report, 1)
reportCh := make(chan bandwidth.Report)
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
bucketsRequestedString := r.URL.Query().Get("buckets")
bucketsRequested := strings.Split(bucketsRequestedString, ",")
go func() {
defer close(reportCh)
for {
reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
select {
case <-ctx.Done():
return
default:
time.Sleep(2 * time.Second)
case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
}
}
}()
for {
select {
case report := <-reportCh:
enc := json.NewEncoder(w)
err := enc.Encode(report)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
case report, ok := <-reportCh:
if !ok {
return
}
if err := json.NewEncoder(w).Encode(report); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
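The new goroutine replaces a blocking send followed by a default-sleep with a single select, so a canceled request can never leave the producer stuck sending on reportCh. A standalone sketch of the same pattern; the report type and fetch function are placeholders, not MinIO APIs:

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

func produce(ctx context.Context, fetch func() string) <-chan string {
	out := make(chan string) // unbuffered: the consumer paces the producer
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return // consumer gone: exit instead of blocking on the send
			case out <- fetch():
				// Jittered pause between reports, as in the handler above.
				time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	for r := range produce(ctx, func() string { return "report" }) {
		fmt.Println(r)
	}
}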
@@ -1550,13 +1533,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
if err != nil {
ldap.Status = "offline"
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
ldap.Status = "Not Configured"
} else {
// Close ldap connection to avoid leaks.
ldapConn.Close()
ldap.Status = "online"
ldap.Status = string(madmin.ItemOnline)
}
}

@@ -1565,12 +1548,14 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
// Get the notification target info
notifyTarget := fetchLambdaInfo()

server := getLocalServerProperty(globalEndpoints, r)
local := getLocalServerProperty(globalEndpoints, r)
servers := globalNotificationSys.ServerInfo()
servers = append(servers, server)
servers = append(servers, local)

assignPoolNumbers(servers)

var backend interface{}
mode := madmin.ObjectLayerInitializing
mode := madmin.ItemInitializing

buckets := madmin.Buckets{}
objects := madmin.Objects{}
@@ -1578,18 +1563,23 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque

objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ObjectLayerOnline
mode = madmin.ItemOnline

// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
} else {
buckets = madmin.Buckets{Error: err.Error()}
objects = madmin.Objects{Error: err.Error()}
usage = madmin.Usage{Error: err.Error()}
}

// Fetching the backend information
backendInfo := objectAPI.BackendInfo()
if backendInfo.Type == BackendType(madmin.Erasure) {
if backendInfo.Type == madmin.Erasure {
// Calculate the number of online/offline disks of all nodes
var allDisks []madmin.Disk
for _, s := range servers {
@@ -1621,7 +1611,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
}

infoMsg := madmin.InfoMessage{
Mode: mode,
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
SQSARN: globalNotificationSys.GetARNList(false),
@@ -1646,6 +1636,22 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
writeSuccessResponseJSON(w, jsonBytes)
}

func assignPoolNumbers(servers []madmin.ServerProperties) {
for i := range servers {
for idx, ge := range globalEndpoints {
for _, endpoint := range ge.Endpoints {
if servers[i].Endpoint == endpoint.Host {
servers[i].PoolNumber = idx + 1
} else if host, err := xnet.ParseHost(servers[i].Endpoint); err == nil {
if host.Name == endpoint.Hostname() {
servers[i].PoolNumber = idx + 1
}
}
}
}
}
}

func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

lambdaMap := make(map[string][]madmin.TargetIDStatus)
@@ -1655,9 +1661,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
} else {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
}
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
@@ -1669,9 +1675,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
} else {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
}
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
@@ -1704,9 +1710,9 @@ func fetchKMSStatus() madmin.KMS {
}

if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
kmsStat.Status = "offline"
kmsStat.Status = string(madmin.ItemOffline)
} else {
kmsStat.Status = "online"
kmsStat.Status = string(madmin.ItemOnline)

kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
@@ -1714,7 +1720,7 @@ func fetchKMSStatus() madmin.KMS {
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "Ok"
kmsStat.Encrypt = "success"
}

// 2. Verify that we can indeed decrypt the (encrypted) key
@@ -1725,7 +1731,7 @@ func fetchKMSStatus() madmin.KMS {
case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "Ok"
kmsStat.Decrypt = "success"
}
}
return kmsStat
@@ -1741,11 +1747,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapLog := make(map[string]madmin.Status)
mapLog[tgt] = madmin.Status{Status: "Online"}
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
loggerInfo = append(loggerInfo, mapLog)
} else {
mapLog := make(map[string]madmin.Status)
mapLog[tgt] = madmin.Status{Status: "offline"}
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
loggerInfo = append(loggerInfo, mapLog)
}
}
@@ -1757,11 +1763,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapAudit := make(map[string]madmin.Status)
mapAudit[tgt] = madmin.Status{Status: "Online"}
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
auditloggerInfo = append(auditloggerInfo, mapAudit)
} else {
mapAudit := make(map[string]madmin.Status)
mapAudit[tgt] = madmin.Status{Status: "Offline"}
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
auditloggerInfo = append(auditloggerInfo, mapAudit)
}
}

@@ -22,7 +22,6 @@ import (
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"

@@ -90,8 +89,9 @@ type allHealState struct {
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence // Indexed by endpoint
healLocalDisks map[Endpoint]struct{}
healStatus map[string]healingTracker // Indexed by disk ID
}

// newHealState - initialize global heal state management
@@ -99,6 +99,7 @@ func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
@@ -113,7 +114,56 @@ func (ahs *allHealState) healDriveCount() int {
return len(ahs.healLocalDisks)
}

func (ahs *allHealState) getHealLocalDisks() Endpoints {
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
for id, disk := range ahs.healStatus {
for _, ep := range healLocalDisks {
if disk.Endpoint == ep.String() {
delete(ahs.healStatus, id)
}
}
}
}

// updateHealStatus will update the heal status.
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
ahs.Lock()
defer ahs.Unlock()
ahs.healStatus[tracker.ID] = *tracker
}

// Sort by zone, set and disk index
func sortDisks(disks []madmin.Disk) {
sort.Slice(disks, func(i, j int) bool {
a, b := &disks[i], &disks[j]
if a.PoolIndex != b.PoolIndex {
return a.PoolIndex < b.PoolIndex
}
if a.SetIndex != b.SetIndex {
return a.SetIndex < b.SetIndex
}
return a.DiskIndex < b.DiskIndex
})
}

// getLocalHealingDisks returns local healing disks indexed by endpoint.
func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
ahs.RLock()
defer ahs.RUnlock()
dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
for _, v := range ahs.healStatus {
dst[v.Endpoint] = v.toHealingDisk()
}

return dst
}

func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

@@ -124,15 +174,6 @@ func (ahs *allHealState) getHealLocalDisks() Endpoints {
return endpoints
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
@@ -662,6 +703,13 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
}
}

func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}

func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
@@ -834,11 +882,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
return errHealStopSignalled
}

// Skip metacache entries healing
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
return nil
}

err := h.queueHealTask(healSource{
bucket: bucket,
object: object,

@@ -110,10 +110,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --

// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")

// Add user IAM

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")

@@ -17,9 +17,11 @@
package cmd

import (
"context"
"net/http"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)

@@ -40,65 +42,39 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
network[nodeName] = string(madmin.ItemOnline)
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, time.Second); err == nil {
network[nodeName] = "online"
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = "offline"
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
}
}
}
}

localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)

storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())

return madmin.ServerProperties{
State: "ok",
props := madmin.ServerProperties{
State: string(madmin.ItemInitializing),
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
Disks: storageInfo.Disks,
}
}

func getLocalDisks(endpointServerPools EndpointServerPools) []madmin.Disk {
var localEndpoints Endpoints
network := make(map[string]string)

for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
nodeName = "localhost"
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, time.Second); err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
objLayer := newObjectLayerFn()
if objLayer != nil && !globalIsGateway {
// only need Disks information in server mode.
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return storageInfo.Disks

return props
}

@@ -133,18 +133,20 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}

// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}

var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}

if !isSet {
w.Header().Set(k, v)
}
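The equals helper replaces the chained strings.EqualFold calls here and again in generateListObjectsV2Response further down, but its definition is outside this excerpt. A plausible minimal version, assuming it is a case-insensitive membership test (the real helper in the MinIO codebase may differ):

package sketch

import "strings"

// equals reports whether k case-insensitively matches any candidate.
// Illustrative only; name and semantics assumed from the call sites above.
func equals(k string, keys ...string) bool {
	for _, candidate := range keys {
		if strings.EqualFold(k, candidate) {
			return true
		}
	}
	return false
}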
@@ -177,21 +179,25 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}

if objInfo.ReplicationStatus.String() != "" {
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
}

if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
})
if !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
if opts.VersionID == "" {
if ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
SuccessorModTime: objInfo.SuccessorModTime,
}); !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
}
}
}
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
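For reference, the header built above follows S3's x-amz-expiration value format; a tiny standalone illustration of the string it produces (dates and rule ID are made up):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	expiry := time.Date(2021, time.March, 1, 0, 0, 0, 0, time.UTC)
	v := fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiry.Format(http.TimeFormat), "rule-1")
	fmt.Println(v)
	// expiry-date="Mon, 01 Mar 2021 00:00:00 GMT", rule-id="rule-1"
}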
@@ -36,8 +36,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
maxkeys = maxObjectList
}

prefix = values.Get("prefix")
marker = values.Get("marker")
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("marker"))
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
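These list-args helpers now normalize user input through trimLeadingSlash, whose definition is not part of this excerpt. A minimal sketch under the assumption that it drops leading slashes from the query value; the real helper may handle more cases:

package sketch

import "strings"

// trimLeadingSlash is assumed to strip leading '/' characters so a prefix or
// marker such as "/photos/2021" is treated as "photos/2021".
func trimLeadingSlash(s string) string {
	return strings.TrimLeft(s, "/")
}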
@@ -56,8 +56,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
maxkeys = maxObjectList
}

prefix = values.Get("prefix")
marker = values.Get("key-marker")
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("key-marker"))
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
@@ -86,8 +86,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
maxkeys = maxObjectList
}

prefix = values.Get("prefix")
startAfter = values.Get("start-after")
prefix = trimLeadingSlash(values.Get("prefix"))
startAfter = trimLeadingSlash(values.Get("start-after"))
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
@@ -117,8 +117,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
maxUploads = maxUploadsList
}

prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
prefix = trimLeadingSlash(values.Get("prefix"))
keyMarker = trimLeadingSlash(values.Get("key-marker"))
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")

@@ -48,6 +48,12 @@ type LocationResponse struct {
Location string `xml:",chardata"`
}

// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
type PolicyStatus struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
IsPublic string
}
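Given the XMLName tag above, encoding/xml emits the S3 namespace on the root element. A small self-contained illustration, with a hypothetical IsPublic value:

package main

import (
	"encoding/xml"
	"fmt"
)

type PolicyStatus struct {
	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
	IsPublic string
}

func main() {
	b, _ := xml.Marshal(PolicyStatus{IsPublic: "TRUE"})
	fmt.Println(string(b))
	// <PolicyStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><IsPublic>TRUE</IsPublic></PolicyStatus>
}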
// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
@@ -410,9 +416,11 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}

owner.ID = globalMinioDefaultOwnerID
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
@@ -429,10 +437,12 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListVersionsResponse{}

owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = ObjectVersion{}
if object.Name == "" {
@@ -485,10 +495,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsResponse{}

owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = Object{}
if object.Name == "" {
@@ -532,12 +544,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}

if fetchOwner {
owner.ID = globalMinioDefaultOwnerID
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsV2Response{}

for _, object := range objects {
var content = Object{}
@@ -565,7 +576,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
@@ -639,8 +650,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID

// Dumb values not meaningful
listPartsResponse.Initiator = Initiator{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}

listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker

@@ -185,6 +185,10 @@ func registerAPIRouter(router *mux.Router) {
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))

// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")

/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
@@ -262,9 +266,9 @@ func registerAPIRouter(router *mux.Router) {
// ListObjectVersions
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
// GetBucketPolicyStatus
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
@@ -320,9 +324,9 @@ func registerAPIRouter(router *mux.Router) {
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
}

/// Root operation
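The restore route relies on gorilla/mux query matching: Queries("restore", "") makes the route match only when the restore query parameter is present, which is how the same POST object path can coexist with other handlers. A standalone sketch of that dispatch rule, with a placeholder handler:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Matches POST /bucket/key?restore only; without ?restore this route is skipped.
	r.Methods(http.MethodPost).Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			fmt.Fprintln(w, "restore requested")
		}).
		Queries("restore", "")
	http.ListenAndServe(":8080", r)
}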
@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/etag"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
@@ -161,6 +162,7 @@ func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolic
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
@@ -185,12 +187,12 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
@@ -237,7 +239,7 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(r.Context(), err, logger.Application)
logger.LogIf(GlobalContext, err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -258,7 +260,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r, token)
claims, err := getClaimsFromToken(token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
@@ -271,7 +273,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
return s3Err
}

@@ -281,14 +283,13 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
var cred auth.Credentials
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrSignatureVersionNotSupported
return cred, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
@@ -298,18 +299,18 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}

var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}

// LocationConstraint is valid only for CreateBucketAction.
@@ -319,7 +320,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return accessKey, owner, ErrMalformedXML
return cred, owner, ErrMalformedXML
}

// Populate payload to extract location constraint.
@@ -328,7 +329,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return accessKey, owner, s3Error
return cred, owner, s3Error
}

// Populate payload again to handle it in HTTP handler.
@@ -349,7 +350,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
@@ -364,15 +365,16 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -381,7 +383,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
@@ -389,6 +391,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -397,11 +400,11 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}
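The rename from checkRequestAuthTypeToAccessKey to checkRequestAuthTypeCredential changes the first return value from a bare access key to the full auth.Credentials, which is what lets the surrounding hunks pass Groups: cred.Groups into the policy checks without a second lookup. A self-contained analog of the new shape, with stand-in types (none of these names are MinIO APIs):

package main

import "fmt"

// credentials is a stand-in for auth.Credentials.
type credentials struct {
	AccessKey string
	Groups    []string
}

// checkAuth mimics the new signature: the caller receives the whole
// credential, so group membership can feed authorization decisions.
func checkAuth() (cred credentials, owner bool, err error) {
	return credentials{AccessKey: "AKIAEXAMPLE", Groups: []string{"staff"}}, false, nil
}

func main() {
	cred, owner, err := checkAuth()
	if err != nil {
		return
	}
	// The access key is still available as a field where callers need it.
	fmt.Println(cred.AccessKey, cred.Groups, owner)
}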
// Verify if request has valid AWS Signature Version '2'.
@@ -430,19 +433,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return errCode
}

var (
err error
contentMD5, contentSHA256 []byte
)

// Extract 'Content-Md5' if present.
contentMD5, err = checkValidMD5(r.Header)
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}

// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
var contentSHA256 []byte
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
@@ -459,8 +457,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
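etag.FromContentMD5 replaces the old checkValidMD5-plus-hex pipeline: conceptually it turns the base64 Content-Md5 header into the hex ETag form hash.NewReader expects. A self-contained sketch of that conversion; this illustrates the idea, not the actual pkg/etag implementation:

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
)

// etagFromContentMD5 decodes the base64 Content-Md5 header and re-encodes it
// as hex, which matches the ETag of a single-part object. Illustrative only.
func etagFromContentMD5(h http.Header) (string, error) {
	v := h.Get("Content-Md5")
	if v == "" {
		return "", nil // header absent: nothing to verify
	}
	raw, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil
}

func main() {
	h := http.Header{}
	h.Set("Content-Md5", "1B2M2Y8AsgTpgAmY7PhCfg==") // MD5 of the empty string
	etag, err := etagFromContentMD5(h)
	fmt.Println(etag, err) // d41d8cd98f00b204e9800998ecf8427e <nil>
}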
@@ -498,6 +495,7 @@ func setAuthHandler(h http.Handler) http.Handler {
// Validate Authorization header if its valid for JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
return
}
h.ServeHTTP(w, r)
@@ -553,6 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -562,6 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -585,6 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
@@ -595,6 +596,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -650,6 +652,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
@@ -663,6 +666,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),

@@ -17,16 +17,23 @@
package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"

"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/madmin"
)

const (
@@ -35,10 +42,200 @@ const (
)

//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
ID string

// future add more tracking capabilities
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`

ID string
PoolIndex int
SetIndex int
DiskIndex int
Path string
Endpoint string
Started time.Time
LastUpdate time.Time
ObjectsHealed uint64
ObjectsFailed uint64
BytesDone uint64
BytesFailed uint64

// Last object scanned.
Bucket string `json:"-"`
Object string `json:"-"`

// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeObjectsHealed uint64 `json:"-"`
ResumeObjectsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`

// Filled on startup/restarts.
QueuedBuckets []string

// Filled during heal.
HealedBuckets []string
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}

// loadHealingTracker will load the healing tracker from the supplied disk.
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil disk given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
if err != nil {
return nil, err
}
var h healingTracker
_, err = h.UnmarshalMsg(b)
if err != nil {
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
return &h, nil
}

// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
}
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return &h
}

// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
}
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
return h.save(ctx)
}

// save will unconditionally save the tracker and will be created if not existing.
func (h *healingTracker) save(ctx context.Context) error {
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
if ep, ok := api.(*erasureServerPools); ok {
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
}
}
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
if err != nil {
return err
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
htrackerBytes)
}

// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
false)
}

func (h *healingTracker) isHealed(bucket string) bool {
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}

// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.ObjectsHealed = h.ResumeObjectsHealed
h.ObjectsFailed = h.ResumeObjectsFailed
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
}

// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.ResumeObjectsHealed = h.ObjectsHealed
h.ResumeObjectsFailed = h.ObjectsFailed
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
// Delete...
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
}
}
}
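bucketDone's queue maintenance uses the classic append-based in-place delete. The same idiom in isolation, for reference:

package main

import "fmt"

// remove deletes the first occurrence of v from s in place, preserving order,
// using the same append(s[:i], s[i+1:]...) trick as bucketDone above.
func remove(s []string, v string) []string {
	for i, x := range s {
		if x == v {
			return append(s[:i], s[i+1:]...)
		}
	}
	return s
}

func main() {
	queued := []string{"photos", "logs", "backups"}
	fmt.Println(remove(queued, "logs")) // [photos backups]
}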
||||
// setQueuedBuckets will add buckets, but exclude any that is already in h.HealedBuckets.
|
||||
// Order is preserved.
|
||||
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
|
||||
s := set.CreateStringSet(h.HealedBuckets...)
|
||||
h.QueuedBuckets = make([]string, 0, len(buckets))
|
||||
for _, b := range buckets {
|
||||
if !s.Contains(b.Name) {
|
||||
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *healingTracker) printTo(writer io.Writer) {
|
||||
b, err := json.MarshalIndent(h, "", " ")
|
||||
if err != nil {
|
||||
writer.Write([]byte(err.Error()))
|
||||
}
|
||||
writer.Write(b)
|
||||
}
|
||||
|
||||
// toHealingDisk converts the information to madmin.HealingDisk
|
||||
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
|
||||
return madmin.HealingDisk{
|
||||
ID: h.ID,
|
||||
Endpoint: h.Endpoint,
|
||||
PoolIndex: h.PoolIndex,
|
||||
SetIndex: h.SetIndex,
|
||||
DiskIndex: h.DiskIndex,
|
||||
Path: h.Path,
|
||||
Started: h.Started.UTC(),
|
||||
LastUpdate: h.LastUpdate.UTC(),
|
||||
ObjectsHealed: h.ObjectsHealed,
|
||||
ObjectsFailed: h.ObjectsFailed,
|
||||
BytesDone: h.BytesDone,
|
||||
BytesFailed: h.BytesFailed,
|
||||
Bucket: h.Bucket,
|
||||
Object: h.Object,
|
||||
QueuedBuckets: h.QueuedBuckets,
|
||||
HealedBuckets: h.HealedBuckets,
|
||||
}
|
||||
}
|
||||
|
||||
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
@@ -90,7 +287,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
@@ -114,7 +311,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
wait:

for {
select {
case <-ctx.Done():
@@ -125,7 +322,7 @@ wait:

var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDisks()
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
@@ -174,55 +371,76 @@ wait:

buckets, _ := z.ListBuckets(ctx)

buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})

// TODO(klauspost): This will block until all heals are done,
// in the future this should be able to start healing other sets at once.
var wg sync.WaitGroup
for i, setMap := range erasureSetInPoolDisksToHeal {
i := i
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
if len(disks) == 0 {
continue
}
wg.Add(1)
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))

// So someone changed the drives underneath, healing tracker missing.
if !disk.Healing() {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
diskID, err := disk.GetDiskID()
// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.LogIf(ctx, err)
// reading format.json failed or not found, proceed to look
// for new disks to be healed again, we cannot proceed further.
goto wait
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
tracker = newHealingTracker(disk)
}

if err := saveHealingTracker(disk, diskID); err != nil {
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
logger.LogIf(ctx, err)
// Unable to write healing tracker, permission denied or some
// other unexpected error occurred. Proceed to look for new
// disks to be healed again, we cannot proceed further.
goto wait
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}

lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))

if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
continue
}

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}(setIndex, disks)
}
}
wg.Wait()
}
}
}

@@ -30,6 +30,146 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
@@ -42,10 +182,10 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 20
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -54,16 +194,283 @@ func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ID")
return
}
// write "PoolIndex"
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "SetIndex"
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "DiskIndex"
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "Path"
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "Endpoint"
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ObjectsHealed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
// write "ObjectsFailed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "BytesDone"
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "BytesFailed"
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ResumeObjectsHealed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
// write "ResumeObjectsFailed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesDone)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
// write "ResumeBytesFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "HealedBuckets"
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// map header, size 20
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "SetIndex"
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "DiskIndex"
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "Path"
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ObjectsHealed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsHealed)
// string "ObjectsFailed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsFailed)
// string "BytesDone"
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "BytesFailed"
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "ResumeObjectsHealed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsHealed)
// string "ResumeObjectsFailed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsFailed)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "HealedBuckets"
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
return
}

@@ -91,6 +498,146 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -104,7 +651,14 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 14 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
return
}

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}

// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}

textData := generateBytesData(objSize)

// generate etag for the generated data.
// etag of the data to written is required as input for PutObject.
// PutObject is the functions which writes the data onto the FS/Erasure backend.

// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""

for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}

// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()

}

// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}

// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)

// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}

// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)

// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}

// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Benchmark ends here. Stop timer.
b.StopTimer()
}

// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}

// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for PutObject.
// PutObject is the functions which writes the data onto the FS/Erasure backend.

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}

// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()

}

@@ -37,7 +37,7 @@ func (err *errHashMismatch) Error() string {

// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
iow io.WriteCloser
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
@@ -71,9 +71,10 @@ func (b *streamingBitrotWriter) Close() error {
}

// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
r, w := io.Pipe()
h := algo.New()

bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
@@ -81,8 +82,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
close(bw.canClose)
}()
return bw

@@ -17,13 +17,13 @@
package cmd

import (
"crypto/sha256"
"errors"
"hash"
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)

@@ -96,9 +96,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return
}

func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
}
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}

@@ -20,7 +20,6 @@ import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -28,7 +27,7 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)

@@ -42,39 +41,39 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {

disk.MakeVol(context.Background(), volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)

_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
writer.(io.Closer).Close()

reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
t.Fatal(err)
}
}


@@ -17,14 +17,16 @@
package cmd

import (
"bytes"
"crypto/subtle"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
@@ -125,7 +127,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice))
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
@@ -133,14 +138,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

for _, err := range g.Wait() {
if err != nil {
logger.LogIf(GlobalContext, err)
}
if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
}

for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket.", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}

var wg sync.WaitGroup
@@ -288,7 +292,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

listBuckets := objectAPI.ListBuckets

accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -334,16 +338,17 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
@@ -408,6 +413,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Convert object name delete objects if it has `/` in the beginning.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
}

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -424,6 +434,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}

var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
@@ -480,7 +496,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
object.PurgeTransitioned = goi.TransitionStatus
}
if replicateDeletes {
delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
@@ -495,9 +511,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
if delMarker {
object.DeleteMarkerVersionID = object.VersionID
}
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
}
@@ -541,14 +554,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[ObjectToDelete{
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
DeleteMarkerVersionID: dObjects[i].DeleteMarkerVersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
PurgeTransitioned: dObjects[i].PurgeTransitioned,
}]
}
dindex := objectsToDelete[objToDel]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
@@ -580,6 +597,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}

if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
dv := DeletedObjectVersionInfo{
@@ -591,25 +612,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}

if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
deleteTransitionedObject(ctx, newObjectLayerFn(), bucket, dobj.ObjectName, lifecycle.ObjectOpts{
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}, false, true)
}

}
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
eventName := event.ObjectRemovedDelete

objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}

if dobj.DeleteMarker {
objInfo.DeleteMarker = dobj.DeleteMarker
if objInfo.DeleteMarker {
objInfo.VersionID = dobj.DeleteMarkerVersionID
eventName = event.ObjectRemovedDeleteMarkerCreated
}
@@ -789,13 +806,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}

resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {

// Make sure that the URL does not contain object name.
if bucket != path.Clean(resource[1:]) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
@@ -838,13 +857,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
defer fileBody.Close()

formValues.Set("Bucket", bucket)

if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
object := trimLeadingSlash(formValues.Get("Key"))

successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
@@ -858,12 +876,51 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}

// Verify policy signature.
errCode := doesPolicySignatureMatch(formValues)
cred, errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

// Once signature is validated, check if the user has
// explicit permissions for the user.
{
token := formValues.Get(xhttp.AmzSecurityToken)
if token != "" && cred.AccessKey == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL, guessIsBrowserReq(r))
return
}

if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}

if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL, guessIsBrowserReq(r))
return
}

// Extract claims if any.
claims, err := getClaimsFromToken(token)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
BucketName: bucket,
ObjectName: object,
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
Claims: claims,
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
return
}
}

policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
@@ -872,10 +929,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Handle policy if it is set.
if len(policyBytes) > 0 {

postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
return
}

@@ -903,20 +961,20 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
pReader := NewPutObjReader(rawReader)
var objectEncryptionKey crypto.ObjectKey

// Check if bucket encryption is enabled
@@ -956,12 +1014,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}

@@ -1018,6 +1080,64 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
}

// GetBucketPolicyStatusHandler - Retrieves the policy status
// for an MinIO bucket, indicating whether the bucket is public.
func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicyStatus")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyStatusAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Check if anonymous (non-owner) has access to list objects.
readable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})

// Check if anonymous (non-owner) has access to upload objects.
writable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.PutObjectAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})

encodedSuccessResponse := encodeResponse(PolicyStatus{
IsPublic: func() string {
// Silly to have special 'boolean' values yes
// but complying with silly implementation
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
if readable && writable {
return "TRUE"
}
return "FALSE"
}(),
})

writeSuccessResponseXML(w, encodedSuccessResponse)
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.

@@ -65,13 +65,18 @@ func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}

type expiryState struct {
expiryCh chan ObjectInfo
type expiryTask struct {
objInfo ObjectInfo
versionExpiry bool
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo) {
type expiryState struct {
expiryCh chan expiryTask
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
select {
case es.expiryCh <- oi:
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
default:
}
}
@@ -82,7 +87,7 @@ var (

func newExpiryState() *expiryState {
es := &expiryState{
expiryCh: make(chan ObjectInfo, 10000),
expiryCh: make(chan expiryTask, 10000),
}
go func() {
<-GlobalContext.Done()
@@ -94,8 +99,8 @@ func newExpiryState() *expiryState {
func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState()
go func() {
for oi := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, oi, false)
for t := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
}
}()
}
@@ -240,16 +245,11 @@ func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, ar
}

// set PutObjectOptions for PUT operation to transition data to target cluster
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)

tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: objInfo.StorageClass,
@@ -259,22 +259,40 @@ func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {

if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}

lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}

return
return putOpts, nil
}

// handle deletes of transitioned objects or object versions when one of the following is true:
@@ -375,7 +393,12 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
return nil
}

putOpts := putTransitionOpts(oi)
putOpts, err := putTransitionOpts(oi)
if err != nil {
gr.Close()
return err

}
if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
gr.Close()
return err
@@ -400,6 +423,7 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
Object: objInfo,
Host: "Internal: [ILM-Transition]",
})

return err
}

@@ -668,11 +692,11 @@ func restoreTransitionedObject(ctx context.Context, bucket, object string, objAP
return err
}
defer gr.Close()
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
pReader := NewPutObjReader(hashReader, nil, nil)
pReader := NewPutObjReader(hashReader)
opts := putRestoreOpts(bucket, object, rreq, objInfo)
opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {

@@ -26,32 +26,25 @@ import (
	"github.com/minio/minio/cmd/logger"

	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/sync/errgroup"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
	inParallel := func(objects []ObjectInfo) {
		g := errgroup.WithNErrs(len(objects))
		for index := range objects {
			index := index
			g.Go(func() error {
				objects[index].ETag = objects[index].GetActualETag(nil)
				objects[index].Size, _ = objects[index].GetActualSize()
				return nil
			}, index)
		}
		g.Wait()
	}
	const maxConcurrent = 500
	for {
		if len(objects) < maxConcurrent {
			inParallel(objects)
			return
		}
		inParallel(objects[:maxConcurrent])
		objects = objects[maxConcurrent:]
	g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
	_, cancel := g.WithCancelOnError(ctx)
	defer cancel()
	for index := range objects {
		index := index
		g.Go(func() error {
			size, err := objects[index].GetActualSize()
			if err == nil {
				objects[index].Size = size
			}
			objects[index].ETag = objects[index].GetActualETag(nil)
			return nil
		}, index)
	}
	g.WaitErr()
}
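
Note: the rewrite above drops the hand-rolled 500-object batching in favor of the errgroup's built-in concurrency limit. A minimal sketch of that pattern, assuming the internal pkg/sync/errgroup API exactly as used in this hunk (WithNErrs/WithConcurrency/Go/WaitErr); the item count and limit are hypothetical:

	// Process items with at most 8 goroutines in flight; one error slot per index.
	items := make([]ObjectInfo, 100)
	g := errgroup.WithNErrs(len(items)).WithConcurrency(8)
	for i := range items {
		i := i
		g.Go(func() error {
			items[i].ETag = items[i].GetActualETag(nil) // placeholder work
			return nil
		}, i)
	}
	g.WaitErr()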
// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -301,10 +294,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
	return proxyRequest(ctx, w, r, ep)
}

func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
	return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
@@ -343,15 +332,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
		return
	}

	// Forward the request using Source IP or bucket
	forwardStr := handlers.GetSourceIPFromHeaders(r)
	if forwardStr == "" {
		forwardStr = bucket
	}
	if proxyRequestByStringHash(ctx, w, r, forwardStr) {
		return
	}

	listObjects := objectAPI.ListObjects

	// Inititate a list objects operation based on the input params.
@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
		}
	}

	authType := getRequestAuthType(r)
	var signatureVersion string
	switch authType {
	case authTypeSignedV2, authTypePresignedV2:
		signatureVersion = signV2Algorithm
	case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
		signatureVersion = signV4Algorithm
	}

	var authtype string
	switch authType {
	case authTypePresignedV2, authTypePresigned:
		authtype = "REST-QUERY-STRING"
	case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
		authtype = "REST-HEADER"
	case authTypePostPolicy:
		authtype = "POST"
	}

	args := map[string][]string{
		"CurrentTime":     {currTime.Format(time.RFC3339)},
		"EpochTime":       {strconv.FormatInt(currTime.Unix(), 10)},
		"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
		"SourceIp":        {handlers.GetSourceIP(r)},
		"UserAgent":       {r.UserAgent()},
		"Referer":         {r.Referer()},
		"principaltype":   {principalType},
		"userid":          {username},
		"username":        {username},
		"versionid":       {vid},
		"CurrentTime":      {currTime.Format(time.RFC3339)},
		"EpochTime":        {strconv.FormatInt(currTime.Unix(), 10)},
		"SecureTransport":  {strconv.FormatBool(r.TLS != nil)},
		"SourceIp":         {handlers.GetSourceIP(r)},
		"UserAgent":        {r.UserAgent()},
		"Referer":          {r.Referer()},
		"principaltype":    {principalType},
		"userid":           {username},
		"username":         {username},
		"versionid":        {vid},
		"signatureversion": {signatureVersion},
		"authType":         {authtype},
	}

	if lc != "" {
@@ -20,8 +20,9 @@ import (
	"context"
	"fmt"
	"net/http"
	"runtime"
	"reflect"
	"strings"
	"sync"
	"time"

	minio "github.com/minio/minio-go/v7"
@@ -169,22 +170,25 @@ func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToD
}

// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(headerKey string) bool {
	key := strings.ToLower(headerKey)
	for _, header := range standardHeaders {
		if strings.ToLower(header) == key {
			return true
		}
	}
	return false
func isStandardHeader(matchHeaderKey string) bool {
	return equals(matchHeaderKey, standardHeaders...)
}

// returns whether object version is a deletemarker and if object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (replicate, sync bool) {
	rcfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || rcfg == nil {
		return false, false, sync
		return false, sync
	}
	opts := replication.ObjectOpts{
		Name:         dobj.ObjectName,
		SSEC:         crypto.SSEC.IsEncrypted(oi.UserDefined),
		UserTags:     oi.UserTags,
		DeleteMarker: oi.DeleteMarker,
		VersionID:    dobj.VersionID,
		OpType:       replication.DeleteReplicationType,
	}
	replicate = rcfg.Replicate(opts)
	// when incoming delete is removal of a delete marker( a.k.a versioned delete),
	// GetObjectInfo returns extra information even though it returns errFileNotFound
	if gerr != nil {
@@ -193,25 +197,20 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
		case replication.Pending, replication.Completed, replication.Failed:
			validReplStatus = true
		}
		if oi.DeleteMarker && validReplStatus {
			return oi.DeleteMarker, true, sync
		if oi.DeleteMarker && (validReplStatus || replicate) {
			return true, sync
		}
		return oi.DeleteMarker, false, sync
		// can be the case that other cluster is down and duplicate `mc rm --vid`
		// is issued - this still needs to be replicated back to the other target
		return oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
	// the target online status should not be used here while deciding
	// whether to replicate deletes as the target could be temporarily down
	if tgt == nil {
		return oi.DeleteMarker, false, false
		return false, false
	}
	opts := replication.ObjectOpts{
		Name:         dobj.ObjectName,
		SSEC:         crypto.SSEC.IsEncrypted(oi.UserDefined),
		UserTags:     oi.UserTags,
		DeleteMarker: oi.DeleteMarker,
		VersionID:    dobj.VersionID,
	}
	return oi.DeleteMarker, rcfg.Replicate(opts), tgt.replicateSync
	return replicate, tgt.replicateSync
}

// replicate deletes to the designated replication target if replication configuration
@@ -226,26 +225,52 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
// on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
	bucket := dobj.Bucket
	rcfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || rcfg == nil {
		logger.LogIf(ctx, err)
		return
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
	if tgt == nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
		return
	}
	versionID := dobj.DeleteMarkerVersionID
	if versionID == "" {
		versionID = dobj.VersionID
	}

	rcfg, err := getReplicationConfig(ctx, bucket)
	if err != nil || rcfg == nil {
		logger.LogIf(ctx, err)
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: event.ObjectReplicationNotTracked,
		})
		return
	}

	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
	if tgt == nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: event.ObjectReplicationNotTracked,
		})
		return
	}

	rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
		VersionID: versionID,
		Internal: miniogo.AdvancedRemoveOptions{
			ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
			ReplicationMTime:        dobj.DeleteMarkerMTime.Time,
			ReplicationStatus:       miniogo.ReplicationStatusReplica,
			ReplicationRequest:      true, // always set this to distinguish between `mc mirror` replication and serverside
		},
	})

@@ -258,6 +283,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
		} else {
			versionPurgeStatus = Failed
		}
		logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %s", rcfg.GetDestination().Bucket, dobj.ObjectName, versionID, rmErr))
	} else {
		if dobj.VersionID == "" {
			replicationStatus = string(replication.Completed)
@@ -272,66 +298,102 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
	}

	// Update metadata on the delete marker or purge permanent delete if replication success.
	objInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
	dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
		VersionID:                     versionID,
		DeleteMarker:                  dobj.DeleteMarker,
		DeleteMarkerReplicationStatus: replicationStatus,
		Versioned:                     globalBucketVersioningSys.Enabled(bucket),
		VersionPurgeStatus:            versionPurgeStatus,
		Versioned:                     globalBucketVersioningSys.Enabled(bucket),
		VersionSuspended:              globalBucketVersioningSys.Suspended(bucket),
	})
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s %s: %w", bucket, dobj.ObjectName, dobj.VersionID, err))
	if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %s", bucket, dobj.ObjectName, versionID, err))
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: eventName,
		})
	} else {
		sendEvent(eventArgs{
			BucketName: bucket,
			Object:     dobjInfo,
			Host:       "Internal: [Replication]",
			EventName:  eventName,
		})
	}

	sendEvent(eventArgs{
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [Replication]",
		EventName:  eventName,
	})
}
func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
	meta := make(map[string]string, len(oi.UserDefined))
	for k, v := range oi.UserDefined {
		if k == xhttp.AmzBucketReplicationStatus {
			continue
		}
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			continue
		}

		if equals(k, xhttp.AmzBucketReplicationStatus) {
			continue
		}

		// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
		if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
			continue
		}

		meta[k] = v
	}

	if oi.ContentEncoding != "" {
		meta[xhttp.ContentEncoding] = oi.ContentEncoding
	}

	if oi.ContentType != "" {
		meta[xhttp.ContentType] = oi.ContentType
	}
	tag, err := tags.ParseObjectTags(oi.UserTags)
	if err != nil {
		return nil
	}
	if tag != nil {
		meta[xhttp.AmzObjectTagging] = tag.String()

	if oi.UserTags != "" {
		meta[xhttp.AmzObjectTagging] = oi.UserTags
		meta[xhttp.AmzTagDirective] = "REPLACE"
	}

	sc := dest.StorageClass
	if sc == "" {
		sc = oi.StorageClass
	}
	meta[xhttp.AmzStorageClass] = sc
	if oi.UserTags != "" {
		meta[xhttp.AmzObjectTagging] = oi.UserTags
	if sc != "" {
		meta[xhttp.AmzStorageClass] = sc
	}
	meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
	meta[xhttp.MinIOSourceETag] = oi.ETag
	meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
	meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
	return meta
}

func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
type caseInsensitiveMap map[string]string

// Lookup map entry case insensitively.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}
	for _, k := range []string{
		key,
		strings.ToLower(key),
		http.CanonicalHeaderKey(key),
	} {
		v, ok := m[k]
		if ok {
			return v, ok
		}
	}
	return "", false
}
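
Note: a quick sketch of the lookup order above — the key is tried verbatim, then lower-cased, then canonicalized (the map contents here are hypothetical):

	m := caseInsensitiveMap(map[string]string{"content-language": "en"})
	v, ok := m.Lookup("Content-Language") // verbatim misses, lower-case try matches
	// v == "en", ok == true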
func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
	meta := make(map[string]string)
	for k, v := range objInfo.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
@@ -342,54 +404,58 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
		}
		meta[k] = v
	}
	tag, err := tags.ParseObjectTags(objInfo.UserTags)
	if err != nil {
		return
	}

	sc := dest.StorageClass
	if sc == "" {
		sc = objInfo.StorageClass
	}
	putOpts = miniogo.PutObjectOptions{
		UserMetadata:    meta,
		UserTags:        tag.ToMap(),
		ContentType:     objInfo.ContentType,
		ContentEncoding: objInfo.ContentEncoding,
		StorageClass:    sc,
		Internal: miniogo.AdvancedPutOptions{
			SourceVersionID:   objInfo.VersionID,
			ReplicationStatus: miniogo.ReplicationStatusReplica,
			SourceMTime:       objInfo.ModTime,
			SourceETag:        objInfo.ETag,
			SourceVersionID:    objInfo.VersionID,
			ReplicationStatus:  miniogo.ReplicationStatusReplica,
			SourceMTime:        objInfo.ModTime,
			SourceETag:         objInfo.ETag,
			ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
		},
	}
	if lang, ok := objInfo.UserDefined[xhttp.ContentLanguage]; ok {
	if objInfo.UserTags != "" {
		tag, _ := tags.ParseObjectTags(objInfo.UserTags)
		if tag != nil {
			putOpts.UserTags = tag.ToMap()
		}
	}

	lkMap := caseInsensitiveMap(objInfo.UserDefined)
	if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
		putOpts.ContentLanguage = lang
	}
	if disp, ok := objInfo.UserDefined[xhttp.ContentDisposition]; ok {
	if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
		putOpts.ContentDisposition = disp
	}
	if cc, ok := objInfo.UserDefined[xhttp.CacheControl]; ok {
	if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
		putOpts.CacheControl = cc
	}
	if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
	if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
		rmode := miniogo.RetentionMode(mode)
		putOpts.Mode = rmode
	}
	if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
		rdate, err := time.Parse(time.RFC3339Nano, retainDateStr)
	if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
		rdate, err := time.Parse(time.RFC3339, retainDateStr)
		if err != nil {
			return
			return putOpts, err
		}
		putOpts.RetainUntilDate = rdate
	}
	if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
	if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
		putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
	}
	if crypto.S3.IsEncrypted(objInfo.UserDefined) {
		putOpts.ServerSideEncryption = encrypt.NewSSE()
	}

	return
}

@@ -401,6 +467,16 @@ const (
	replicateAll replicationAction = "all"
)

// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
	for _, k2 := range keys {
		if strings.ToLower(k1) == strings.ToLower(k2) {
			return true
		}
	}
	return false
}
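
Note: equals is a case-insensitive membership test over the variadic keys, which is what lets isStandardHeader above shrink to a one-liner. Hypothetical calls illustrating it (strings.EqualFold would also work for each pairwise comparison):

	equals("CONTENT-TYPE", "content-type", "content-encoding") // true
	equals("x-custom-header", "content-type")                  // false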
// returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
	// needs full replication
@@ -408,83 +484,153 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationActio
		oi1.VersionID != oi2.VersionID ||
		oi1.Size != oi2.Size ||
		oi1.DeleteMarker != oi2.IsDeleteMarker ||
		!oi1.ModTime.Equal(oi2.LastModified) {
		oi1.ModTime.Unix() != oi2.LastModified.Unix() {
		return replicateAll
	}

	if oi1.ContentType != oi2.ContentType {
		return replicateMetadata
	}

	if oi1.ContentEncoding != "" {
		enc, ok := oi2.Metadata[xhttp.ContentEncoding]
		if !ok || strings.Join(enc, "") != oi1.ContentEncoding {
		if !ok {
			enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
			if !ok {
				return replicateMetadata
			}
		}
		if strings.Join(enc, ",") != oi1.ContentEncoding {
			return replicateMetadata
		}
	}

	t, _ := tags.ParseObjectTags(oi1.UserTags)
	if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
		return replicateMetadata
	}

	// Compare only necessary headers
	compareKeys := []string{
		"Expires",
		"Cache-Control",
		"Content-Language",
		"Content-Disposition",
		"X-Amz-Object-Lock-Mode",
		"X-Amz-Object-Lock-Retain-Until-Date",
		"X-Amz-Object-Lock-Legal-Hold",
		"X-Amz-Website-Redirect-Location",
		"X-Amz-Meta-",
	}

	// compare metadata on both maps to see if meta is identical
	for k1, v1 := range oi1.UserDefined {
		if v2, ok := oi2.UserMetadata[k1]; ok && v1 == v2 {
			continue
	compareMeta1 := make(map[string]string)
	for k, v := range oi1.UserDefined {
		var found bool
		for _, prefix := range compareKeys {
			if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
				continue
			}
			found = true
			break
		}
		if v2, ok := oi2.Metadata[k1]; ok && v1 == strings.Join(v2, "") {
			continue
		if found {
			compareMeta1[strings.ToLower(k)] = v
		}
	}

	compareMeta2 := make(map[string]string)
	for k, v := range oi2.Metadata {
		var found bool
		for _, prefix := range compareKeys {
			if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
				continue
			}
			found = true
			break
		}
		if found {
			compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
		}
	}

	if !reflect.DeepEqual(compareMeta1, compareMeta2) {
		return replicateMetadata
	}
	for k1, v1 := range oi2.UserMetadata {
		if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 {
			return replicateMetadata
		}
	}
	for k1, v1slc := range oi2.Metadata {
		v1 := strings.Join(v1slc, "")
		if k1 == xhttp.ContentEncoding { //already compared
			continue
		}
		if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 {
			return replicateMetadata
		}
	}
	t, _ := tags.MapToObjectTags(oi2.UserTags)
	if t.String() != oi1.UserTags {
		return replicateMetadata
	}

	return replicateNone
}

// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
	z, ok := objectAPI.(*erasureServerPools)
	if !ok {
		return
	}

	bucket := objInfo.Bucket
	object := objInfo.Name

	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		logger.LogIf(ctx, err)
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}
	tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
	if tgt == nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}
	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
		VersionID: objInfo.VersionID,
	})
	if err != nil {
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		logger.LogIf(ctx, err)
		return
	}
	defer gr.Close() // hold read lock for entire transaction

	objInfo = gr.ObjInfo
	size, err := objInfo.GetActualSize()
	if err != nil {
		logger.LogIf(ctx, err)
		gr.Close()
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}

	dest := cfg.GetDestination()
	if dest.Bucket == "" {
		gr.Close()
		logger.LogIf(ctx, fmt.Errorf("Unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		return
	}
@@ -497,50 +643,79 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
	if err == nil {
		rtype = getReplicationAction(objInfo, oi)
		if rtype == replicateNone {
			gr.Close()
			// object with same VersionID already exists, replication kicked off by
			// PutObject might have completed.
			return
		}
	}
	replicationStatus := replication.Completed
	// use core client to avoid doing multipart on PUT
	c := &miniogo.Core{Client: tgt.Client}
	if rtype != replicateAll {
		gr.Close()
		// replicate metadata for object tagging/copy with metadata replacement
		dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
		c := &miniogo.Core{Client: tgt.Client}
		_, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), dstOpts)
		if err != nil {
		srcOpts := miniogo.CopySrcOptions{
			Bucket:    dest.Bucket,
			Object:    object,
			VersionID: objInfo.VersionID}
		dstOpts := miniogo.PutObjectOptions{
			Internal: miniogo.AdvancedPutOptions{
				SourceVersionID:    objInfo.VersionID,
				ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
			}}
		if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), srcOpts, dstOpts); err != nil {
			replicationStatus = replication.Failed
			logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
		}
	} else {
		target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
			gr.Close()
			sendEvent(eventArgs{
				EventName:  event.ObjectReplicationNotTracked,
				BucketName: bucket,
				Object:     objInfo,
				Host:       "Internal: [Replication]",
			})
			return
		}

		putOpts, err := putReplicationOpts(ctx, dest, objInfo)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%w", bucket, cfg.RoleArn, err))
			sendEvent(eventArgs{
				EventName:  event.ObjectReplicationNotTracked,
				BucketName: bucket,
				Object:     objInfo,
				Host:       "Internal: [Replication]",
			})
			return
		}

		putOpts := putReplicationOpts(ctx, dest, objInfo)
		// Setup bandwidth throttling
		peers, _ := globalEndpoints.peers()
		totalNodesCount := len(peers)
		if totalNodesCount == 0 {
			totalNodesCount = 1 // For standalone erasure coding
		}
		b := target.BandwidthLimit / int64(totalNodesCount)

		var headerSize int
		for k, v := range putOpts.Header() {
			headerSize += len(k) + len(v)
		}

		// r takes over closing gr.
		r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
		_, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, putOpts)
		if err != nil {
			replicationStatus = replication.Failed
		opts := &bandwidth.MonitorReaderOptions{
			Bucket:               objInfo.Bucket,
			Object:               objInfo.Name,
			HeaderSize:           headerSize,
			BandwidthBytesPerSec: target.BandwidthLimit / int64(totalNodesCount),
			ClusterBandwidth:     target.BandwidthLimit,
		}

		r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
		if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
			replicationStatus = replication.Failed
			logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
		}
		r.Close()
	}

	objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
@@ -549,7 +724,6 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
	}

	// FIXME: add support for missing replication events
	// - event.ObjectReplicationNotTracked
	// - event.ObjectReplicationMissedThreshold
	// - event.ObjectReplicationReplicatedAfterThreshold
	var eventName = event.ObjectReplicationComplete
@@ -557,16 +731,17 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
		eventName = event.ObjectReplicationFailed
	}

	objInfo.metadataOnly = true // Perform only metadata updates.
	objInfo, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
		VersionID: objInfo.VersionID,
	}, ObjectOptions{
		VersionID: objInfo.VersionID,
	})
	// This lower level implementation is necessary to avoid write locks from CopyObject.
	poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
		logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
	} else {
		if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
			VersionID: objInfo.VersionID,
		}); err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
		}
	}

	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
@@ -603,91 +778,102 @@ type DeletedObjectVersionInfo struct {
	DeletedObject
	Bucket string
}
type replicationState struct {
	// add future metrics here
	replicaCh       chan ObjectInfo
	replicaDeleteCh chan DeletedObjectVersionInfo
}

func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
	if r == nil {
		return
	}
	select {
	case r.replicaCh <- oi:
	default:
	}
}

func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
	if r == nil {
		return
	}
	select {
	case r.replicaDeleteCh <- doi:
	default:
	}
}

var (
	globalReplicationState *replicationState
	// TODO: currently keeping it conservative
	// but eventually can be tuned in future,
	// take only half the CPUs for replication
	// conservatively.
	globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
	globalReplicationPool *ReplicationPool
)

func newReplicationState() *replicationState {

	// fix minimum concurrent replication to 1 for single CPU setup
	if globalReplicationConcurrent == 0 {
		globalReplicationConcurrent = 1
	}
	rs := &replicationState{
		replicaCh:       make(chan ObjectInfo, 10000),
		replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
	}
	go func() {
		<-GlobalContext.Done()
		close(rs.replicaCh)
		close(rs.replicaDeleteCh)
	}()
	return rs
// ReplicationPool describes replication pool
type ReplicationPool struct {
	mu              sync.Mutex
	size            int
	replicaCh       chan ObjectInfo
	replicaDeleteCh chan DeletedObjectVersionInfo
	killCh          chan struct{}
	wg              sync.WaitGroup
	ctx             context.Context
	objLayer        ObjectLayer
}

// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
// NewReplicationPool creates a pool of replication workers of specified size
func NewReplicationPool(ctx context.Context, o ObjectLayer, sz int) *ReplicationPool {
	pool := &ReplicationPool{
		replicaCh:       make(chan ObjectInfo, 10000),
		replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
		ctx:             ctx,
		objLayer:        o,
	}
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-r.replicaCh:
				if !ok {
					return
				}
				replicateObject(ctx, oi, objectAPI)
			case doi, ok := <-r.replicaDeleteCh:
				if !ok {
					return
				}
				replicateDelete(ctx, doi, objectAPI)
			}
		}
		<-ctx.Done()
		close(pool.replicaCh)
		close(pool.replicaDeleteCh)
	}()
	pool.Resize(sz)
	return pool
}
// AddWorker adds a replication worker to the pool
func (p *ReplicationPool) AddWorker() {
	defer p.wg.Done()
	for {
		select {
		case <-p.ctx.Done():
			return
		case oi, ok := <-p.replicaCh:
			if !ok {
				return
			}
			replicateObject(p.ctx, oi, p.objLayer)
		case doi, ok := <-p.replicaDeleteCh:
			if !ok {
				return
			}
			replicateDelete(p.ctx, doi, p.objLayer)
		case <-p.killCh:
			return
		}
	}

}

//Resize replication pool to new size
func (p *ReplicationPool) Resize(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()

	for p.size < n {
		p.size++
		p.wg.Add(1)
		go p.AddWorker()
	}
	for p.size > n {
		p.size--
		go func() { p.killCh <- struct{}{} }()
	}
}
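
Note: a sketch of the intended resize semantics — growing spawns tracked worker goroutines, shrinking pushes one token per surplus worker onto killCh so an AddWorker loop exits. The sizes below are hypothetical:

	pool := NewReplicationPool(GlobalContext, objAPI, 100) // 100 workers
	pool.Resize(150)                                       // spawns 50 more
	pool.Resize(10)                                        // asks 140 workers to exit via killCh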

func (p *ReplicationPool) queueReplicaTask(oi ObjectInfo) {
	if p == nil {
		return
	}
	select {
	case p.replicaCh <- oi:
	default:
	}
}

func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
	if p == nil {
		return
	}
	select {
	case p.replicaDeleteCh <- doi:
	default:
	}
}

func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
	if globalReplicationState == nil {
		return
	}

	// Start with globalReplicationConcurrent.
	for i := 0; i < globalReplicationConcurrent; i++ {
		globalReplicationState.addWorker(ctx, objectAPI)
	}
	globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationWorkers())
}

// get Reader from replication target if active-active replication is in place and
@@ -714,8 +900,11 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
			return nil, false
		}
	}
	// Make sure to match ETag when proxying.
	if err = gopts.SetMatchETag(oi.ETag); err != nil {
		return nil, false
	}
	c := miniogo.Core{Client: tgt.Client}

	obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
	if err != nil {
		return nil, false
@@ -726,6 +915,7 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
	if err != nil {
		return nil, false
	}
	reader.ObjInfo = oi.Clone()
	return reader, true
}

@@ -781,6 +971,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
	if err != nil {
		return nil, oi, false, err
	}

	tags, _ := tags.MapToObjectTags(objInfo.UserTags)
	oi = ObjectInfo{
		Bucket: bucket,
@@ -795,12 +986,18 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
		Expires:           objInfo.Expires,
		StorageClass:      objInfo.StorageClass,
		ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
		UserDefined:       cloneMSS(objInfo.UserMetadata),
		UserTags:          tags.String(),
	}
	if ce, ok := oi.UserDefined[xhttp.ContentEncoding]; ok {
	oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
	for k, v := range objInfo.Metadata {
		oi.UserDefined[k] = v[0]
	}
	ce, ok := oi.UserDefined[xhttp.ContentEncoding]
	if !ok {
		ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
	}
	if ok {
		oi.ContentEncoding = ce
		delete(oi.UserDefined, xhttp.ContentEncoding)
	}
	return tgt, oi, true, nil
}
@@ -816,7 +1013,7 @@ func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer,
	if sync {
		replicateObject(ctx, objInfo, o)
	} else {
		globalReplicationState.queueReplicaTask(objInfo)
		globalReplicationPool.queueReplicaTask(objInfo)
	}
}

@@ -824,6 +1021,6 @@ func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo,
	if sync {
		replicateDelete(ctx, dv, o)
	} else {
		globalReplicationState.queueReplicaDeleteTask(dv)
		globalReplicationPool.queueReplicaDeleteTask(dv)
	}
}
@@ -18,6 +18,7 @@ package cmd

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"net/http"
@@ -32,7 +33,6 @@ import (
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/bucket/versioning"
	"github.com/minio/minio/pkg/madmin"
	sha256 "github.com/minio/sha256-simd"
)

const (
@@ -100,7 +100,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
		if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
			return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
		}
		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
	}
	if tgt.Type == madmin.ReplicationService {
		if !globalIsErasure {
@@ -111,7 +111,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
		}
		vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
		if err != nil {
			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
		}
		if vcfg.Status != string(versioning.Enabled) {
			return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -124,7 +124,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
			if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
				return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
			}
			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
		}
		if vcfg.Status != string(versioning.Enabled) {
			return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -362,6 +362,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
	api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
		Creds:     creds,
		Secure:    tcfg.Secure,
		Region:    tcfg.Region,
		Transport: getRemoteTargetInstanceTransport,
	})
	if err != nil {
@@ -50,10 +50,14 @@ import (
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn

func init() {
	rand.Seed(time.Now().UTC().UnixNano())

	logger.Init(GOPATH, GOROOT)
	logger.RegisterError(config.FmtError)

	rand.Seed(time.Now().UTC().UnixNano())
	// Inject into config package.
	config.Logger.Info = logger.Info
	config.Logger.LogIf = logger.LogIf

	globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)

@@ -69,7 +73,6 @@ func init() {
		},
	})

	globalReplicationState = newReplicationState()
	globalTransitionState = newTransitionState()

	console.SetColor("Debug", color.New())

@@ -20,6 +20,8 @@ import (
	"bytes"
	"context"
	"errors"
	"io/ioutil"
	"net/http"

	"github.com/minio/minio/pkg/hash"
)
@@ -27,9 +29,9 @@ import (
var errConfigNotFound = errors.New("config file not found")

func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
	var buffer bytes.Buffer
	// Read entire content by setting size to -1
	if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
	r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
			return nil, errConfigNotFound
@@ -37,13 +39,16 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b

		return nil, err
	}
	defer r.Close()

	// Return config not found on empty content.
	if buffer.Len() == 0 {
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if len(buf) == 0 {
		return nil, errConfigNotFound
	}

	return buffer.Bytes(), nil
	return buf, nil
}

type objectDeleter interface {
@@ -59,12 +64,12 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
	if err != nil {
		return err
	}

	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
	return err
}
@@ -18,6 +18,7 @@ package cmd

import (
	"context"
	"crypto/tls"
	"fmt"
	"strings"
	"sync"
@@ -26,7 +27,6 @@ import (
	"github.com/minio/minio/cmd/config/api"
	"github.com/minio/minio/cmd/config/cache"
	"github.com/minio/minio/cmd/config/compress"
	"github.com/minio/minio/cmd/config/crawler"
	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/config/etcd"
	"github.com/minio/minio/cmd/config/heal"
@@ -34,6 +34,7 @@ import (
	"github.com/minio/minio/cmd/config/identity/openid"
	"github.com/minio/minio/cmd/config/notify"
	"github.com/minio/minio/cmd/config/policy/opa"
	"github.com/minio/minio/cmd/config/scanner"
	"github.com/minio/minio/cmd/config/storageclass"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
@@ -59,7 +60,7 @@ func initHelp() {
		config.LoggerWebhookSubSys: logger.DefaultKVS,
		config.AuditWebhookSubSys:  logger.DefaultAuditKVS,
		config.HealSubSys:          heal.DefaultKVS,
		config.CrawlerSubSys:       crawler.DefaultKVS,
		config.ScannerSubSys:       scanner.DefaultKVS,
	}
	for k, v := range notify.DefaultNotificationKVS {
		kvs[k] = v
@@ -116,8 +117,8 @@ func initHelp() {
			Description: "manage object healing frequency and bitrot verification checks",
		},
		config.HelpKV{
			Key:         config.CrawlerSubSys,
			Description: "manage crawling for usage calculation, lifecycle, healing and more",
			Key:         config.ScannerSubSys,
			Description: "manage namespace scanning for usage calculation, lifecycle, healing and more",
		},
		config.HelpKV{
			Key:         config.LoggerWebhookSubSys,
@@ -199,7 +200,7 @@ func initHelp() {
		config.CacheSubSys:          cache.Help,
		config.CompressionSubSys:    compress.Help,
		config.HealSubSys:           heal.Help,
		config.CrawlerSubSys:        crawler.Help,
		config.ScannerSubSys:        scanner.Help,
		config.IdentityOpenIDSubSys: openid.Help,
		config.IdentityLDAPSubSys:   xldap.Help,
		config.PolicyOPASubSys:      opa.Help,
@@ -273,11 +274,11 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
		}
	}

	if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
	if _, err = heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
		return err
	}

	if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
	if _, err = scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
		return err
	}
@@ -295,7 +296,10 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
		}
	}
	{
		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
			&tls.Config{
				RootCAs: globalRootCAs,
			}, defaultDialTimeout)())
		if err != nil {
			return err
		}
@@ -471,7 +475,10 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
		}
	}

	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
		&tls.Config{
			RootCAs: globalRootCAs,
		}, defaultDialTimeout)())
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
	}
@@ -546,7 +553,7 @@ If you need help to migrate smoothly visit: https://min.io/pricing`
				http.WithAuthToken(l.AuthToken),
				http.WithUserAgent(loggerUserAgent),
				http.WithLogKind(string(logger.All)),
				http.WithTransport(NewGatewayHTTPTransport()),
				http.WithTransport(NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)),
			),
		); err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
@@ -599,10 +606,10 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
		return fmt.Errorf("Unable to apply heal config: %w", err)
	}

	// Crawler
	crawlerCfg, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
	// Scanner
	scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
	if err != nil {
		return fmt.Errorf("Unable to apply crawler config: %w", err)
		return fmt.Errorf("Unable to apply scanner config: %w", err)
	}

	// Apply configurations.
@@ -617,7 +624,7 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
	globalHealConfig = healCfg
	globalHealConfigMu.Unlock()

	logger.LogIf(ctx, crawlerSleeper.Update(crawlerCfg.Delay, crawlerCfg.MaxWait))
	logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))

	// Update all dynamic config values in memory.
	globalServerConfigMu.Lock()
@@ -29,14 +29,14 @@ import (

// API sub-system constants
const (
	apiRequestsMax             = "requests_max"
	apiRequestsDeadline        = "requests_deadline"
	apiClusterDeadline         = "cluster_deadline"
	apiCorsAllowOrigin         = "cors_allow_origin"
	apiRemoteTransportDeadline = "remote_transport_deadline"
	apiListQuorum              = "list_quorum"
	apiExtendListCacheLife     = "extend_list_cache_life"

	apiRequestsMax             = "requests_max"
	apiRequestsDeadline        = "requests_deadline"
	apiClusterDeadline         = "cluster_deadline"
	apiCorsAllowOrigin         = "cors_allow_origin"
	apiRemoteTransportDeadline = "remote_transport_deadline"
	apiListQuorum              = "list_quorum"
	apiExtendListCacheLife     = "extend_list_cache_life"
	apiReplicationWorkers      = "replication_workers"
	EnvAPIRequestsMax          = "MINIO_API_REQUESTS_MAX"
	EnvAPIRequestsDeadline     = "MINIO_API_REQUESTS_DEADLINE"
	EnvAPIClusterDeadline      = "MINIO_API_CLUSTER_DEADLINE"
@@ -45,6 +45,7 @@ const (
	EnvAPIListQuorum          = "MINIO_API_LIST_QUORUM"
	EnvAPIExtendListCacheLife = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
	EnvAPISecureCiphers       = "MINIO_API_SECURE_CIPHERS"
	EnvAPIReplicationWorkers  = "MINIO_API_REPLICATION_WORKERS"
)

// Deprecated key and ENVs
@@ -78,12 +79,16 @@ var (
		},
		config.KV{
			Key:   apiListQuorum,
			Value: "optimal",
			Value: "strict",
		},
		config.KV{
			Key:   apiExtendListCacheLife,
			Value: "0s",
		},
		config.KV{
			Key:   apiReplicationWorkers,
			Value: "100",
		},
	}
)
@@ -96,6 +101,7 @@ type Config struct {
	RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
	ListQuorum              string        `json:"list_strict_quorum"`
	ExtendListLife          time.Duration `json:"extend_list_cache_life"`
	ReplicationWorkers      int           `json:"replication_workers"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -173,6 +179,15 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
		return cfg, err
	}

	replicationWorkers, err := strconv.Atoi(env.Get(EnvAPIReplicationWorkers, kvs.Get(apiReplicationWorkers)))
	if err != nil {
		return cfg, err
	}

	if replicationWorkers <= 0 {
		return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Minimum number of replication workers should be 1")
	}

	return Config{
		RequestsMax:      requestsMax,
		RequestsDeadline: requestsDeadline,
@@ -181,5 +196,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
		RemoteTransportDeadline: remoteTransportDeadline,
		ListQuorum:              listQuorum,
		ExtendListLife:          listLife,
		ReplicationWorkers:      replicationWorkers,
	}, nil
}

@@ -45,5 +45,11 @@ var (
			Optional:    true,
			Type:        "duration",
		},
		config.HelpKV{
			Key:         apiReplicationWorkers,
			Description: `set the number of replication workers, defaults to 100`,
			Optional:    true,
			Type:        "number",
		},
	}
)
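
Note: with the new key wired into Config and LookupConfig above, the worker count can be tuned through the environment variable defined earlier, e.g. (value hypothetical):

	MINIO_API_REPLICATION_WORKERS=200 minio server /data

and presumably also via the config sub-system key (`mc admin config set <alias>/ api replication_workers=200`, following the usual sub-system syntax). Values below 1 are rejected with ErrInvalidReplicationWorkersValue.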

cmd/config/cache/help.go
@@ -40,19 +40,13 @@ var (
		},
		config.HelpKV{
			Key:         Exclude,
			Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
			Description: `exclude cache for following patterns e.g. "bucket/*.tmp,*.exe"`,
			Optional:    true,
			Type:        "csv",
		},
		config.HelpKV{
			Key:         config.Comment,
			Description: config.DefaultComment,
			Optional:    true,
			Type:        "sentence",
		},
		config.HelpKV{
			Key:         After,
			Description: `minimum accesses before caching an object`,
			Description: `minimum number of access before caching an object`,
			Optional:    true,
			Type:        "number",
		},
@@ -80,5 +74,11 @@ var (
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         config.Comment,
			Description: config.DefaultComment,
			Optional:    true,
			Type:        "sentence",
		},
	}
)
@@ -23,6 +23,7 @@ import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"io/ioutil"

	"github.com/minio/minio/pkg/env"
@@ -113,3 +114,12 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
	}
	return cert, nil
}

// EnsureCertAndKey checks if both client certificate and key paths are provided
func EnsureCertAndKey(ClientCert, ClientKey string) error {
	if (ClientCert != "" && ClientKey == "") ||
		(ClientCert == "" && ClientKey != "") {
		return errors.New("cert and key must be specified as a pair")
	}
	return nil
}
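
Note: EnsureCertAndKey only validates pairing — both values empty passes just like both set; only a lone value fails. A hypothetical call:

	err := EnsureCertAndKey("client.crt", "") // key missing
	// err.Error() == "cert and key must be specified as a pair"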

@@ -45,7 +45,7 @@ const (

	// Include-list for compression.
	DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
	DefaultMimeTypes  = "text/*,application/json,application/xml"
	DefaultMimeTypes  = "text/*,application/json,application/xml,binary/octet-stream"
)

// DefaultKVS - default KV config for compression settings

@@ -77,6 +77,7 @@ const (
	LoggerWebhookSubSys = "logger_webhook"
	AuditWebhookSubSys  = "audit_webhook"
	HealSubSys          = "heal"
	ScannerSubSys       = "scanner"
	CrawlerSubSys       = "crawler"

	// Add new constants here if you add new fields to config.
@@ -114,7 +115,7 @@ var SubSystems = set.CreateStringSet(
	PolicyOPASubSys,
	IdentityLDAPSubSys,
	IdentityOpenIDSubSys,
	CrawlerSubSys,
	ScannerSubSys,
	HealSubSys,
	NotifyAMQPSubSys,
	NotifyESSubSys,
@@ -132,7 +133,7 @@ var SubSystemsDynamic = set.CreateStringSet(
	APISubSys,
	CompressionSubSys,
	CrawlerSubSys,
	ScannerSubSys,
	HealSubSys,
)

@@ -151,7 +152,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
	IdentityLDAPSubSys,
	IdentityOpenIDSubSys,
	HealSubSys,
	CrawlerSubSys,
	ScannerSubSys,
}...)

// Constant separators
@@ -462,6 +463,13 @@ func LookupWorm() (bool, error) {
	return ParseBool(env.Get(EnvWorm, EnableOff))
}

// Carries all the renamed sub-systems from their
// previously known names
var renamedSubsys = map[string]string{
	CrawlerSubSys: ScannerSubSys,
	// Add future sub-system renames
}

// Merge - merges a new config with all the
// missing values for default configs,
// returns a config.
@@ -477,9 +485,21 @@ func (c Config) Merge() Config {
			}
		}
		if _, ok := cp[subSys]; !ok {
			// A config subsystem was removed or server was downgraded.
			Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
			continue
			rnSubSys, ok := renamedSubsys[subSys]
			if !ok {
				// A config subsystem was removed or server was downgraded.
				Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
				continue
			}
			// Copy over settings from previous sub-system
			// to newly renamed sub-system
			for _, kv := range cp[rnSubSys][Default] {
				_, ok := c[subSys][tgt].Lookup(kv.Key)
				if !ok {
					ckvs.Set(kv.Key, kv.Value)
				}
			}
			subSys = rnSubSys
		}
		cp[subSys][tgt] = ckvs
	}
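
Note: the net effect is that a legacy `crawler` entry, instead of being dropped with the "ignoring unknown subsystem" log line, now maps through renamedSubsys to `scanner` and its keys are carried over unless already set there. A trivial Go illustration of the mapping consulted above (values hypothetical):

	old := "crawler"
	if renamed, ok := renamedSubsys[old]; ok {
		// renamed == "scanner"; keys stored under "crawler" migrate to "scanner"
		_ = renamed
	}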

@@ -23,21 +23,23 @@ const (

// Top level common ENVs
const (
	EnvAccessKey       = "MINIO_ACCESS_KEY"
	EnvSecretKey       = "MINIO_SECRET_KEY"
	EnvRootUser        = "MINIO_ROOT_USER"
	EnvRootPassword    = "MINIO_ROOT_PASSWORD"
	EnvAccessKeyOld    = "MINIO_ACCESS_KEY_OLD"
	EnvSecretKeyOld    = "MINIO_SECRET_KEY_OLD"
	EnvRootUserOld     = "MINIO_ROOT_USER_OLD"
	EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
	EnvBrowser         = "MINIO_BROWSER"
	EnvDomain          = "MINIO_DOMAIN"
	EnvRegionName      = "MINIO_REGION_NAME"
	EnvPublicIPs       = "MINIO_PUBLIC_IPS"
	EnvFSOSync         = "MINIO_FS_OSYNC"
	EnvArgs            = "MINIO_ARGS"
	EnvDNSWebhook      = "MINIO_DNS_WEBHOOK_ENDPOINT"
	EnvAccessKey             = "MINIO_ACCESS_KEY"
	EnvSecretKey             = "MINIO_SECRET_KEY"
	EnvRootUser              = "MINIO_ROOT_USER"
	EnvRootPassword          = "MINIO_ROOT_PASSWORD"
	EnvAccessKeyOld          = "MINIO_ACCESS_KEY_OLD"
	EnvSecretKeyOld          = "MINIO_SECRET_KEY_OLD"
	EnvRootUserOld           = "MINIO_ROOT_USER_OLD"
	EnvRootPasswordOld       = "MINIO_ROOT_PASSWORD_OLD"
	EnvBrowser               = "MINIO_BROWSER"
	EnvDomain                = "MINIO_DOMAIN"
	EnvRegionName            = "MINIO_REGION_NAME"
	EnvPublicIPs             = "MINIO_PUBLIC_IPS"
	EnvFSOSync               = "MINIO_FS_OSYNC"
	EnvArgs                  = "MINIO_ARGS"
	EnvDNSWebhook            = "MINIO_DNS_WEBHOOK_ENDPOINT"
	EnvLogPosixTimes         = "MINIO_LOG_POSIX_TIMES"
	EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"

	EnvUpdate = "MINIO_UPDATE"
@@ -281,4 +281,10 @@ Example 1:
		"",
		"Refer to https://docs.min.io/docs/minio-kms-quickstart-guide.html for setting up SSE",
	)

	ErrInvalidReplicationWorkersValue = newErrFn(
		"Invalid value for replication workers",
		"",
		"MINIO_API_REPLICATION_WORKERS: should be > 0",
	)
)

@@ -66,7 +66,7 @@ var (
	Help = config.HelpKVS{
		config.HelpKV{
			Key:         Bitrot,
			Description: `perform bitrot scan on disks when checking objects during crawl`,
			Description: `perform bitrot scan on disks when checking objects during scanner`,
			Optional:    true,
			Type:        "on|off",
		},
@@ -21,13 +21,14 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"math/rand"
 	"net"
 	"strings"
 	"time"

+	ldap "github.com/go-ldap/ldap/v3"
 	"github.com/minio/minio/cmd/config"
 	"github.com/minio/minio/pkg/env"
-	ldap "gopkg.in/ldap.v3"
 )

 const (
@@ -58,7 +59,6 @@ type Config struct {
 	GroupSearchBaseDistName  string   `json:"groupSearchBaseDN"`
 	GroupSearchBaseDistNames []string `json:"-"`
 	GroupSearchFilter        string   `json:"groupSearchFilter"`
-	GroupNameAttribute       string   `json:"groupNameAttribute"`

 	// Lookup bind LDAP service account
 	LookupBindDN string `json:"lookupBindDN"`
@@ -82,7 +82,6 @@ const (
 	UserDNSearchFilter = "user_dn_search_filter"
 	UsernameFormat     = "username_format"
 	GroupSearchFilter  = "group_search_filter"
-	GroupNameAttribute = "group_name_attribute"
 	GroupSearchBaseDN  = "group_search_base_dn"
 	TLSSkipVerify      = "tls_skip_verify"
 	ServerInsecure     = "server_insecure"
@@ -97,7 +96,6 @@ const (
 	EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
 	EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
 	EnvGroupSearchFilter  = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
-	EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
 	EnvGroupSearchBaseDN  = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
 	EnvLookupBindDN       = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
 	EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
@@ -106,6 +104,7 @@ const (
 var removedKeys = []string{
 	"username_search_filter",
 	"username_search_base_dn",
+	"group_name_attribute",
 }

 // DefaultKVS - default config for LDAP config
@@ -131,10 +130,6 @@ var (
 			Key:   GroupSearchFilter,
 			Value: "",
 		},
-		config.KV{
-			Key:   GroupNameAttribute,
-			Value: "",
-		},
 		config.KV{
 			Key:   GroupSearchBaseDN,
 			Value: "",
@@ -180,13 +175,22 @@ func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
 	for _, entry := range sres.Entries {
 		// We only queried one attribute,
 		// so we only look up the first one.
-		groups = append(groups, entry.Attributes[0].Values...)
+		groups = append(groups, entry.DN)
 	}
 	return groups, nil
 }

 func (l *Config) lookupBind(conn *ldap.Conn) error {
-	return conn.Bind(l.LookupBindDN, l.LookupBindPassword)
+	var err error
+	if l.LookupBindPassword == "" {
+		err = conn.UnauthenticatedBind(l.LookupBindDN)
+	} else {
+		err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
+	}
+	if ldap.IsErrorWithCode(err, 49) {
+		return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %v", err)
+	}
+	return err
 }

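The literal 49 checked above is the standard LDAP "invalid credentials" result code. go-ldap also exposes it as the named constant ldap.LDAPResultInvalidCredentials, so an equivalent check can avoid the magic number. A small standalone sketch — the helper name is ours, not MinIO's:

package main

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

// isInvalidCredentials is equivalent to ldap.IsErrorWithCode(err, 49)
// as used in the diff above, but with the named constant.
func isInvalidCredentials(err error) bool {
	return ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials)
}

func main() {
	err := ldap.NewError(ldap.LDAPResultInvalidCredentials, fmt.Errorf("bad password"))
	fmt.Println(isInvalidCredentials(err)) // true
}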
 // usernameFormatsBind - Iterates over all given username formats and expects
@@ -208,6 +212,8 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
 		if errs[i] == nil {
 			bindDistNames = append(bindDistNames, bindDN)
 			successCount++
+		} else if !ldap.IsErrorWithCode(errs[i], 49) {
+			return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %v", errs[i])
 		}
 	}
 	if successCount == 0 {
@@ -217,7 +223,7 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
 				errStrings = append(errStrings, err.Error())
 			}
 		}
-		outErr := fmt.Sprintf("All username formats failed with: %s", strings.Join(errStrings, "; "))
+		outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
 		return "", errors.New(outErr)
 	}
 	if successCount > 1 {
@@ -307,12 +313,13 @@ func (l *Config) Bind(username, password string) (string, []string, error) {
 	var groups []string
 	if l.GroupSearchFilter != "" {
 		for _, groupSearchBase := range l.GroupSearchBaseDistNames {
-			filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(bindDN), -1)
+			filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
+			filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
 			searchRequest := ldap.NewSearchRequest(
 				groupSearchBase,
 				ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
 				filter,
 				[]string{l.GroupNameAttribute},
 				nil,
 				nil,
			)

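The group search filter now supports two placeholders: %s expands to the escaped username and %d to the escaped bind DN. A runnable sketch of just that expansion, with an illustrative filter template:

package main

import (
	"fmt"
	"strings"

	ldap "github.com/go-ldap/ldap/v3"
)

// expandGroupFilter substitutes %s with the escaped username and %d
// with the escaped bind DN, mirroring the two Replace calls above.
func expandGroupFilter(tmpl, username, bindDN string) string {
	f := strings.Replace(tmpl, "%s", ldap.EscapeFilter(username), -1)
	return strings.Replace(f, "%d", ldap.EscapeFilter(bindDN), -1)
}

func main() {
	fmt.Println(expandGroupFilter(
		"(&(objectclass=group)(member=%d))", // illustrative template
		"jdoe",
		"uid=jdoe,ou=people,dc=example,dc=com",
	))
}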
@@ -368,6 +375,38 @@ func (l Config) GetExpiryDuration() time.Duration {
 	return l.stsExpiryDuration
 }

+func (l Config) testConnection() error {
+	conn, err := l.Connect()
+	if err != nil {
+		return fmt.Errorf("Error creating connection to LDAP server: %v", err)
+	}
+	defer conn.Close()
+	if l.isUsingLookupBind {
+		if err = l.lookupBind(conn); err != nil {
+			return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %v", err)
+		}
+		return nil
+	}
+
+	// Generate some random user credentials for username formats mode test.
+	username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
+	charset := []byte("abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
+	rand.Shuffle(len(charset), func(i, j int) {
+		charset[i], charset[j] = charset[j], charset[i]
+	})
+	password := string(charset[:20])
+	_, err = l.usernameFormatsBind(conn, username, password)
+	if err == nil {
+		// We don't expect to successfully guess a credential in this
+		// way.
+		return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
+	} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
+		return nil
+	}
+	return fmt.Errorf("LDAP connection test error: %v", err)
+}
+
 // Enabled returns if LDAP is enabled.
 func Enabled(kvs config.KVS) bool {
 	return kvs.Get(ServerAddr) != ""

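The username-formats branch of testConnection probes the server with credentials that cannot plausibly exist: if every bind fails with "invalid credentials", connectivity and the DN templates are known to be good. The credential generation, extracted as a standalone sketch:

package main

import (
	"fmt"
	"math/rand"
)

// randomProbeCredentials builds an unguessable username/password pair,
// the same way the diff above does, for a negative bind test.
func randomProbeCredentials() (username, password string) {
	username = fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
	charset := []byte("abcdefghijklmnopqrstuvwxyz" +
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
	rand.Shuffle(len(charset), func(i, j int) {
		charset[i], charset[j] = charset[j], charset[i]
	})
	return username, string(charset[:20])
}

func main() {
	u, p := randomProbeCredentials()
	fmt.Println(u, p) // a bind with these should fail with code 49
}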
@@ -427,14 +466,14 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
 	// Lookup bind user configuration
 	lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
 	lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
-	if lookupBindDN != "" && lookupBindPassword != "" {
+	if lookupBindDN != "" {
 		l.LookupBindDN = lookupBindDN
 		l.LookupBindPassword = lookupBindPassword
 		l.isUsingLookupBind = true

 		// User DN search configuration
 		userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
-		userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(EnvUserDNSearchFilter))
+		userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
 		if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
 			return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
 		}
@@ -461,23 +500,22 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
 		return l, errors.New("Either Lookup Bind mode or Username Format mode is required.")
 	}

+	// Test connection to LDAP server.
+	if err := l.testConnection(); err != nil {
+		return l, fmt.Errorf("Connection test for LDAP server failed: %v", err)
+	}
+
 	// Group search params configuration
 	grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
-	grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
 	grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))

 	// Either all group params must be set or none must be set.
-	var allSet bool
-	if grpSearchFilter != "" {
-		if grpSearchNameAttr == "" || grpSearchBaseDN == "" {
-			return l, errors.New("All group related parameters must be set")
-		}
-		allSet = true
+	if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
+		return l, errors.New("All group related parameters must be set")
 	}

-	if allSet {
+	if grpSearchFilter != "" {
 		l.GroupSearchFilter = grpSearchFilter
-		l.GroupNameAttribute = grpSearchNameAttr
 		l.GroupSearchBaseDistName = grpSearchBaseDN
 		l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
 	}

@@ -68,12 +68,6 @@ var (
 			Optional:    true,
 			Type:        "string",
 		},
-		config.HelpKV{
-			Key:         GroupNameAttribute,
-			Description: `search attribute for group name e.g. "cn"`,
-			Optional:    true,
-			Type:        "string",
-		},
 		config.HelpKV{
 			Key:         GroupSearchBaseDN,
 			Description: `";" separated list of group search base DNs e.g. "dc=myldapserver,dc=com"`,

@@ -41,10 +41,6 @@ func SetIdentityLDAP(s config.Config, ldapArgs Config) {
 			Key:   GroupSearchFilter,
 			Value: ldapArgs.GroupSearchFilter,
 		},
-		config.KV{
-			Key:   GroupNameAttribute,
-			Value: ldapArgs.GroupNameAttribute,
-		},
 		config.KV{
 			Key:   GroupSearchBaseDN,
 			Value: ldapArgs.GroupSearchBaseDistName,

@@ -1,18 +1,18 @@
-/*
- * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !fips

 package openid

@@ -22,7 +22,7 @@ import (
 	"github.com/dgrijalva/jwt-go"

 	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
-	_ "golang.org/x/crypto/sha3"
+	_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
 )

 // Specific instances for EC256 and company

@@ -1,18 +1,18 @@
-/*
- * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !fips

 package openid

@@ -22,7 +22,7 @@ import (
 	"github.com/dgrijalva/jwt-go"

 	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
-	_ "golang.org/x/crypto/sha3"
+	_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
 )

 // Specific instances for RS256 and company

@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package crawler
+package scanner

 import (
 	"strconv"
@@ -29,8 +29,10 @@ const (
 	Delay   = "delay"
 	MaxWait = "max_wait"

-	EnvDelay   = "MINIO_CRAWLER_DELAY"
-	EnvMaxWait = "MINIO_CRAWLER_MAX_WAIT"
+	EnvDelay         = "MINIO_SCANNER_DELAY"
+	EnvDelayLegacy   = "MINIO_CRAWLER_DELAY"
+	EnvMaxWait       = "MINIO_SCANNER_MAX_WAIT"
+	EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"
 )

 // Config represents the heal settings.
@@ -58,7 +60,7 @@ var (
 	Help = config.HelpKVS{
 		config.HelpKV{
 			Key:         Delay,
-			Description: `crawler delay multiplier, defaults to '10.0'`,
+			Description: `scanner delay multiplier, defaults to '10.0'`,
 			Optional:    true,
 			Type:        "float",
 		},
@@ -73,14 +75,22 @@ var (

 // LookupConfig - lookup config and override with valid environment settings if any.
 func LookupConfig(kvs config.KVS) (cfg Config, err error) {
-	if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
+	if err = config.CheckValidKeys(config.ScannerSubSys, kvs, DefaultKVS); err != nil {
 		return cfg, err
 	}
-	cfg.Delay, err = strconv.ParseFloat(env.Get(EnvDelay, kvs.Get(Delay)), 64)
+	delay := env.Get(EnvDelayLegacy, "")
+	if delay == "" {
+		delay = env.Get(EnvDelay, kvs.Get(Delay))
+	}
+	cfg.Delay, err = strconv.ParseFloat(delay, 64)
 	if err != nil {
 		return cfg, err
 	}
-	cfg.MaxWait, err = time.ParseDuration(env.Get(EnvMaxWait, kvs.Get(MaxWait)))
+	maxWait := env.Get(EnvMaxWaitLegacy, "")
+	if maxWait == "" {
+		maxWait = env.Get(EnvMaxWait, kvs.Get(MaxWait))
+	}
+	cfg.MaxWait, err = time.ParseDuration(maxWait)
 	if err != nil {
 		return cfg, err
 	}

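Note the lookup order in LookupConfig: the legacy MINIO_CRAWLER_* variables take precedence over the new MINIO_SCANNER_* names, so deployments configured before the rename keep working unchanged. The pattern in isolation, with illustrative names:

package main

import (
	"fmt"
	"os"
)

// getWithLegacy prefers the old variable if set, then the new one,
// then a stored config fallback - mirroring the diff above.
func getWithLegacy(legacyKey, newKey, fallback string) string {
	if v := os.Getenv(legacyKey); v != "" {
		return v
	}
	if v := os.Getenv(newKey); v != "" {
		return v
	}
	return fallback
}

func main() {
	os.Setenv("MINIO_CRAWLER_DELAY", "2.0")
	fmt.Println(getWithLegacy("MINIO_CRAWLER_DELAY", "MINIO_SCANNER_DELAY", "10.0")) // 2.0
}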
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"math/rand"
 	"net/http"
+	"os"
 	"reflect"
 	"strconv"
 	"strings"
@@ -410,6 +411,9 @@ func NewKMS(cfg KMSConfig) (kms KMS, err error) {
 	} else if cfg.Vault.Enabled && cfg.Kes.Enabled {
 		return kms, errors.New("Ambiguous KMS configuration: vault configuration and kes configuration are provided at the same time")
 	} else if cfg.Vault.Enabled {
+		if v, ok := os.LookupEnv("MINIO_KMS_VAULT_DEPRECATION"); !ok || v != "off" { // TODO(aead): Remove once Vault support has been removed
+			return kms, errors.New("Hashicorp Vault is deprecated and will be removed Oct. 2021. To temporarily enable Hashicorp Vault support, set MINIO_KMS_VAULT_DEPRECATION=off")
+		}
 		kms, err = NewVault(cfg.Vault)
 		if err != nil {
 			return kms, err

@@ -81,8 +81,6 @@ var (

 	errInvalidInternalIV            = Errorf("The internal encryption IV is malformed")
 	errInvalidInternalSealAlgorithm = Errorf("The internal seal algorithm is invalid and not supported")
-
-	errMissingUpdatedKey = Errorf("The key update returned no error but also no sealed key")
 )

 var (

@@ -181,7 +181,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
 // named key referenced by keyID. It also binds the generated key
 // cryptographically to the provided context.
 func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
-	context := ctx.AppendTo(make([]byte, 0, 128))
+	context, err := ctx.MarshalText()
+	if err != nil {
+		return key, nil, err
+	}

 	var plainKey []byte
 	plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
@@ -203,7 +206,10 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
 // The context must be same context as the one provided while
 // generating the plaintext key / sealedKey.
 func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
-	context := ctx.AppendTo(make([]byte, 0, 128))
+	context, err := ctx.MarshalText()
+	if err != nil {
+		return key, err
+	}

 	var plainKey []byte
 	plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)

@@ -19,13 +19,13 @@ import (
 	"context"
 	"crypto/hmac"
 	"crypto/rand"
+	"crypto/sha256"
 	"encoding/binary"
 	"errors"
 	"io"
 	"path"

 	"github.com/minio/minio/cmd/logger"
-	sha256 "github.com/minio/sha256-simd"
 	"github.com/minio/sio"
 )

@@ -19,13 +19,12 @@ import (
 	"context"
 	"crypto/hmac"
 	"crypto/rand"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"io"
 	"sort"

 	"github.com/minio/minio/cmd/logger"
-	sha256 "github.com/minio/sha256-simd"
 	"github.com/minio/sio"
 )

@@ -33,74 +32,29 @@ import (
 // associated with a certain object.
 type Context map[string]string

-// WriteTo writes the context in a canonical from to w.
-// It returns the number of bytes and the first error
-// encounter during writing to w, if any.
-//
-// WriteTo sorts the context keys and writes the sorted
-// key-value pairs as canonical JSON object to w.
-// Sort order is based on the un-escaped keys.
-//
-// Note that neither keys nor values are escaped for JSON.
-func (c Context) WriteTo(w io.Writer) (n int64, err error) {
-	sortedKeys := make(sort.StringSlice, 0, len(c))
-	for k := range c {
-		sortedKeys = append(sortedKeys, k)
-	}
-	sort.Sort(sortedKeys)
-
-	escape := func(s string) string {
-		buf := bytes.NewBuffer(make([]byte, 0, len(s)))
-		EscapeStringJSON(buf, s)
-		return buf.String()
-	}
-
-	nn, err := io.WriteString(w, "{")
-	if err != nil {
-		return n + int64(nn), err
-	}
-	n += int64(nn)
-	for i, k := range sortedKeys {
-		s := fmt.Sprintf("\"%s\":\"%s\",", escape(k), escape(c[k]))
-		if i == len(sortedKeys)-1 {
-			s = s[:len(s)-1] // remove last ','
-		}
-
-		nn, err = io.WriteString(w, s)
-		if err != nil {
-			return n + int64(nn), err
-		}
-		n += int64(nn)
-	}
-	nn, err = io.WriteString(w, "}")
-	return n + int64(nn), err
-}
-
-// AppendTo appends the context in a canonical from to dst.
-//
-// AppendTo sorts the context keys and writes the sorted
-// key-value pairs as canonical JSON object to w.
-// Sort order is based on the un-escaped keys.
-//
-// Note that neither keys nor values are escaped for JSON.
-func (c Context) AppendTo(dst []byte) (output []byte) {
+// MarshalText returns a canonical text representation of
+// the Context.
+//
+// MarshalText sorts the context keys and writes the sorted
+// key-value pairs as canonical JSON object. The sort order
+// is based on the un-escaped keys.
+func (c Context) MarshalText() ([]byte, error) {
 	if len(c) == 0 {
-		return append(dst, '{', '}')
+		return []byte{'{', '}'}, nil
 	}

-	// out should not escape.
-	out := bytes.NewBuffer(dst)
-
-	// No need to copy+sort
+	// Pre-allocate a buffer - 128 bytes is an arbitrary
+	// heuristic value that seems like a good starting size.
+	var b = bytes.NewBuffer(make([]byte, 0, 128))
 	if len(c) == 1 {
 		for k, v := range c {
-			out.WriteString(`{"`)
-			EscapeStringJSON(out, k)
-			out.WriteString(`":"`)
-			EscapeStringJSON(out, v)
-			out.WriteString(`"}`)
+			b.WriteString(`{"`)
+			EscapeStringJSON(b, k)
+			b.WriteString(`":"`)
+			EscapeStringJSON(b, v)
+			b.WriteString(`"}`)
 		}
-		return out.Bytes()
+		return b.Bytes(), nil
 	}

 	sortedKeys := make([]string, 0, len(c))
@@ -109,19 +63,19 @@ func (c Context) AppendTo(dst []byte) (output []byte) {
 	}
 	sort.Strings(sortedKeys)

-	out.WriteByte('{')
+	b.WriteByte('{')
 	for i, k := range sortedKeys {
-		out.WriteByte('"')
-		EscapeStringJSON(out, k)
-		out.WriteString(`":"`)
-		EscapeStringJSON(out, c[k])
-		out.WriteByte('"')
+		b.WriteByte('"')
+		EscapeStringJSON(b, k)
+		b.WriteString(`":"`)
+		EscapeStringJSON(b, c[k])
+		b.WriteByte('"')
 		if i < len(sortedKeys)-1 {
-			out.WriteByte(',')
+			b.WriteByte(',')
 		}
 	}
-	out.WriteByte('}')
-	return out.Bytes()
+	b.WriteByte('}')
+	return b.Bytes(), nil
 }

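What MarshalText guarantees is a deterministic byte string for a given map: keys sorted, no whitespace, the same bytes on every run. That determinism is what makes the value safe to feed into an HMAC or hand to a KMS as a context. A standalone imitation of the canonical form — not MinIO's implementation, and %q uses Go's escaping rather than MinIO's exact JSON escaping:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// canonicalJSON renders a map as a compact JSON object with sorted keys,
// so equal maps always produce identical bytes.
func canonicalJSON(ctx map[string]string) string {
	keys := make([]string, 0, len(ctx))
	for k := range ctx {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	b.WriteByte('{')
	for i, k := range keys {
		if i > 0 {
			b.WriteByte(',')
		}
		fmt.Fprintf(&b, "%q:%q", k, ctx[k])
	}
	b.WriteByte('}')
	return b.String()
}

func main() {
	fmt.Println(canonicalJSON(map[string]string{"b": "2", "a": "1"}))
	// {"a":"1","b":"2"} - identical bytes on every run
}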
 // KMS represents an active and authenticated connection
@@ -225,9 +179,11 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
 	if context == nil {
 		context = Context{}
 	}
+	ctxBytes, _ := context.MarshalText()

 	mac := hmac.New(sha256.New, kms.masterKey[:])
 	mac.Write([]byte(keyID))
-	mac.Write(context.AppendTo(make([]byte, 0, 128)))
+	mac.Write(ctxBytes)
 	mac.Sum(key[:0])
 	return key
 }

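The derivation pattern above is: per-object key = HMAC-SHA256(masterKey, keyID || canonicalContext). A self-contained sketch under that reading, with illustrative inputs:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// deriveKey binds the derived key to both the key ID and the canonical
// context bytes; mac.Sum(key[:0]) fills the 32-byte array in place.
func deriveKey(masterKey []byte, keyID string, canonicalCtx []byte) (key [32]byte) {
	mac := hmac.New(sha256.New, masterKey)
	mac.Write([]byte(keyID))
	mac.Write(canonicalCtx)
	mac.Sum(key[:0])
	return key
}

func main() {
	master := []byte("0123456789abcdef0123456789abcdef") // illustrative
	k := deriveKey(master, "my-key-id", []byte(`{"bucket":"bucket/object"}`))
	fmt.Printf("%x\n", k)
}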
@@ -18,7 +18,6 @@ import (
 	"bytes"
 	"fmt"
 	"path"
-	"strings"
 	"testing"
 )

@@ -61,7 +60,7 @@ func TestMasterKeyKMS(t *testing.T) {
 	}
 }

-var contextWriteToTests = []struct {
+var contextMarshalTextTests = []struct {
 	Context      Context
 	ExpectedJSON string
 }{
@@ -76,43 +75,29 @@ var contextWriteToTests = []struct {
 	6: {Context: Context{"a": "<>&"}, ExpectedJSON: `{"a":"\u003c\u003e\u0026"}`},
 }

-func TestContextWriteTo(t *testing.T) {
-	for i, test := range contextWriteToTests {
-		var jsonContext strings.Builder
-		if _, err := test.Context.WriteTo(&jsonContext); err != nil {
-			t.Errorf("Test %d: Failed to encode context: %v", i, err)
-			continue
+func TestContextMarshalText(t *testing.T) {
+	for i, test := range contextMarshalTextTests {
+		text, err := test.Context.MarshalText()
+		if err != nil {
+			t.Fatalf("Test %d: Failed to encode context: %v", i, err)
 		}
-		if s := jsonContext.String(); s != test.ExpectedJSON {
-			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
+		if string(text) != test.ExpectedJSON {
+			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, string(text), test.ExpectedJSON)
 		}
 	}
 }

-func TestContextAppendTo(t *testing.T) {
-	for i, test := range contextWriteToTests {
-		dst := make([]byte, 0, 1024)
-		dst = test.Context.AppendTo(dst)
-		if s := string(dst); s != test.ExpectedJSON {
-			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
-		}
-		// Append one more
-		dst = test.Context.AppendTo(dst)
-		if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
-			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
-		}
-	}
-}
-
-func BenchmarkContext_AppendTo(b *testing.B) {
+func BenchmarkContext(b *testing.B) {
 	tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%<>&"}}
 	for _, test := range tests {
 		b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
-			dst := make([]byte, 0, 1024)
 			b.ReportAllocs()
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
-				dst = test.AppendTo(dst[:0])
+				_, err := test.MarshalText()
+				if err != nil {
+					b.Fatal(err)
+				}
 			}
 		})
 	}

@@ -44,6 +44,15 @@ const (
 	// MetaDataEncryptionKey is the sealed data encryption key (DEK) received from
 	// the KMS.
 	MetaDataEncryptionKey = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key"

+	// MetaContext is the KMS context provided by a client when encrypting an
+	// object with SSE-KMS. A client may not send a context in which case the
+	// MetaContext will not be present.
+	// MetaContext only contains the bucket/object name if the client explicitly
+	// added it. However, when decrypting an object the bucket/object name must
+	// be part of the object. Therefore, the bucket/object name must be added
+	// to the context, if not present, whenever a decryption is performed.
+	MetaContext = "X-Minio-Internal-Server-Side-Encryption-Context"
 )

 // IsMultiPart returns true if the object metadata indicates
@@ -109,26 +118,35 @@ func IsSourceEncrypted(metadata map[string]string) bool {
 //
 // IsEncrypted only checks whether the metadata contains at least
 // one entry indicating SSE-C or SSE-S3.
-func IsEncrypted(metadata map[string]string) bool {
-	if _, ok := metadata[MetaIV]; ok {
-		return true
-	}
-	if _, ok := metadata[MetaAlgorithm]; ok {
-		return true
-	}
-	if IsMultiPart(metadata) {
-		return true
+func IsEncrypted(metadata map[string]string) (Type, bool) {
+	if S3KMS.IsEncrypted(metadata) {
+		return S3KMS, true
 	}
 	if S3.IsEncrypted(metadata) {
-		return true
+		return S3, true
 	}
 	if SSEC.IsEncrypted(metadata) {
-		return true
+		return SSEC, true
 	}
-	if S3KMS.IsEncrypted(metadata) {
-		return true
+	if IsMultiPart(metadata) {
+		return nil, true
 	}
-	return false
+	if _, ok := metadata[MetaIV]; ok {
+		return nil, true
+	}
+	if _, ok := metadata[MetaAlgorithm]; ok {
+		return nil, true
+	}
+	if _, ok := metadata[MetaKeyID]; ok {
+		return nil, true
+	}
+	if _, ok := metadata[MetaDataEncryptionKey]; ok {
+		return nil, true
+	}
+	if _, ok := metadata[MetaContext]; ok {
+		return nil, true
+	}
+	return nil, false
 }

 // CreateMultipartMetadata adds the multipart flag entry to metadata

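A toy caller showing why the two-value signature matters: the SSE scheme can now be detected and dispatched on in a single metadata pass, instead of probing S3, SSEC and S3KMS separately. The detector below is a simplified stand-in with made-up metadata keys, not MinIO's crypto package:

package main

import "fmt"

// isEncrypted reports the SSE scheme found in object metadata, if any.
// Keys and type names here are illustrative only.
func isEncrypted(meta map[string]string) (string, bool) {
	switch {
	case meta["sse-kms-sealed-key"] != "":
		return "SSE-KMS", true
	case meta["sse-s3-sealed-key"] != "":
		return "SSE-S3", true
	case meta["sse-c-sealed-key"] != "":
		return "SSE-C", true
	}
	return "", false
}

func main() {
	meta := map[string]string{"sse-s3-sealed-key": "..."}
	if typ, ok := isEncrypted(meta); ok {
		fmt.Println("object is encrypted with", typ) // SSE-S3
	}
}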
@@ -59,7 +59,7 @@ var isEncryptedTests = []struct {

 func TestIsEncrypted(t *testing.T) {
 	for i, test := range isEncryptedTests {
-		if isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
+		if _, isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
 			t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
 		}
 	}
@@ -74,8 +74,8 @@ var s3IsEncryptedTests = []struct {
 	{Encrypted: false, Metadata: map[string]string{MetaAlgorithm: ""}},                             // 2
 	{Encrypted: false, Metadata: map[string]string{MetaSealedKeySSEC: ""}},                         // 3
 	{Encrypted: true, Metadata: map[string]string{MetaSealedKeyS3: ""}},                            // 4
-	{Encrypted: true, Metadata: map[string]string{MetaKeyID: ""}},                                  // 5
-	{Encrypted: true, Metadata: map[string]string{MetaDataEncryptionKey: ""}},                      // 6
+	{Encrypted: false, Metadata: map[string]string{MetaKeyID: ""}},                                 // 5
+	{Encrypted: false, Metadata: map[string]string{MetaDataEncryptionKey: ""}},                     // 6
 	{Encrypted: false, Metadata: map[string]string{"": ""}},                                        // 7
 	{Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
 }

@@ -82,12 +82,6 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool {
 	if _, ok := metadata[MetaSealedKeyKMS]; ok {
 		return true
 	}
-	if _, ok := metadata[MetaKeyID]; ok {
-		return true
-	}
-	if _, ok := metadata[MetaDataEncryptionKey]; ok {
-		return true
-	}
 	return false
 }

@@ -95,11 +89,14 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool {
 // from the metadata using KMS and returns the decrypted object
 // key.
 func (s3 ssekms) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
-	keyID, kmsKey, sealedKey, err := s3.ParseMetadata(metadata)
+	keyID, kmsKey, sealedKey, ctx, err := s3.ParseMetadata(metadata)
 	if err != nil {
 		return key, err
 	}
-	unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
+	if _, ok := ctx[bucket]; !ok {
+		ctx[bucket] = path.Join(bucket, object)
+	}
+	unsealKey, err := kms.UnsealKey(keyID, kmsKey, ctx)
 	if err != nil {
 		return key, err
 	}

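The binding rule in UnsealObjectKey, taken in isolation: before unsealing, make sure the context maps the bucket name to bucket/object, but never overwrite a binding the client already supplied. Names below are illustrative:

package main

import (
	"fmt"
	"path"
)

// ensureObjectBinding adds bucket -> bucket/object to the KMS context
// only when the client did not already provide one.
func ensureObjectBinding(ctx map[string]string, bucket, object string) {
	if _, ok := ctx[bucket]; !ok {
		ctx[bucket] = path.Join(bucket, object)
	}
}

func main() {
	ctx := map[string]string{"purpose": "test"}
	ensureObjectBinding(ctx, "photos", "2021/cat.png")
	fmt.Println(ctx["photos"]) // photos/2021/cat.png
}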
@@ -147,19 +144,19 @@ func (ssekms) CreateMetadata(metadata map[string]string, keyID string, kmsKey []
 // KMS data key it returns both. If the metadata contains neither a
 // KMS master key ID nor a sealed KMS data key it returns an empty keyID and
 // KMS data key. Otherwise, it returns an error.
-func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, err error) {
+func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, ctx Context, err error) {
 	// Extract all required values from object metadata
 	b64IV, ok := metadata[MetaIV]
 	if !ok {
-		return keyID, kmsKey, sealedKey, errMissingInternalIV
+		return keyID, kmsKey, sealedKey, ctx, errMissingInternalIV
 	}
 	algorithm, ok := metadata[MetaAlgorithm]
 	if !ok {
-		return keyID, kmsKey, sealedKey, errMissingInternalSealAlgorithm
+		return keyID, kmsKey, sealedKey, ctx, errMissingInternalSealAlgorithm
 	}
 	b64SealedKey, ok := metadata[MetaSealedKeyKMS]
 	if !ok {
-		return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal sealed key for SSE-S3")
+		return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal sealed key for SSE-S3")
 	}

 	// There are two possibilities:
@@ -169,33 +166,44 @@ func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []
 	keyID, idPresent := metadata[MetaKeyID]
 	b64KMSSealedKey, kmsKeyPresent := metadata[MetaDataEncryptionKey]
 	if !idPresent && kmsKeyPresent {
-		return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal KMS key-ID for SSE-S3")
+		return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal KMS key-ID for SSE-S3")
 	}
 	if idPresent && !kmsKeyPresent {
-		return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal sealed KMS data key for SSE-S3")
+		return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal sealed KMS data key for SSE-S3")
 	}

 	// Check whether all extracted values are well-formed
 	iv, err := base64.StdEncoding.DecodeString(b64IV)
 	if err != nil || len(iv) != 32 {
-		return keyID, kmsKey, sealedKey, errInvalidInternalIV
+		return keyID, kmsKey, sealedKey, ctx, errInvalidInternalIV
 	}
 	if algorithm != SealAlgorithm {
-		return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
+		return keyID, kmsKey, sealedKey, ctx, errInvalidInternalSealAlgorithm
 	}
 	encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
 	if err != nil || len(encryptedKey) != 64 {
-		return keyID, kmsKey, sealedKey, Errorf("The internal sealed key for SSE-S3 is invalid")
+		return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed key for SSE-KMS is invalid")
 	}
 	if idPresent && kmsKeyPresent { // We are using a KMS -> parse the sealed KMS data key.
 		kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
 		if err != nil {
-			return keyID, kmsKey, sealedKey, Errorf("The internal sealed KMS data key for SSE-S3 is invalid")
+			return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed KMS data key for SSE-KMS is invalid")
 		}
 	}
+	b64Ctx, ok := metadata[MetaContext]
+	if ok {
+		b, err := base64.StdEncoding.DecodeString(b64Ctx)
+		if err != nil {
+			return keyID, kmsKey, sealedKey, ctx, Errorf("The internal KMS context is not base64-encoded")
+		}
+		var json = jsoniter.ConfigCompatibleWithStandardLibrary
+		if err = json.Unmarshal(b, &ctx); err != nil {
+			return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed KMS context is invalid")
+		}
+	}

 	sealedKey.Algorithm = algorithm
 	copy(sealedKey.IV[:], iv)
 	copy(sealedKey.Key[:], encryptedKey)
-	return keyID, kmsKey, sealedKey, nil
+	return keyID, kmsKey, sealedKey, ctx, nil
 }

@@ -62,12 +62,6 @@ func (sses3) IsEncrypted(metadata map[string]string) bool {
 	if _, ok := metadata[MetaSealedKeyS3]; ok {
 		return true
 	}
-	if _, ok := metadata[MetaKeyID]; ok {
-		return true
-	}
-	if _, ok := metadata[MetaDataEncryptionKey]; ok {
-		return true
-	}
 	return false
 }

@@ -223,7 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
 // named key referenced by keyID. It also binds the generated key
 // cryptographically to the provided context.
 func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
-	context := ctx.AppendTo(make([]byte, 0, 128))
+	context, err := ctx.MarshalText()
+	if err != nil {
+		return key, sealedKey, err
+	}

 	payload := map[string]interface{}{
 		"context": base64.StdEncoding.EncodeToString(context),
@@ -258,7 +261,10 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
 // The context must be same context as the one provided while
 // generating the plaintext key / sealedKey.
 func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
-	context := ctx.AppendTo(make([]byte, 0, 128))
+	context, err := ctx.MarshalText()
+	if err != nil {
+		return key, err
+	}

 	payload := map[string]interface{}{
 		"ciphertext": string(sealedKey),
@@ -282,29 +288,3 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
 	copy(key[:], plainKey)
 	return key, nil
 }
-
-// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
-// has been changed by the KMS operator - i.e. the master key has been rotated.
-// If the master key hasn't changed since the sealedKey has been created / updated
-// it may return the same sealedKey as rotatedKey.
-//
-// The context must be same context as the one provided while
-// generating the plaintext key / sealedKey.
-func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
-	context := ctx.AppendTo(make([]byte, 0, 128))
-
-	payload := map[string]interface{}{
-		"ciphertext": string(sealedKey),
-		"context":    base64.StdEncoding.EncodeToString(context),
-	}
-	s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
-	if err != nil {
-		return nil, Errorf("crypto: client error %w", err)
-	}
-	ciphertext, ok := s.Data["ciphertext"]
-	if !ok {
-		return nil, errMissingUpdatedKey
-	}
-	rotatedKey = []byte(ciphertext.(string))
-	return rotatedKey, nil
-}

@@ -23,6 +23,7 @@ import (
 	"errors"
 	"math"
 	"math/rand"
+	"net/http"
 	"os"
 	"path"
 	"strings"
@@ -42,9 +43,9 @@ import (
 )

 const (
-	dataCrawlSleepPerFolder  = time.Millisecond // Time to wait between folders.
-	dataCrawlStartDelay      = 1 * time.Minute  // Time to wait on startup and between cycles.
-	dataUsageUpdateDirCycles = 16               // Visit all folders every n cycles.
+	dataScannerSleepPerFolder = time.Millisecond // Time to wait between folders.
+	dataScannerStartDelay     = 1 * time.Minute  // Time to wait on startup and between cycles.
+	dataUsageUpdateDirCycles  = 16               // Visit all folders every n cycles.

 	healDeleteDangling    = true
 	healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
@@ -55,27 +56,28 @@ var (
 	globalHealConfig   heal.Config
 	globalHealConfigMu sync.Mutex

-	dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
+	dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
 	// Sleeper values are updated when config is loaded.
-	crawlerSleeper = newDynamicSleeper(10, 10*time.Second)
+	scannerSleeper = newDynamicSleeper(10, 10*time.Second)
 )

-// initDataCrawler will start the crawler in the background.
-func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
-	go runDataCrawler(ctx, objAPI)
+// initDataScanner will start the scanner in the background.
+func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
+	go runDataScanner(ctx, objAPI)
 }

-// runDataCrawler will start a data crawler.
+// runDataScanner will start a data scanner.
 // The function will block until the context is canceled.
-// There should only ever be one crawler running per cluster.
-func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
-	// Make sure only 1 crawler is running on the cluster.
-	locker := objAPI.NewNSLock(minioMetaBucket, "runDataCrawler.lock")
+// There should only ever be one scanner running per cluster.
+func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
+	var err error
+	// Make sure only 1 scanner is running on the cluster.
+	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for {
-		err := locker.GetLock(ctx, dataCrawlerLeaderLockTimeout)
+		ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
 		if err != nil {
-			time.Sleep(time.Duration(r.Float64() * float64(dataCrawlStartDelay)))
+			time.Sleep(time.Duration(r.Float64() * float64(dataScannerStartDelay)))
 			continue
 		}
 		break
@@ -84,31 +86,34 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {

 	// Load current bloom cycle
 	nextBloomCycle := intDataUpdateTracker.current() + 1
-	var buf bytes.Buffer
-	err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageBloomName, 0, -1, &buf, "", ObjectOptions{})
+
+	br, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageBloomName, nil, http.Header{}, readLock, ObjectOptions{})
 	if err != nil {
 		if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
 			logger.LogIf(ctx, err)
 		}
 	} else {
-		if buf.Len() == 8 {
-			nextBloomCycle = binary.LittleEndian.Uint64(buf.Bytes())
+		if br.ObjInfo.Size == 8 {
+			if err = binary.Read(br, binary.LittleEndian, &nextBloomCycle); err != nil {
+				logger.LogIf(ctx, err)
+			}
 		}
+		br.Close()
 	}

-	crawlTimer := time.NewTimer(dataCrawlStartDelay)
-	defer crawlTimer.Stop()
+	scannerTimer := time.NewTimer(dataScannerStartDelay)
+	defer scannerTimer.Stop()

 	for {
 		select {
 		case <-ctx.Done():
 			return
-		case <-crawlTimer.C:
+		case <-scannerTimer.C:
 			// Reset the timer for next cycle.
-			crawlTimer.Reset(dataCrawlStartDelay)
+			scannerTimer.Reset(dataScannerStartDelay)

 			if intDataUpdateTracker.debug {
-				console.Debugln("starting crawler cycle")
+				console.Debugln("starting scanner cycle")
 			}

 			// Wait before starting next cycle and wait on startup.
@@ -116,7 +121,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
 			go storeDataUsageInBackend(ctx, objAPI, results)
 			bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
 			logger.LogIf(ctx, err)
-			err = objAPI.CrawlAndGetDataUsage(ctx, bf, results)
+			err = objAPI.NSScanner(ctx, bf, results)
 			close(results)
 			logger.LogIf(ctx, err)
 			if err == nil {
@@ -124,13 +129,13 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
 				nextBloomCycle++
 				var tmp [8]byte
 				binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
-				r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
+				r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)))
 				if err != nil {
 					logger.LogIf(ctx, err)
 					continue
 				}

-				_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r, nil, nil), ObjectOptions{})
+				_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r), ObjectOptions{})
 				if !isErrBucketNotFound(err) {
 					logger.LogIf(ctx, err)
 				}
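The bloom-cycle counter round-trip above, in miniature: an 8-byte little-endian integer written to storage with PutUint64 and read back with a stream-based binary.Read, as the new GetObjectNInfo path does:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var cycle uint64 = 42

	// Persist: fixed 8-byte little-endian encoding.
	var tmp [8]byte
	binary.LittleEndian.PutUint64(tmp[:], cycle)

	// Load: decode straight from the reader, no intermediate buffer.
	var got uint64
	if err := binary.Read(bytes.NewReader(tmp[:]), binary.LittleEndian, &got); err != nil {
		panic(err)
	}
	fmt.Println(got) // 42
}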
@@ -152,27 +157,27 @@ type folderScanner struct {
 	newCache   dataUsageCache
 	withFilter *bloomFilter

-	dataUsageCrawlDebug bool
-	healFolderInclude   uint32 // Include a clean folder one in n cycles.
-	healObjectSelect    uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
+	dataUsageScannerDebug bool
+	healFolderInclude     uint32 // Include a clean folder one in n cycles.
+	healObjectSelect      uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude

 	newFolders      []cachedFolder
 	existingFolders []cachedFolder
 	disks           []StorageAPI
 }

-// crawlDataFolder will crawl the basepath+cache.Info.Name and return an updated cache.
+// scanDataFolder will scan the basepath+cache.Info.Name and return an updated cache.
 // The returned cache will always be valid, but may not be updated from the existing.
-// Before each operation sleepDuration is called which can be used to temporarily halt the crawler.
+// Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
 // If the supplied context is canceled the function will return at the first chance.
-func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
+func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
 	t := UTCNow()

 	logPrefix := color.Green("data-usage: ")
 	logSuffix := color.Blue("- %v + %v", basePath, cache.Info.Name)
 	if intDataUpdateTracker.debug {
 		defer func() {
-			console.Debugf(logPrefix+" Crawl time: %v %s\n", time.Since(t), logSuffix)
+			console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
 		}()

 	}
@@ -185,15 +190,15 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 	skipHeal := cache.Info.SkipHealing

 	s := folderScanner{
-		root:                basePath,
-		getSize:             getSize,
-		oldCache:            cache,
-		newCache:            dataUsageCache{Info: cache.Info},
-		newFolders:          nil,
-		existingFolders:     nil,
-		dataUsageCrawlDebug: intDataUpdateTracker.debug,
-		healFolderInclude:   0,
-		healObjectSelect:    0,
+		root:                  basePath,
+		getSize:               getSize,
+		oldCache:              cache,
+		newCache:              dataUsageCache{Info: cache.Info},
+		newFolders:            nil,
+		existingFolders:       nil,
+		dataUsageScannerDebug: intDataUpdateTracker.debug,
+		healFolderInclude:     0,
+		healObjectSelect:      0,
 	}

 	// Add disks for set healing.
@@ -223,21 +228,21 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 			s.withFilter = nil
 		}
 	}
-	if s.dataUsageCrawlDebug {
-		console.Debugf(logPrefix+"Start crawling. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix)
+	if s.dataUsageScannerDebug {
+		console.Debugf(logPrefix+"Start scanning. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix)
 	}

 	done := ctx.Done()
 	var flattenLevels = 2

-	if s.dataUsageCrawlDebug {
+	if s.dataUsageScannerDebug {
 		console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix)
 	}

 	// Always scan flattenLevels deep. Cache root is level 0.
 	todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
 	for i := 0; i < flattenLevels; i++ {
-		if s.dataUsageCrawlDebug {
+		if s.dataUsageScannerDebug {
 			console.Debugf(logPrefix+"Level %v, scanning %v directories. %s\n", i, len(todo), logSuffix)
 		}
 		select {
@@ -253,7 +258,7 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 		}
 	}

-	if s.dataUsageCrawlDebug {
+	if s.dataUsageScannerDebug {
 		console.Debugf(logPrefix+"New folders: %v %s\n", s.newFolders, logSuffix)
 	}

@@ -282,7 +287,7 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 		}
 	}

-	if s.dataUsageCrawlDebug {
+	if s.dataUsageScannerDebug {
 		console.Debugf(logPrefix+"Existing folders: %v %s\n", len(s.existingFolders), logSuffix)
 	}

@@ -309,13 +314,13 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 			// If folder isn't in filter, skip it completely.
 			if !s.withFilter.containsDir(folder.name) {
 				if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
-					if s.dataUsageCrawlDebug {
+					if s.dataUsageScannerDebug {
 						console.Debugf(logPrefix+"Skipping non-updated folder: %v %s\n", folder, logSuffix)
 					}
 					s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
 					continue
 				} else {
-					if s.dataUsageCrawlDebug {
+					if s.dataUsageScannerDebug {
 						console.Debugf(logPrefix+"Adding non-updated folder to heal check: %v %s\n", folder.name, logSuffix)
 					}
 					// Update probability of including objects
@@ -337,8 +342,8 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 		}
 		s.newCache.replaceHashed(h, folder.parent, *du)
 	}
-	if s.dataUsageCrawlDebug {
-		console.Debugf(logPrefix+"Finished crawl, %v entries %s\n", len(s.newCache.Cache), logSuffix)
+	if s.dataUsageScannerDebug {
+		console.Debugf(logPrefix+"Finished scanner, %v entries %s\n", len(s.newCache.Cache), logSuffix)
 	}
 	s.newCache.Info.LastUpdate = UTCNow()
 	s.newCache.Info.NextCycle++
@@ -367,7 +372,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 		_, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
 		var activeLifeCycle *lifecycle.Lifecycle
 		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
-			if f.dataUsageCrawlDebug {
+			if f.dataUsageScannerDebug {
 				console.Debugf(scannerLogPrefix+" Prefix %q has active rules\n", prefix)
 			}
 			activeLifeCycle = f.oldCache.Info.lifeCycle
@@ -378,20 +383,20 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
 				if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
 					f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
-					if f.dataUsageCrawlDebug {
+					if f.dataUsageScannerDebug {
 						console.Debugf(scannerLogPrefix+" Skipping non-updated folder: %v\n", folder.name)
 					}
 					continue
 				} else {
-					if f.dataUsageCrawlDebug {
+					if f.dataUsageScannerDebug {
 						console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name)
 					}
-					// If probability was already crawlerHealFolderInclude, keep it.
+					// If probability was already scannerHealFolderInclude, keep it.
 					folder.objectHealProbDiv = f.healFolderInclude
 				}
 			}
 		}
-		crawlerSleeper.Sleep(ctx, dataCrawlSleepPerFolder)
+		scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)

 		cache := dataUsageEntry{}

@@ -400,22 +405,22 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			entName = path.Clean(path.Join(folder.name, entName))
 			bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
 			if bucket == "" {
-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
 				}
-				return nil
+				return errDoneForNow
 			}

 			if isReservedOrInvalidBucket(bucket, false) {
-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(scannerLogPrefix+" invalid bucket: %v, entry: %v\n", bucket, entName)
 				}
-				return nil
+				return errDoneForNow
 			}

 			select {
 			case <-done:
-				return ctx.Err()
+				return errDoneForNow
 			default:
 			}

@@ -443,16 +448,16 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			}

 			// Dynamic time delay.
-			wait := crawlerSleeper.Timer(ctx)
+			wait := scannerSleeper.Timer(ctx)

 			// Get file size, ignore errors.
-			item := crawlItem{
+			item := scannerItem{
 				Path:       path.Join(f.root, entName),
 				Typ:        typ,
 				bucket:     bucket,
 				prefix:     path.Dir(prefix),
 				objectName: path.Base(entName),
-				debug:      f.dataUsageCrawlDebug,
+				debug:      f.dataUsageScannerDebug,
 				lifeCycle:  activeLifeCycle,
 				heal:       thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
 			}
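Replacing return nil and ctx.Err() with errDoneForNow gives the directory walker a sentinel that means "stop traversal, not a failure", so callers can filter it out instead of logging a spurious error. The pattern reduced to a runnable sketch, with stand-in names:

package main

import (
	"errors"
	"fmt"
)

var errDoneForNow = errors.New("done for now")

// walk calls fn for each entry and stops on the first non-nil error.
func walk(entries []string, fn func(string) error) error {
	for _, e := range entries {
		if err := fn(e); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := walk([]string{"a", "b", "c"}, func(name string) error {
		if name == "b" {
			return errDoneForNow // stop early, not a failure
		}
		fmt.Println("visit", name)
		return nil
	})
	if err != nil && !errors.Is(err, errDoneForNow) {
		panic(err) // only real errors escalate
	}
}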
@@ -528,17 +533,18 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 	healObjectsPrefix := color.Green("healObjects:")
 	for k := range existing {
 		bucket, prefix := path2BucketObject(k)
-		if f.dataUsageCrawlDebug {
+		if f.dataUsageScannerDebug {
 			console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
 		}

 		// Dynamic time delay.
-		wait := crawlerSleeper.Timer(ctx)
+		wait := scannerSleeper.Timer(ctx)
 		resolver.bucket = bucket

 		foundObjs := false
 		dangling := false
+		ctx, cancel := context.WithCancel(ctx)

 		err := listPathRaw(ctx, listPathRawOptions{
 			disks:  f.disks,
 			bucket: bucket,
@@ -548,13 +554,13 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			minDisks: len(f.disks), // We want full consistency.
 			// Weird, maybe transient error.
 			agreed: func(entry metaCacheEntry) {
-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)
 				}
 			},
 			// Some disks have data for this.
 			partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(healObjectsPrefix+" got partial, %d agreed, errs: %v\n", nAgreed, errs)
 				}

@@ -563,7 +569,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo

 				// Sleep and reset.
 				wait()
-				wait = crawlerSleeper.Timer(ctx)
+				wait = scannerSleeper.Timer(ctx)
 				entry, ok := entries.resolve(&resolver)
 				if !ok {
 					for _, err := range errs {
@@ -576,7 +582,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 					entry, _ = entries.firstFound()
 				}

-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(healObjectsPrefix+" resolved to: %v, dir: %v\n", entry.name, entry.isDir())
 				}

@@ -600,7 +606,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 				for _, ver := range fiv.Versions {
 					// Sleep and reset.
 					wait()
-					wait = crawlerSleeper.Timer(ctx)
+					wait = scannerSleeper.Timer(ctx)
 					err := bgSeq.queueHealTask(healSource{
 						bucket:    bucket,
 						object:    fiv.Name,
@@ -614,31 +620,31 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			},
 			// Too many disks failed.
 			finished: func(errs []error) {
-				if f.dataUsageCrawlDebug {
+				if f.dataUsageScannerDebug {
 					console.Debugf(healObjectsPrefix+" too many errors: %v\n", errs)
 				}
 				cancel()
 			},
 		})

-		if f.dataUsageCrawlDebug && err != nil && err != errFileNotFound {
+		if f.dataUsageScannerDebug && err != nil && err != errFileNotFound {
 			console.Debugf(healObjectsPrefix+" checking returned value %v (%T)\n", err, err)
 		}

 		// If we found one or more disks with this folder, delete it.
 		if err == nil && dangling {
-			if f.dataUsageCrawlDebug {
+			if f.dataUsageScannerDebug {
 				console.Debugf(healObjectsPrefix+" deleting dangling directory %s\n", prefix)
 			}

 			objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
 				Recursive: true,
-				Remove:    true,
+				Remove:    healDeleteDangling,
 			},
 				func(bucket, object, versionID string) error {
-					// Wait for each heal as per crawler frequency.
+					// Wait for each heal as per scanner frequency.
 					wait()
-					wait = crawlerSleeper.Timer(ctx)
+					wait = scannerSleeper.Timer(ctx)
 					return bgSeq.queueHealTask(healSource{
 						bucket:    bucket,
 						object:    object,
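The wait()/Timer pairing used throughout this loop throttles scanning in proportion to how expensive the previous operation was: take a timestamp before doing work, then sleep a multiple of the elapsed time before the next one. A simplified stand-in for the dynamicSleeper idea, not MinIO's actual implementation:

package main

import (
	"fmt"
	"time"
)

type sleeper struct{ factor float64 }

// Timer returns a func that, when called, sleeps factor x elapsed time.
func (s sleeper) Timer() func() {
	start := time.Now()
	return func() {
		elapsed := time.Since(start)
		time.Sleep(time.Duration(s.factor * float64(elapsed)))
	}
}

func main() {
	sl := sleeper{factor: 2}
	wait := sl.Timer()
	time.Sleep(10 * time.Millisecond) // simulated work
	wait()                            // sleeps ~20ms, throttling the scan
	fmt.Println("cycle done")
}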
@@ -678,7 +684,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
 	addDir = func(entName string, typ os.FileMode) error {
 		select {
 		case <-done:
-			return ctx.Err()
+			return errDoneForNow
 		default:
 		}

@@ -686,12 +692,12 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
 			dirStack = append(dirStack, entName)
 			err := readDirFn(path.Join(dirStack...), addDir)
 			dirStack = dirStack[:len(dirStack)-1]
-			crawlerSleeper.Sleep(ctx, dataCrawlSleepPerFolder)
+			scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
 			return err
 		}

 		// Dynamic time delay.
-		wait := crawlerSleeper.Timer(ctx)
+		wait := scannerSleeper.Timer(ctx)

 		// Get file size, ignore errors.
 		dirStack = append(dirStack, entName)
@@ -701,19 +707,19 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
 		bucket, prefix := path2BucketObjectWithBasePath(f.root, fileName)
 		var activeLifeCycle *lifecycle.Lifecycle
 		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
-			if f.dataUsageCrawlDebug {
+			if f.dataUsageScannerDebug {
 				console.Debugf(deepScannerLogPrefix+" Prefix %q has active rules\n", prefix)
 			}
 			activeLifeCycle = f.oldCache.Info.lifeCycle
 		}

-		item := crawlItem{
+		item := scannerItem{
 			Path:       fileName,
 			Typ:        typ,
 			bucket:     bucket,
 			prefix:     path.Dir(prefix),
 			objectName: path.Base(entName),
-			debug:      f.dataUsageCrawlDebug,
+			debug:      f.dataUsageScannerDebug,
 			lifeCycle:  activeLifeCycle,
 			heal:       hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
 		}
@@ -748,8 +754,8 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
// crawlItem represents each file while walking.
|
||||
type crawlItem struct {
|
||||
// scannerItem represents each file while walking.
|
||||
type scannerItem struct {
|
||||
Path string
|
||||
Typ os.FileMode
|
||||
|
||||
@@ -769,10 +775,10 @@ type sizeSummary struct {
|
||||
replicaSize int64
|
||||
}
|
||||
|
||||
type getSizeFn func(item crawlItem) (sizeSummary, error)
|
||||
type getSizeFn func(item scannerItem) (sizeSummary, error)
|
||||
|
||||
// transformMetaDir will transform a directory to prefix/file.ext
|
||||
func (i *crawlItem) transformMetaDir() {
|
||||
func (i *scannerItem) transformMetaDir() {
|
||||
split := strings.Split(i.prefix, SlashSeparator)
|
||||
if len(split) > 1 {
|
||||
i.prefix = path.Join(split[:len(split)-1]...)
|
||||
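One detail above deserves a note: the `heal` field is set by hashing the entry path and comparing it against the current cycle modulo a selection divisor, so each scan cycle heals a deterministic slice of the namespace, and full coverage is reached over many cycles. A toy version of that sampling — names and hash choice are invented for illustration:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// shouldHeal picks roughly 1 in n objects per cycle; because the hash of a
// given path is stable, every object gets selected once every n cycles.
func shouldHeal(path string, cycle, n uint32) bool {
	if n == 0 {
		return false
	}
	return crc32.ChecksumIEEE([]byte(path))%n == cycle%n
}

func main() {
	picked := 0
	for i := 0; i < 1000; i++ {
		if shouldHeal(fmt.Sprintf("bucket/obj-%d", i), 3, 16) {
			picked++
		}
	}
	fmt.Printf("cycle 3 heals %d of 1000 objects\n", picked) // ~1000/16
}
```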
@@ -791,42 +797,39 @@ type actionMeta struct {
 
 var applyActionsLogPrefix = color.Green("applyActions:")
 
-// applyActions will apply lifecycle checks on to a scanned item.
-// The resulting size on disk will always be returned.
-// The metadata will be compared to consensus on the object layer before any changes are applied.
-// If no metadata is supplied, -1 is returned if no action is taken.
-func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
+func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
+	if i.debug {
+		if meta.oi.VersionID != "" {
+			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
+		} else {
+			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
+		}
+	}
+	healOpts := madmin.HealOpts{Remove: healDeleteDangling}
+	if meta.bitRotScan {
+		healOpts.ScanMode = madmin.HealDeepScan
+	}
+	res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
+	if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
+		return 0
+	}
+	if err != nil && !errors.Is(err, NotImplemented{}) {
+		logger.LogIf(ctx, err)
+		return 0
+	}
+	return res.ObjectSize
+}
+
+func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
 	size, err := meta.oi.GetActualSize()
 	if i.debug {
 		logger.LogIf(ctx, err)
 	}
-	if i.heal {
-		if i.debug {
-			if meta.oi.VersionID != "" {
-				console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
-			} else {
-				console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
-			}
-		}
-		healOpts := madmin.HealOpts{Remove: healDeleteDangling}
-		if meta.bitRotScan {
-			healOpts.ScanMode = madmin.HealDeepScan
-		}
-		res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
-		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
-			return 0
-		}
-		if err != nil && !errors.Is(err, NotImplemented{}) {
-			logger.LogIf(ctx, err)
-			return 0
-		}
-		size = res.ObjectSize
-	}
 	if i.lifeCycle == nil {
 		if i.debug {
 			console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
 		}
-		return size
+		return false, size
 	}
 
 	versionID := meta.oi.VersionID
@@ -860,7 +863,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
 		if i.debug {
 			console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
 		}
-		return size
+		return false, size
 	}
 
 	obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
@@ -872,26 +875,46 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
 		if !obj.DeleteMarker { // if this is not a delete marker log and return
 			// Do nothing - heal in the future.
 			logger.LogIf(ctx, err)
-			return size
+			return false, size
 		}
 	case ObjectNotFound, VersionNotFound:
 		// object not found or version not found return 0
-		return 0
+		return false, 0
 	default:
 		// All other errors proceed.
 		logger.LogIf(ctx, err)
-		return size
+		return false, size
 		}
 	}
 
-	var applied bool
 	action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
 	if action != lifecycle.NoneAction {
 		applied = applyLifecycleAction(ctx, action, o, obj)
 	}
 
 	if applied {
-		return 0
+		switch action {
+		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
+			return true, size
+		}
+		// For all other lifecycle actions that remove data
+		return true, 0
 	}
 
+	return false, size
+}
+
+// applyActions will apply lifecycle checks on to a scanned item.
+// The resulting size on disk will always be returned.
+// The metadata will be compared to consensus on the object layer before any changes are applied.
+// If no metadata is supplied, -1 is returned if no action is taken.
+func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) int64 {
+	applied, size := i.applyLifecycle(ctx, o, meta)
+	// An applied lifecycle action means the object was removed or transitioned
+	// from the current deployment, so there is no need to call the healing
+	// routine even when the heal flag is set.
+	if !applied && i.heal {
+		size = i.applyHealing(ctx, o, meta)
+	}
 	return size
 }
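The three functions above now compose as: lifecycle first, healing only as a fallback. A standalone sketch of that control flow with stub functions (the stubs are placeholders, not the real implementations):

```go
package main

import "fmt"

type item struct{ heal bool }

// applyLifecycle reports whether a lifecycle action fired and the size the
// object now occupies (0 when the action removed its data).
func applyLifecycle(it item) (applied bool, size int64) { return false, 1024 }

// applyHealing returns the object's size after a heal attempt.
func applyHealing(it item) int64 { return 2048 }

func applyActions(it item) int64 {
	applied, size := applyLifecycle(it)
	// If lifecycle already removed or transitioned the object, healing it
	// afterwards would be wasted work, so the heal flag is ignored.
	if !applied && it.heal {
		size = applyHealing(it)
	}
	return size
}

func main() {
	fmt.Println(applyActions(item{heal: true})) // 2048 with these stubs
}
```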
@@ -990,10 +1013,12 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
 	return true
 }
 
-func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo) bool {
+func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, applyOnVersion bool) bool {
 	opts := ObjectOptions{}
 
-	opts.VersionID = obj.VersionID
+	if applyOnVersion {
+		opts.VersionID = obj.VersionID
+	}
 	if opts.VersionID == "" {
 		opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
 	}
@@ -1025,20 +1050,20 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
 }
 
 // Apply object, object version, restored object or restored object version action on the given object
-func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
+func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject, applyOnVersion bool) bool {
 	if obj.TransitionStatus != "" {
 		return applyExpiryOnTransitionedObject(ctx, objLayer, obj, restoredObject)
 	}
-	return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj)
+	return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion)
 }
 
-// Perform actions (removal of transitioning of objects), return true the action is successfully performed
+// Perform actions (removal or transitioning of objects), return true if the action is successfully performed
 func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) {
 	switch action {
 	case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
-		success = applyExpiryRule(ctx, objLayer, obj, false)
+		success = applyExpiryRule(ctx, objLayer, obj, false, action == lifecycle.DeleteVersionAction)
 	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
-		success = applyExpiryRule(ctx, objLayer, obj, true)
+		success = applyExpiryRule(ctx, objLayer, obj, true, action == lifecycle.DeleteRestoredVersionAction)
 	case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
 		success = applyTransitionAction(ctx, action, objLayer, obj)
 	}
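The new `applyOnVersion` argument is derived from the action itself: only the `*VersionAction` variants pin the expiry to a specific version ID. A freestanding illustration of that mapping (types invented for the example):

```go
package main

import "fmt"

type action int

const (
	deleteAction action = iota
	deleteVersionAction
)

// expire targets one version when applyOnVersion is set; otherwise it
// operates on the object's latest state.
func expire(obj string, applyOnVersion bool) {
	if applyOnVersion {
		fmt.Println("expiring a specific version of", obj)
	} else {
		fmt.Println("expiring latest object", obj)
	}
}

func main() {
	for _, a := range []action{deleteAction, deleteVersionAction} {
		expire("bucket/object", a == deleteVersionAction)
	}
}
```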
@@ -1046,12 +1071,12 @@ func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer
 }
 
 // objectPath returns the prefix and object name.
-func (i *crawlItem) objectPath() string {
+func (i *scannerItem) objectPath() string {
 	return path.Join(i.prefix, i.objectName)
 }
 
 // healReplication will heal a scanned item that has failed replication.
-func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
+func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
 	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
 		// heal delete marker replication failure or versioned delete replication failure
 		if oi.ReplicationStatus == replication.Pending ||
@@ -1064,10 +1089,10 @@ func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi Objec
 	switch oi.ReplicationStatus {
 	case replication.Pending:
 		sizeS.pendingSize += oi.Size
-		globalReplicationState.queueReplicaTask(oi)
+		globalReplicationPool.queueReplicaTask(oi)
 	case replication.Failed:
 		sizeS.failedSize += oi.Size
-		globalReplicationState.queueReplicaTask(oi)
+		globalReplicationPool.queueReplicaTask(oi)
 	case replication.Completed, "COMPLETE":
 		sizeS.replicatedSize += oi.Size
 	case replication.Replica:
@@ -1076,7 +1101,7 @@ func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi Objec
 }
 
 // healReplicationDeletes will heal a scanned deleted item that failed to replicate deletes.
-func (i *crawlItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
+func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
 	// handle soft delete and permanent delete failures here.
 	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
 		versionID := ""
@@ -1086,7 +1111,7 @@ func (i *crawlItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, o
 		} else {
 			versionID = oi.VersionID
 		}
-		globalReplicationState.queueReplicaDeleteTask(DeletedObjectVersionInfo{
+		globalReplicationPool.queueReplicaDeleteTask(DeletedObjectVersionInfo{
 			DeletedObject: DeletedObject{
 				ObjectName:            oi.Name,
 				DeleteMarkerVersionID: dmVersionID,
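`globalReplicationState` becoming `globalReplicationPool` suggests replication tasks now feed a pool of workers rather than a single state object. A minimal sketch of that pattern under this assumption — none of these types are MinIO's:

```go
package main

import (
	"fmt"
	"sync"
)

type replicaTask struct{ bucket, object string }

type replicationPool struct {
	tasks chan replicaTask
	wg    sync.WaitGroup
}

func newReplicationPool(workers int) *replicationPool {
	p := &replicationPool{tasks: make(chan replicaTask, 128)}
	p.wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer p.wg.Done()
			for t := range p.tasks {
				fmt.Println("replicating", t.bucket+"/"+t.object)
			}
		}()
	}
	return p
}

// queueReplicaTask never blocks the scanner: the task is dropped when the
// queue is full and can be retried on a later scan cycle.
func (p *replicationPool) queueReplicaTask(t replicaTask) {
	select {
	case p.tasks <- t:
	default:
	}
}

func main() {
	p := newReplicationPool(4)
	p.queueReplicaTask(replicaTask{"bucket", "object"})
	close(p.tasks)
	p.wg.Wait()
}
```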
@@ -80,7 +80,7 @@ func newDataUpdateTracker() *dataUpdateTracker {
 		Current: dataUpdateFilter{
 			idx: 1,
 		},
-		debug:      env.Get(envDataUsageCrawlDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
+		debug:      env.Get(envDataUsageScannerDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
 		input:      make(chan string, dataUpdateTrackerQueueSize),
 		save:       make(chan struct{}, 1),
 		saveExited: make(chan struct{}),
@@ -91,7 +91,7 @@ type dataUsageCacheInfo struct {
 	Name       string
 	LastUpdate time.Time
 	NextCycle  uint32
-	// indicates if the disk is being healed and crawler
+	// indicates if the disk is being healed and scanner
 	// should skip healing the disk
 	SkipHealing bool
 	BloomFilter []byte `msg:"BloomFilter,omitempty"`
@@ -485,7 +485,7 @@ type objectIO interface {
 // Only backend errors are returned as errors.
 // If the object is not found or unable to deserialize d is cleared and nil error is returned.
 func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
-	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
+	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, noLock, ObjectOptions{})
 	if err != nil {
 		switch err.(type) {
 		case ObjectNotFound:
@@ -513,7 +513,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 	}()
 	defer pr.Close()
 
-	r, err := hash.NewReader(pr, -1, "", "", -1, false)
+	r, err := hash.NewReader(pr, -1, "", "", -1)
 	if err != nil {
 		return err
 	}
@@ -521,7 +521,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 	_, err = store.PutObject(ctx,
 		dataUsageBucket,
 		name,
-		NewPutObjReader(r, nil, nil),
+		NewPutObjReader(r),
 		ObjectOptions{})
 	if isErrBucketNotFound(err) {
 		return nil
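`NewPutObjReader(r, nil, nil)` collapsing to `NewPutObjReader(r)` is a common Go API cleanup: the constructor keeps only the always-needed argument, and the rare options move elsewhere. A generic sketch of the shape — the `WithEncryption` method here is an invented stand-in, not necessarily MinIO's API:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

type PutObjReader struct {
	r   io.Reader
	key []byte // optional sealed key; absent in the common case
}

// NewPutObjReader covers the common, unencrypted case with one argument.
func NewPutObjReader(r io.Reader) *PutObjReader {
	return &PutObjReader{r: r}
}

// WithEncryption opts in to the rare case explicitly instead of forcing
// every caller to pass nil placeholders.
func (p *PutObjReader) WithEncryption(key []byte) *PutObjReader {
	p.key = key
	return p
}

func main() {
	p := NewPutObjReader(strings.NewReader("payload"))
	fmt.Println(p.key == nil) // true: encryption stays opt-in
}
```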
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"net/http"
 
 	jsoniter "github.com/json-iterator/go"
 	"github.com/minio/minio/cmd/logger"
@@ -27,7 +28,7 @@ import (
 )
 
 const (
-	envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"
+	envDataUsageScannerDebug = "MINIO_DISK_USAGE_SCANNER_DEBUG"
 
 	dataUsageRoot   = SlashSeparator
 	dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
@@ -46,12 +47,12 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
 			continue
 		}
 		size := int64(len(dataUsageJSON))
-		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
+		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size)
 		if err != nil {
 			logger.LogIf(ctx, err)
 			continue
 		}
-		_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
+		_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r), ObjectOptions{})
 		if !isErrBucketNotFound(err) {
 			logger.LogIf(ctx, err)
 		}
@@ -59,20 +60,18 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
 }
 
 func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
-	var dataUsageInfoJSON bytes.Buffer
-
-	err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
+	r, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageObjName, nil, http.Header{}, readLock, ObjectOptions{})
 	if err != nil {
 		if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
 			return DataUsageInfo{}, nil
 		}
 		return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
 	}
+	defer r.Close()
 
 	var dataUsageInfo DataUsageInfo
 	var json = jsoniter.ConfigCompatibleWithStandardLibrary
-	err = json.Unmarshal(dataUsageInfoJSON.Bytes(), &dataUsageInfo)
-	if err != nil {
+	if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
 		return DataUsageInfo{}, err
 	}
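The rewritten `loadDataUsageFromBackend` decodes straight from the object stream instead of buffering the whole payload first. A self-contained comparison of the two shapes, using jsoniter exactly as the diff does:

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

type dataUsageInfo struct {
	ObjectsCount uint64 `json:"objectsCount"`
}

func main() {
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	payload := `{"objectsCount": 42}`

	// Old shape: materialize the bytes, then unmarshal.
	var a dataUsageInfo
	if err := json.Unmarshal([]byte(payload), &a); err != nil {
		panic(err)
	}

	// New shape: decode directly from the reader, no intermediate buffer.
	var b dataUsageInfo
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&b); err != nil {
		panic(err)
	}
	fmt.Println(a.ObjectsCount == b.ObjectsCount) // true
}
```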
@@ -51,7 +51,7 @@ func TestDataUsageUpdate(t *testing.T) {
 	}
 	createUsageTestFiles(t, base, bucket, files)
 
-	getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
+	getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
 		if item.Typ&os.ModeDir == 0 {
 			var s os.FileInfo
 			s, err = os.Stat(item.Path)
@@ -64,7 +64,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		return
 	}
 
-	got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -185,7 +185,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		},
 	}
 	createUsageTestFiles(t, base, bucket, files)
-	got, err = crawlDataFolder(context.Background(), base, got, getSize)
+	got, err = scanDataFolder(context.Background(), base, got, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -270,7 +270,7 @@ func TestDataUsageUpdate(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = crawlDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), base, got, getSize)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -347,7 +347,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 	}
 	createUsageTestFiles(t, base, "", files)
 
-	getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
+	getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
 		if item.Typ&os.ModeDir == 0 {
 			var s os.FileInfo
 			s, err = os.Stat(item.Path)
@@ -359,7 +359,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		}
 		return
 	}
-	got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
+	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -469,7 +469,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		},
 	}
 	createUsageTestFiles(t, base, "", files)
-	got, err = crawlDataFolder(context.Background(), base, got, getSize)
+	got, err = scanDataFolder(context.Background(), base, got, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -552,7 +552,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = crawlDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), base, got, getSize)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -646,7 +646,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 	}
 	createUsageTestFiles(t, base, bucket, files)
 
-	getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
+	getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
 		if item.Typ&os.ModeDir == 0 {
 			var s os.FileInfo
 			s, err = os.Stat(item.Path)
@@ -658,7 +658,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 		}
 		return
 	}
-	want, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	want, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
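All of these tests drive the scanner through a `getSize` callback that stats each regular file. A freestanding version of the same callback shape, with the types reduced to what the snippet needs:

```go
package main

import (
	"fmt"
	"os"
)

type sizeSummary struct{ totalSize int64 }

type scannerItem struct {
	Path string
	Typ  os.FileMode
}

// getSize mirrors the test helper: directories contribute nothing, regular
// files contribute their on-disk size.
func getSize(item scannerItem) (sizeSummary, error) {
	var sizeS sizeSummary
	if item.Typ&os.ModeDir == 0 {
		s, err := os.Stat(item.Path)
		if err != nil {
			return sizeS, err
		}
		sizeS.totalSize = s.Size()
	}
	return sizeS, nil
}

func main() {
	s, err := getSize(scannerItem{Path: os.Args[0]})
	fmt.Println(s.totalSize, err)
}
```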
@@ -23,7 +23,6 @@ import (
 	"crypto/rand"
 	"encoding/base64"
 	"encoding/hex"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -103,7 +102,6 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
 	}
 
 	// We set file info only if its valid.
-	o.ModTime = m.Stat.ModTime
 	o.Size = m.Stat.Size
 	o.ETag = extractETag(m.Meta)
 	o.ContentType = m.Meta["content-type"]
@@ -122,6 +120,12 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
 			o.Expires = t.UTC()
 		}
 	}
+	if mtime, ok := m.Meta["last-modified"]; ok {
+		if t, e = time.Parse(http.TimeFormat, mtime); e == nil {
+			o.ModTime = t.UTC()
+		}
+	}
 
 	// etag/md5Sum has already been extracted. We need to
 	// remove to avoid it from appearing as part of user-defined metadata
 	o.UserDefined = cleanMetadata(m.Meta)
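The added block recovers `ModTime` from the cached `last-modified` metadata entry rather than trusting the local file's stat time. The parse in isolation (`http.TimeFormat` is the same constant the diff uses):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// modTimeFromMeta returns the recorded last-modified time, if present and
// well-formed, normalized to UTC.
func modTimeFromMeta(meta map[string]string) (time.Time, bool) {
	if mtime, ok := meta["last-modified"]; ok {
		if t, err := time.Parse(http.TimeFormat, mtime); err == nil {
			return t.UTC(), true
		}
	}
	return time.Time{}, false
}

func main() {
	meta := map[string]string{"last-modified": "Tue, 09 Feb 2021 18:00:00 GMT"}
	t, ok := modTimeFromMeta(meta)
	fmt.Println(t, ok)
}
```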
@@ -264,10 +268,6 @@ func (c *diskCache) toClear() uint64 {
 	return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
 }
 
-var (
-	errDoneForNow = errors.New("done for now")
-)
-
 func (c *diskCache) purgeWait(ctx context.Context) {
 	for {
 		select {
@@ -377,7 +377,7 @@ func (c *diskCache) purge(ctx context.Context) {
 		return nil
 	}
 
-	if err := readDirFilterFn(c.dir, filterFn); err != nil {
+	if err := readDirFn(c.dir, filterFn); err != nil {
 		logger.LogIf(ctx, err)
 		return
 	}
@@ -437,7 +437,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
 func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
 
 	cLock := c.NewNSLockFn(cacheObjPath)
-	if err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
+	if ctx, err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
 		return
 	}
 
@@ -506,9 +506,7 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
 	}
 	// get metadata of part.1 if full file has been cached.
 	partial = true
-	fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile))
-	if err == nil {
-		meta.Stat.ModTime = atime.Get(fi)
+	if _, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)); err == nil {
 		partial = false
 	}
 	return meta, partial, meta.Hits, nil
@@ -517,9 +515,11 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
 // saves object metadata to disk cache
 // incHitsOnly is true if metadata update is incrementing only the hit counter
 func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
+	var err error
 	cachedPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(cachedPath)
-	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
+	ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -570,7 +570,6 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
 		}
 	}
 	m.Stat.Size = actualSize
-	m.Stat.ModTime = UTCNow()
 	if !incHitsOnly {
 		// reset meta
 		m.Meta = meta
@@ -697,7 +696,8 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
 	}
 	cachePath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(cachePath)
-	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
+	ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
 		return oi, err
 	}
 	defer cLock.Unlock()
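Every lock acquisition above changes from `if err := cLock.GetLock(ctx, ...)` to `ctx, err = cLock.GetLock(ctx, ...)`: the lock now hands back a context tied to its lifetime. A plausible shape for such an API, sketched under the assumption that losing the lock cancels the derived context (not the actual dsync implementation):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type nsLock struct{ cancel context.CancelFunc }

// GetLock acquires the lock and derives a context that can be canceled if
// the lock is lost, so guarded work aborts instead of racing unprotected.
func (l *nsLock) GetLock(ctx context.Context, timeout time.Duration) (context.Context, error) {
	lockCtx, cancel := context.WithCancel(ctx)
	l.cancel = cancel
	return lockCtx, nil
}

// Unlock releases the lock and invalidates the derived context.
func (l *nsLock) Unlock() { l.cancel() }

func main() {
	var l nsLock
	ctx, err := l.GetLock(context.Background(), time.Second)
	if err != nil {
		panic(err)
	}
	defer l.Unlock()
	fmt.Println(ctx.Err() == nil) // true while the lock is held
}
```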
@@ -911,7 +911,8 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
 func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
 	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(cacheObjPath)
-	if err := cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
+	ctx, err = cLock.GetRLock(ctx, globalOperationTimeout)
+	if err != nil {
 		return nil, numHits, err
 	}
 
@@ -975,7 +976,8 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 // Deletes the cached object
 func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
 	cLock := c.NewNSLockFn(cacheObjPath)
-	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
+	_, err = cLock.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -1023,7 +1025,7 @@ func (c *diskCache) scanCacheWritebackFailures(ctx context.Context) {
 		return nil
 	}
 
-	if err := readDirFilterFn(c.dir, filterFn); err != nil {
+	if err := readDirFn(c.dir, filterFn); err != nil {
 		logger.LogIf(ctx, err)
 		return
 	}
@@ -173,7 +173,11 @@ func backendDownError(err error) bool {
 
 // IsCacheable returns if the object should be saved in the cache.
 func (o ObjectInfo) IsCacheable() bool {
-	return !crypto.IsEncrypted(o.UserDefined) || globalCacheKMS != nil
+	if globalCacheKMS != nil {
+		return true
+	}
+	_, ok := crypto.IsEncrypted(o.UserDefined)
+	return !ok
 }
 
 // reads file cached on disk from offset upto length
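The rewritten `IsCacheable` makes its precedence explicit: a configured cache KMS always wins; otherwise only unencrypted objects may be cached. The same decision reduced to a pure function (globals replaced by parameters for the sketch):

```go
package main

import "fmt"

// isCacheable: encrypted objects are cacheable only when a cache KMS can
// re-encrypt them; plaintext objects are always cacheable.
func isCacheable(encrypted, cacheKMSConfigured bool) bool {
	if cacheKMSConfigured {
		return true
	}
	return !encrypted
}

func main() {
	fmt.Println(isCacheable(true, false))  // false: encrypted, no cache KMS
	fmt.Println(isCacheable(true, true))   // true: cache KMS handles it
	fmt.Println(isCacheable(false, false)) // true: plaintext
}
```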
@@ -200,6 +200,7 @@ func getMetadata(objInfo ObjectInfo) map[string]string {
 	if !objInfo.Expires.Equal(timeSentinel) {
 		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
 	}
+	metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
 	for k, v := range objInfo.UserDefined {
 		metadata[k] = v
 	}
@@ -704,14 +705,14 @@ func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
 	if st == CommitComplete || st.String() == "" {
 		return
 	}
-	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
 	if err != nil {
 		return
 	}
	var opts ObjectOptions
 	opts.UserDefined = make(map[string]string)
 	opts.UserDefined[xhttp.ContentMD5] = oi.UserDefined["content-md5"]
-	objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader, nil, nil), opts)
+	objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
 	wbCommitStatus := CommitComplete
 	if err != nil {
 		wbCommitStatus = CommitFailed
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"testing"
+	"time"
 )
 
 // Tests ToObjectInfo function.
@@ -27,7 +28,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
 	if objInfo.Size != 0 {
 		t.Fatal("Unexpected object info value for Size", objInfo.Size)
 	}
-	if !objInfo.ModTime.Equal(timeSentinel) {
+	if !objInfo.ModTime.Equal(time.Time{}) {
 		t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
 	}
 	if objInfo.IsDir {
@@ -20,6 +20,7 @@ import (
 	"bufio"
 	"crypto/hmac"
 	"crypto/rand"
+	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/binary"
 	"encoding/hex"
@@ -33,7 +34,6 @@ import (
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
-	sha256 "github.com/minio/sha256-simd"
 	"github.com/minio/sio"
 )
 
@@ -247,56 +247,45 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
 }
 
 func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
-	switch {
-	default:
-		return nil, errObjectTampered
-	case crypto.S3.IsEncrypted(metadata) && isCacheEncrypted(metadata):
-		if globalCacheKMS == nil {
+	switch kind, _ := crypto.IsEncrypted(metadata); kind {
+	case crypto.S3:
+		var KMS crypto.KMS = GlobalKMS
+		if isCacheEncrypted(metadata) {
+			KMS = globalCacheKMS
+		}
+		if KMS == nil {
 			return nil, errKMSNotConfigured
 		}
-		keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
+		objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
 		if err != nil {
 			return nil, err
 		}
-		extKey, err := globalCacheKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
-		if err != nil {
-			return nil, err
-		}
-		var objectKey crypto.ObjectKey
-		if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
-			return nil, err
-		}
 		return objectKey[:], nil
-	case crypto.S3.IsEncrypted(metadata):
+	case crypto.S3KMS:
 		if GlobalKMS == nil {
 			return nil, errKMSNotConfigured
 		}
-		keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
+
+		objectKey, err := crypto.S3KMS.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
 		if err != nil {
 			return nil, err
 		}
-		extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
-		if err != nil {
-			return nil, err
-		}
-		var objectKey crypto.ObjectKey
-		if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
-			return nil, err
-		}
 		return objectKey[:], nil
-	case crypto.SSEC.IsEncrypted(metadata):
-		var extKey [32]byte
-		copy(extKey[:], key)
+	case crypto.SSEC:
 		sealedKey, err := crypto.SSEC.ParseMetadata(metadata)
 		if err != nil {
 			return nil, err
 		}
-		var objectKey crypto.ObjectKey
+		var (
+			objectKey crypto.ObjectKey
+			extKey    [32]byte
+		)
+		copy(extKey[:], key)
 		if err = objectKey.Unseal(extKey, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
 			return nil, err
 		}
 		return objectKey[:], nil
+	default:
+		return nil, errObjectTampered
 	}
 }
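`decryptObjectInfo` now dispatches once on the encryption kind reported by `crypto.IsEncrypted` and delegates unsealing to one helper per scheme, instead of probing each scheme with its own case guard. The shape of that dispatch, with all types invented for the sketch:

```go
package main

import (
	"errors"
	"fmt"
)

type kind int

const (
	kindNone kind = iota
	kindS3
	kindS3KMS
	kindSSEC
)

var errObjectTampered = errors.New("object is tampered")

// isEncrypted mirrors the new (kind, ok) contract.
func isEncrypted(meta map[string]string) (kind, bool) {
	switch meta["scheme"] {
	case "SSE-S3":
		return kindS3, true
	case "SSE-KMS":
		return kindS3KMS, true
	case "SSE-C":
		return kindSSEC, true
	}
	return kindNone, false
}

// decryptKey owns exactly one unseal path per kind, as in the rewritten switch.
func decryptKey(meta map[string]string) (string, error) {
	switch k, _ := isEncrypted(meta); k {
	case kindS3:
		return "unsealed via built-in KMS", nil
	case kindS3KMS:
		return "unsealed via external KMS", nil
	case kindSSEC:
		return "unsealed via client-provided key", nil
	default:
		return "", errObjectTampered
	}
}

func main() {
	key, err := decryptKey(map[string]string{"scheme": "SSE-S3"})
	fmt.Println(key, err)
}
```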
@@ -516,7 +505,7 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {
 // It returns an error if the object is not encrypted or marked as encrypted
 // but has an invalid size.
 func (o *ObjectInfo) DecryptedSize() (int64, error) {
-	if !crypto.IsEncrypted(o.UserDefined) {
+	if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
 		return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
 	}
 	if !isEncryptedMultipart(*o) {
@@ -649,7 +638,7 @@ func tryDecryptETag(key []byte, encryptedETag string, ssec bool) string {
 // requested range starts, along with the DARE sequence number within
 // that part. For single part objects, the partStart will be 0.
 func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
-	if !crypto.IsEncrypted(o.UserDefined) {
+	if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
 		err = errors.New("Object is not encrypted")
 		return
 	}
@@ -796,7 +785,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
 		}
 	}
 
-	encrypted = crypto.IsEncrypted(info.UserDefined)
+	_, encrypted = crypto.IsEncrypted(info.UserDefined)
 	if !encrypted && crypto.SSEC.IsRequested(headers) && r.Header.Get(xhttp.AmzCopySource) == "" {
 		return false, errInvalidEncryptionParameters
 	}
@@ -818,7 +807,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
 		return encrypted, err
 	}
 
-	if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
+	if _, ok := crypto.IsEncrypted(info.UserDefined); ok && !crypto.IsMultiPart(info.UserDefined) {
 		info.ETag = getDecryptedETag(headers, *info, false)
 	}
 }
@@ -129,7 +129,7 @@ func TestDecryptObjectInfo(t *testing.T) {
 	for i, test := range decryptObjectInfoTests {
 		if encrypted, err := DecryptObjectInfo(&test.info, test.request); err != test.expErr {
 			t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
-		} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
+		} else if _, enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
 			t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
 		} else if !encrypted && enc != encrypted {
 			t.Errorf("Test %d: Decryption thinks object is not encrypted but it is", i)
||||