Compare commits
131 Commits
RELEASE.20...release

| SHA1 |
|---|
| 404d2ebe3f |
| 46964eb764 |
| bfab990c33 |
| 94018588fe |
| 8b76ba8d5d |
| 7eb7f65e48 |
| c608c0688a |
| 41a9d1d778 |
| e21e80841e |
| 98c792bbeb |
| f687ba53bc |
| e3da59c923 |
| 781b9b051c |
| 438becfde8 |
| 16ef338649 |
| 3242847ec0 |
| cf87303094 |
| 90d8ec6310 |
| b383522743 |
| 6d42036dd4 |
| b4d8bcf644 |
| d7f32ad649 |
| 906d68c356 |
| 749e9c5771 |
| 410e84d273 |
| 75741dbf4a |
| fad7b27f15 |
| 79564656eb |
| 21cfc4aa49 |
| e80239a661 |
| 6a2ed44095 |
| 8adfeb0d84 |
| d23485e571 |
| da70e6ddf6 |
| 922c7b57f5 |
| 726d80dbb7 |
| 23b03dadb8 |
| 7b3719c17b |
| 9a6487319a |
| 94ff624242 |
| 98ff91b484 |
| 4d86384dc7 |
| 27eb4ae3bc |
| b5dcaaccb4 |
| 0843280dc3 |
| 61a1ea60c2 |
| b92a220db1 |
| be5910b87e |
| 51a8619a79 |
| d46c3c07a8 |
| 14d89eaae4 |
| 32b088a2ff |
| eed3b66d98 |
| add3cd4e44 |
| 60b0f2324e |
| 0eb146e1b2 |
| b379ca3bb0 |
| e197800f90 |
| 980311fdfd |
| 771dea175c |
| fa94682e83 |
| 6160188bf3 |
| e5a1a2a974 |
| 642ba3f2d6 |
| 4d80de899a |
| afbd3e41eb |
| 5e003549cc |
| 7fa3e4106b |
| 75db500e85 |
| 910097bbbc |
| afd346417d |
| feafccf007 |
| 9b54fcdf12 |
| f92b7a5621 |
| c25e75f0b5 |
| 3ffe520643 |
| 952b0f111d |
| 777344a594 |
| 9d118b372e |
| 878bc6c72b |
| fdc2f69218 |
| 0d124095ea |
| 691035832a |
| eac66e67ec |
| 57f3ed22d4 |
| 2f29719e6b |
| 209fe61dcc |
| 8ecffdb7a7 |
| 806df164b2 |
| 4ac9ed4248 |
| 3d8c512bba |
| 3ff5f55dcb |
| 097e5eba9f |
| ba6930bb13 |
| 64662a49ff |
| 78e867e145 |
| 9ccc483df6 |
| abce040088 |
| 558762bdf6 |
| d971061305 |
| 509bcc01ad |
| 7ea95fcec8 |
| 79b0d056a2 |
| ec547c0fa8 |
| bcf9825082 |
| 651487507a |
| 124816f6a6 |
| fa9cf1251b |
| 97e7a902d0 |
| d73d756a80 |
| 7488c77e7c |
| 786585009e |
| 7be7109471 |
| 464fa08f2e |
| c3217bd6eb |
| f14cc6c943 |
| 2c198ae7b6 |
| 690434514d |
| 039f59b552 |
| c6a120df0e |
| cd9e30c0f4 |
| f96d4cf7d3 |
| 879599b0cf |
| b1bb3f7016 |
| e8d8dfa3ae |
| bbd1244a88 |
| 10bdb78699 |
| 289b22d911 |
| 0b9c17443e |
| 23f7ab40b3 |
| e3f8830ab7 |
14
.github/workflows/go.yml
vendored
@@ -11,8 +11,8 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.15.x, 1.16.x]
os: [ubuntu-latest, windows-latest]
go-version: [1.16.x]
os: [ubuntu-latest, windows-latest, macos-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
@@ -21,6 +21,14 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Build on ${{ matrix.os }}
if: matrix.os == 'macos-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
make
make test-race
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:
@@ -42,8 +50,6 @@ jobs:
curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
go list -m all | ./nancy sleuth
make
diff -au <(gofmt -s -d cmd) <(printf "")
diff -au <(gofmt -s -d pkg) <(printf "")
make test-race
make crosscompile
make verify

@@ -17,6 +17,8 @@ linters:
- gosimple
- deadcode
- structcheck
- gomodguard
- gofmt

issues:
exclude-use-default: false
74
CREDITS
@@ -3696,36 +3696,6 @@ Redistribution and use in source and binary forms, with or without modification,

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


================================================================

github.com/elazarl/go-bindata-assetfs
https://github.com/elazarl/go-bindata-assetfs
----------------------------------------------------------------
Copyright (c) 2014, Elazar Leibovich
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

================================================================

github.com/fatih/color
@@ -12501,28 +12471,28 @@ SOFTWARE.
github.com/klauspost/readahead
https://github.com/klauspost/readahead
----------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2015 Klaus Post

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================================

@@ -1,4 +1,4 @@
FROM golang:1.15-alpine as builder
FROM golang:1.16-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"


@@ -1,4 +1,4 @@
FROM golang:1.15-alpine as builder
FROM golang:1.16-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"


@@ -2,11 +2,13 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

ARG RELEASE

LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="RELEASE.2021-02-24T18-44-45Z" \
release="RELEASE.2021-02-24T18-44-45Z" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

@@ -28,9 +30,9 @@ RUN \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \

16
Makefile
@@ -17,38 +17,28 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && go get github.com/quasilyte/go-ruleguard/cmd/ruleguard@v0.2.1)
@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)

crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps fmt lint ruleguard check-gen
verifiers: getdeps lint check-gen

check-gen:
@go generate ./... >/dev/null
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

fmt:
@echo "Running $@ check"
@GO111MODULE=on gofmt -d cmd/
@GO111MODULE=on gofmt -d pkg/

lint:
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml

ruleguard:
@echo "Running $@ check"
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml

# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
@echo "Running unit tests"
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

test-race: verifiers build
@echo "Running unit tests under -race"

100
README.md
@@ -5,32 +5,32 @@

MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

# Docker Installation

Use the following commands to run a standalone MinIO server on a Docker container.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

## Stable

Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:

```sh
docker run -p 9000:9000 minio/minio server /data
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.


@@ -45,12 +45,12 @@ Run the following command to run the bleeding-edge image of MinIO on a Docker co
docker run -p 9000:9000 minio/minio:edge server /data
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.


@@ -61,9 +61,9 @@ see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view

Use the following commands to run a standalone MinIO server on macOS.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

## Homebrew (recommended)

@@ -82,12 +82,12 @@ brew uninstall minio
brew install minio/stable/minio
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

## Binary Download

@@ -100,12 +100,12 @@ chmod +x minio
./minio server /data
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.


@@ -130,18 +130,18 @@ The following table lists supported architectures. Replace the `wget` URL with t
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.


> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

# Microsoft Windows
@@ -158,17 +158,17 @@ Use the following command to run a standalone MinIO server on the Windows host.
minio.exe server D:\
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

# FreeBSD
@@ -184,27 +184,27 @@ service minio start

# Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.


> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

# Deployment Recommendations


@@ -89,7 +89,7 @@ service minio start

## Install from Source

Source installation is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum required Golang version is [go1.15](https://golang.org/dl/#stable)
Source installation is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum required Golang version is [go1.16](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio

1
browser/.gitignore
vendored
@@ -17,4 +17,3 @@ release
*.syso
coverage.txt
node_modules
production

@@ -17,24 +17,13 @@ nvm install stable
npm install
```

### Install `go-bindata` and `go-bindata-assetfs`

If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)

```sh
go get github.com/go-bindata/go-bindata/go-bindata
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
```

## Generating Assets

### Generate ui-assets.go

```sh
npm run release
```

This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
This generates `production` in the current directory.


## Run MinIO Browser with live reload

@@ -24,9 +24,6 @@ jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))

jest.mock("../../web", () => ({
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (

@@ -26,6 +26,7 @@ import {
SHARE_OBJECT_EXPIRY_HOURS,
SHARE_OBJECT_EXPIRY_MINUTES
} from "../constants"
import QRCode from "react-qr-code";

export class ShareObjectModal extends React.Component {
constructor(props) {
@@ -89,6 +90,7 @@ export class ShareObjectModal extends React.Component {
<ModalHeader>Share Object</ModalHeader>
<ModalBody>
<div className="input-group copy-text">
<QRCode value={url} size={128}/>
<label>Shareable Link</label>
<input
type="text"

@@ -75,6 +75,11 @@
border-color: darken(@input-border, 5%);
}
}

svg {
display: block;
margin: 0 auto 5px;
}
}

/*--------------------------
@@ -150,4 +155,4 @@
100% {
transform: rotate(360deg);
}
}
}

11
browser/assets.go
Normal file
@@ -0,0 +1,11 @@
package browser

import "embed"

//go:embed production/*
var fs embed.FS

// GetStaticAssets returns assets
func GetStaticAssets() embed.FS {
return fs
}

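The new `browser/assets.go` above switches the UI assets from generated `go-bindata-assetfs` code to Go 1.16's `//go:embed`. As a rough illustration of how such an embedded tree can be served, here is a minimal sketch using only the standard library; the handler wiring, port and package layout are assumptions for the example, not MinIO's actual code:

```go
// Hypothetical sketch: serving a //go:embed production/* tree over HTTP.
package main

import (
	"embed"
	"io/fs"
	"log"
	"net/http"
)

//go:embed production/*
var assets embed.FS

func main() {
	// Strip the "production/" prefix so index.html is served at "/".
	sub, err := fs.Sub(assets, "production")
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/", http.FileServer(http.FS(sub)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```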
@@ -14,19 +14,11 @@
* limitations under the License.
*/

var moment = require('moment')
var async = require('async')
var exec = require('child_process').exec
var fs = require('fs')

var isProduction = process.env.NODE_ENV == 'production' ? true : false
var assetsFileName = ''
var commitId = ''
var date = moment.utc()
var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z'
var buildType = 'DEVELOPMENT'
if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD

rmDir = function(dirPath) {
try { var files = fs.readdirSync(dirPath); }
@@ -53,74 +45,6 @@ async.waterfall([
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (isProduction) {
fs.renameSync('production/index_bundle.js',
'production/index_bundle-' + releaseTag + '.js')
} else {
fs.renameSync('dev/index_bundle.js',
'dev/index_bundle-' + releaseTag + '.js')
}
var cmd = 'git log --format="%H" -n1'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (!stdout) throw new Error('commitId is empty')
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
var cmd = 'gofmt -s -w -l bindata_assetfs.go'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
fs.renameSync('bindata_assetfs.go', assetsFileName)
fs.appendFileSync(assetsFileName, '\n')
fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' +
releaseTag + '"\n')
fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n')
fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"')
fs.appendFileSync(assetsFileName, '\n')
var contents;
if (isProduction) {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_productionIndexHtml/g, '_productionIndexHTML')
.replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes')
.replace(/productionIndexHtml/g, 'productionIndexHTML')
.replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs')
.replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes')
.replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs')
.replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs')
.replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes')
.replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs');
} else {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_devIndexHtml/g, '_devIndexHTML')
.replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes')
.replace(/devIndexHtml/g, 'devIndexHTML')
.replace(/_devIndex_bundleJs/g, '_devIndexBundleJs')
.replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes')
.replace(/devIndex_bundleJs/g, 'devIndexBundleJs')
.replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs')
.replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes')
.replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs');
}
contents = contents.replace(/MINIO_UI_VERSION/g, version)
contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + releaseTag + '.js')

fs.writeFileSync(assetsFileName, contents, 'utf8')
console.log('UI assets file :', assetsFileName)
cb()
}
], function(err) {
if (err) return console.log(err)
})

32008
browser/package-lock.json
generated
@@ -6,7 +6,7 @@
"test": "jest",
"dev": "NODE_ENV=dev webpack-dev-server --devtool cheap-module-eval-source-map --progress --colors --hot --content-base dev",
"build": "NODE_ENV=dev node build.js",
"release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js",
"release": "NODE_ENV=production node build.js",
"format": "esformatter -i 'app/**/*.js'"
},
"jest": {
@@ -84,6 +84,7 @@
"react-dropzone": "^11.0.1",
"react-infinite-scroller": "^1.2.4",
"react-onclickout": "^2.0.8",
"react-qr-code": "^1.1.1",
"react-redux": "^5.1.2",
"react-router-dom": "^5.2.0",
"redux": "^4.0.5",

BIN browser/production/chrome.png (new file, 3.6 KiB)
BIN browser/production/favicon-16x16.png (new file, 15 KiB)
BIN browser/production/favicon-32x32.png (new file, 16 KiB)
BIN browser/production/favicon-96x96.png (new file, 17 KiB)
BIN browser/production/firefox.png (new file, 4.7 KiB)
59
browser/production/index.html
Normal file
@@ -0,0 +1,59 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>MinIO Browser</title>

<link rel="icon" type="image/png" sizes="32x32" href="/minio/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/minio/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/minio/favicon-16x16.png">

<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>

<body>
<div class="page-load">
<div class="pl-inner">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>

<!--[if lt IE 11]>
<div class="ie-warning">
<div class="iw-inner">
<i class="iwi-icon fas fa-exclamation-triangle"></i>

You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser

<ul>
<li>
<a href="http://www.google.com/chrome/">
<img src="chrome.png" alt="">
<div>Chrome</div>
</a>
</li>
<li>
<a href="https://www.mozilla.org/en-US/firefox/new/">
<img src="firefox.png" alt="">
<div>Firefox</div>
</a>
</li>
<li>
<a href="https://www.apple.com/safari/">
<img src="safari.png" alt="">
<div>Safari</div>
</a>
</li>
</ul>

<div class="iwi-skip">Skip & Continue</div>
</div>
</div>
<![endif]-->

<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>

67
browser/production/index_bundle.js
Normal file
98
browser/production/loader.css
Normal file
@@ -0,0 +1,98 @@
.page-load {
position: fixed;
width: 100%;
height: 100%;
top: 0;
left: 0;
background: #002a37;
z-index: 100;
transition: opacity 200ms;
-webkit-transition: opacity 200ms;
}

.pl-0{
opacity: 0;
}

.pl-1 {
display: none;
}

.pl-inner {
position: absolute;
width: 100px;
height: 100px;
left: 50%;
margin-left: -50px;
top: 50%;
margin-top: -50px;
text-align: center;
-webkit-animation: fade-in 500ms;
animation: fade-in 500ms;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
animation-delay: 350ms;
-webkit-animation-delay: 350ms;
-webkit-backface-visibility: visible;
backface-visibility: visible;
}

.pl-inner:before {
content: '';
position: absolute;
width: 100%;
height: 100%;
left: 0;
top: 0;
display: block;
-webkit-animation: spin 1000ms infinite linear;
animation: spin 1000ms infinite linear;
border: 1px solid rgba(255, 255, 255, 0.2);;
border-left-color: #fff;
border-radius: 50%;
}

.pl-inner > img {
width: 30px;
margin-top: 21px;
}

@-webkit-keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}

@keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}

@-webkit-keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}

@keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}

12
browser/production/logo.svg
Normal file
@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="93px" height="187px" viewBox="0 0 93 187" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
<title>logo</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="logo" transform="translate(0.187500, -0.683594)" fill="#FFFFFF" fill-rule="nonzero">
<path d="M91.49,46.551 C86.7827023,38.7699609 82.062696,30.9966172 77.33,23.231 C74.87,19.231 72.33,15.231 69.88,11.231 C69.57,10.731 69.18,10.291 68.88,9.831 C64.35,2.931 55.44,-1.679 46.73,2.701 C42.9729806,4.51194908 40.0995718,7.75449451 38.7536428,11.7020516 C37.4077139,15.6496086 37.701799,19.9721186 39.57,23.701 C41.08,26.641 43.57,29.121 45.91,31.581 C53.03,39.141 60.38,46.491 67.45,54.111 C72.4175495,59.4492221 74.4526451,66.8835066 72.8965704,74.0075359 C71.3404956,81.1315653 66.390952,87.0402215 59.65,89.821 C59.4938176,89.83842 59.3361824,89.83842 59.18,89.821 L59.18,54.591 C46.6388051,61.0478363 35.3944735,69.759905 26.01,80.291 C11.32,96.671 2.64,117.141 0.01,132.071 L23.96,119.821 C31.96,115.771 39.86,111.821 48.14,107.581 L48.14,175.921 L59.14,187.131 L59.14,101.831 C59.14,101.831 59.39,101.711 60.22,101.261 C63.5480598,99.6738911 66.7772674,97.8873078 69.89,95.911 C77.7130888,90.4306687 82.7479457,81.8029342 83.6709542,72.295947 C84.5939627,62.7889599 81.3127806,53.3538429 74.69,46.471 C66.49,37.891 58.24,29.351 50.05,20.761 C47.67,18.261 47.72,15.101 50.05,12.881 C52.38,10.661 55.56,10.881 57.96,13.331 L61.38,16.781 C64.1,19.681 66.79,22.611 69.53,25.481 C76.4547149,32.7389629 83.3947303,39.9823123 90.35,47.211 C90.7,47.571 91.12,47.871 91.5,48.211 L91.93,47.951 C91.8351945,47.4695902 91.6876376,47.0000911 91.49,46.551 Z M48.11,94.931 C47.9883217,95.5022568 47.6230065,95.9917791 47.11,96.271 C42.72,98.601 38.29,100.871 33.87,103.141 L17.76,111.401 C24.771203,96.7435071 35.1132853,83.9289138 47.96,73.981 C48.08,74.221 48.16,74.301 48.16,74.381 C48.15,81.231 48.17,88.081 48.11,94.931 Z" id="Shape"></path>
</g>
</g>
</svg>
(new file, 2.2 KiB)
BIN browser/production/safari.png (new file, 4.9 KiB)
@@ -21,7 +21,7 @@ _init() {

## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.13"
GO_VERSION="1.16"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0

## List of architectures and OS to test coss compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
}

function _build() {

@@ -3,5 +3,5 @@
set -e

for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
done

@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

@@ -28,6 +28,8 @@ WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

export GOGC=25

function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123

@@ -20,7 +20,6 @@ import (
"encoding/xml"
"io"
"net/http"
"net/url"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
@@ -180,7 +179,7 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -244,7 +243,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return

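Both ACL handlers above move from a bare `url.PathUnescape` to a shared `unescapePath` helper. That helper's body is not part of this diff, so the following is only a hedged guess at the general idea (percent-decode, then reject obviously invalid object keys), not MinIO's actual implementation:

```go
// Hypothetical illustration only; the real unescapePath likely differs.
package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

func unescapeObjectPath(p string) (string, error) {
	out, err := url.PathUnescape(p)
	if err != nil {
		return "", err
	}
	// Reject object keys that cannot be valid after decoding.
	if strings.Contains(out, "\x00") || strings.Contains(out, "//") {
		return "", errors.New("invalid object name")
	}
	return out, nil
}

func main() {
	fmt.Println(unescapeObjectPath("photos%2F2021%2Fimage.png"))
	fmt.Println(unescapeObjectPath("bad%00name"))
}
```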
@@ -172,7 +172,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
}

if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
switch err.(type) {
case BucketRemoteConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)

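The `SetRemoteTargetHandler` hunk above replaces a single generic error response with a type switch that maps `BucketRemoteConnectionErr` to a dedicated replication error code. A minimal, self-contained sketch of that pattern follows; the error type and code strings below are invented for illustration:

```go
package main

import "fmt"

type remoteConnectionErr struct{ endpoint string }

func (e remoteConnectionErr) Error() string {
	return "remote target not reachable: " + e.endpoint
}

// apiErrorCode maps one specific error type to a dedicated code and falls
// back to a generic one, mirroring the switch added in the hunk above.
func apiErrorCode(err error) string {
	switch err.(type) {
	case remoteConnectionErr:
		return "ReplicationRemoteConnectionError"
	default:
		return "InternalError"
	}
}

func main() {
	fmt.Println(apiErrorCode(remoteConnectionErr{endpoint: "https://replica.example.net"}))
	fmt.Println(apiErrorCode(fmt.Errorf("some other failure")))
}
```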
@@ -19,6 +19,7 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
@@ -157,6 +158,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
@@ -397,6 +399,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
@@ -407,6 +410,19 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
}

if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
DenyOnly: true, // check if changing password is explicitly denied.
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -665,6 +681,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -677,6 +694,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -725,7 +743,19 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}

accountName := cred.AccessKey
policies, err := globalIAMSys.PolicyDBGet(accountName, false)
var policies []string
switch globalIAMSys.usersSysType {
case MinIOUsersSysType:
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
case LDAPUsersSysType:
parentUser := accountName
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
default:
err = errors.New("should not happen!")
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

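The `AddUser` hunk above introduces an `IsAllowed` check with `DenyOnly: true`: a user changing their own credentials is permitted unless a policy explicitly denies the action. The following is a simplified toy model of that evaluation logic, not the `iampolicy` package's real evaluator:

```go
// Toy model of a deny-only permission check; not MinIO's iampolicy code.
package main

import "fmt"

type effect int

const (
	noStatement effect = iota
	allowed
	denied
)

type policy map[string]effect // action name -> effect

// isAllowed: in normal mode an action needs an explicit allow; in deny-only
// mode the action passes unless a statement explicitly denies it.
func isAllowed(p policy, action string, denyOnly bool) bool {
	switch p[action] {
	case denied:
		return false
	case allowed:
		return true
	default:
		return denyOnly
	}
}

func main() {
	p := policy{"admin:CreateUser": denied}
	fmt.Println(isAllowed(p, "admin:CreateUser", true)) // false: explicitly denied
	fmt.Println(isAllowed(p, "admin:GetUser", true))    // true: not denied, deny-only check
	fmt.Println(isAllowed(p, "admin:GetUser", false))   // false: no explicit allow
}
```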
@@ -24,6 +24,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -263,6 +264,7 @@ type ServerHTTPStats struct {
|
||||
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
|
||||
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
|
||||
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
|
||||
TotalS3Canceled ServerHTTPAPIStats `json:"totalS3Canceled"`
|
||||
}
|
||||
|
||||
// ServerInfoData holds storage, connections and other
|
||||
@@ -297,7 +299,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
|
||||
storageInfo, _ := objectAPI.StorageInfo(ctx)
|
||||
|
||||
// Collect any disk healing.
|
||||
healing, _ := getAggregatedBackgroundHealState(ctx)
|
||||
healing, _ := getAggregatedBackgroundHealState(ctx, nil)
|
||||
healDisks := make(map[string]struct{}, len(healing.HealDisks))
|
||||
for _, disk := range healing.HealDisks {
|
||||
healDisks[disk] = struct{}{}
|
||||
@@ -861,16 +863,14 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	keepConnLive(w, r, respCh)
}

func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
	var bgHealStates []madmin.BgHealState

	localHealState, ok := getLocalBackgroundHealStatus()
	if !ok {
		return madmin.BgHealState{}, errServerNotInitialized
	}

// getAggregatedBackgroundHealState returns the heal state of disks.
// If no ObjectLayer is provided no set status is returned.
func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
	// Get local heal status first
	bgHealStates = append(bgHealStates, localHealState)
	bgHealStates, ok := getBackgroundHealStatus(ctx, o)
	if !ok {
		return bgHealStates, errServerNotInitialized
	}

	if globalIsDistErasure {
		// Get heal status from other peers
@@ -885,33 +885,10 @@ func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState,
		if errCount == len(nerrs) {
			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
		}
		bgHealStates = append(bgHealStates, peersHealStates...)
		bgHealStates.Merge(peersHealStates...)
	}

	// Aggregate healing result
	var aggregatedHealStateResult = madmin.BgHealState{
		ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
		LastHealActivity:  bgHealStates[0].LastHealActivity,
		NextHealRound:     bgHealStates[0].NextHealRound,
		HealDisks:         bgHealStates[0].HealDisks,
	}

	bgHealStates = bgHealStates[1:]

	for _, state := range bgHealStates {
		aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
		aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
		if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
			aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
			// The node which has the last heal activity means its
			// is the node that is orchestrating self healing operations,
			// which also means it is the same node which decides when
			// the next self healing operation will be done.
			aggregatedHealStateResult.NextHealRound = state.NextHealRound
		}
	}

	return aggregatedHealStateResult, nil
	return bgHealStates, nil
}
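The rewritten aggregation no longer folds a slice of per-node states by hand; the local state is merged with peer states through a `Merge` method on the madmin type. The sketch below shows the general merge pattern with a simplified local struct (not `madmin.BgHealState`): counters add up, disk lists append, and the newest activity timestamp wins, which is the same idea the removed loop implemented.

```go
package main

import (
	"fmt"
	"time"
)

// bgHealState is a simplified stand-in for an aggregated heal status.
type bgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
	HealDisks         []string
}

// merge folds other states into s: counts are summed, disk lists appended,
// and the most recent LastHealActivity is kept.
func (s *bgHealState) merge(others ...bgHealState) {
	for _, o := range others {
		s.ScannedItemsCount += o.ScannedItemsCount
		s.HealDisks = append(s.HealDisks, o.HealDisks...)
		if o.LastHealActivity.After(s.LastHealActivity) {
			s.LastHealActivity = o.LastHealActivity
		}
	}
}

func main() {
	local := bgHealState{ScannedItemsCount: 10, LastHealActivity: time.Now().Add(-time.Hour)}
	peer := bgHealState{ScannedItemsCount: 25, LastHealActivity: time.Now(), HealDisks: []string{"http://node2:9000/data1"}}
	local.merge(peer)
	fmt.Println(local.ScannedItemsCount, len(local.HealDisks))
}
```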
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
@@ -930,7 +907,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
		return
	}

	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -1348,8 +1325,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
	defer cancel()

	var err error
	nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
	if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
	ctx, err = nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
	if err != nil { // returns a locked lock
		errResp(err)
		return
	}
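HealthInfoHandler adapts to a namespace-lock API that now hands back a context along with the error, so work performed under the lock can observe cancellation if the lock goes away. A minimal sketch of that shape, using a toy locker type rather than MinIO's NSLock:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// toyLock illustrates a lock whose acquisition returns a derived context.
type toyLock struct{ cancel context.CancelFunc }

// getLock "acquires" the lock and returns a context that is canceled when the
// lock is released (or, in a real system, when ownership is lost).
func (l *toyLock) getLock(ctx context.Context, timeout time.Duration) (context.Context, error) {
	lockCtx, cancel := context.WithCancel(ctx)
	l.cancel = cancel
	return lockCtx, nil
}

func (l *toyLock) unlock() { l.cancel() }

func main() {
	var lk toyLock
	ctx, err := lk.getLock(context.Background(), time.Second)
	if err != nil {
		fmt.Println("could not acquire lock:", err)
		return
	}
	defer lk.unlock()

	// Downstream work watches ctx; it stops as soon as the lock goes away.
	select {
	case <-ctx.Done():
		fmt.Println("lock lost before work finished")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("did work while holding the lock")
	}
}
```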
@@ -1492,30 +1471,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
		return
	}

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	setEventStreamHeaders(w)
	reportCh := make(chan bandwidth.Report, 1)
	reportCh := make(chan bandwidth.Report)
	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()
	bucketsRequestedString := r.URL.Query().Get("buckets")
	bucketsRequested := strings.Split(bucketsRequestedString, ",")
	go func() {
		defer close(reportCh)
		for {
			reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
			select {
			case <-ctx.Done():
				return
			default:
				time.Sleep(2 * time.Second)
			case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
				time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
			}
		}
	}()
	for {
		select {
		case report := <-reportCh:
			enc := json.NewEncoder(w)
			err := enc.Encode(report)
			if err != nil {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		case report, ok := <-reportCh:
			if !ok {
				return
			}
			if err := json.NewEncoder(w).Encode(report); err != nil {
				writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
			w.(http.Flusher).Flush()
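The bandwidth monitor now pushes reports from a producer goroutine over an unbuffered channel, selecting between the send and `ctx.Done()` so the producer never blocks after the client goes away, and sleeping a randomized interval between reports; the consumer checks the `ok` flag so a closed channel ends the stream cleanly. A compact standalone version of that pattern:

```go
package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	reportCh := make(chan int) // unbuffered: the producer waits for a reader

	go func() {
		defer close(reportCh) // signals the consumer that no more reports will come
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return
			case reportCh <- i:
				// Jittered pause between reports, up to ~100ms here.
				time.Sleep(time.Duration(rnd.Float64() * float64(100*time.Millisecond)))
			}
		}
	}()

	for report := range reportCh { // the loop ends when the producer closes the channel
		fmt.Println("report", report)
	}
	fmt.Println("stream finished")
}
```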
@@ -1551,13 +1533,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
if globalLDAPConfig.Enabled {
|
||||
ldapConn, err := globalLDAPConfig.Connect()
|
||||
if err != nil {
|
||||
ldap.Status = "offline"
|
||||
ldap.Status = string(madmin.ItemOffline)
|
||||
} else if ldapConn == nil {
|
||||
ldap.Status = "Not Configured"
|
||||
} else {
|
||||
// Close ldap connection to avoid leaks.
|
||||
ldapConn.Close()
|
||||
ldap.Status = "online"
|
||||
ldap.Status = string(madmin.ItemOnline)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1566,12 +1548,14 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
// Get the notification target info
|
||||
notifyTarget := fetchLambdaInfo()
|
||||
|
||||
server := getLocalServerProperty(globalEndpoints, r)
|
||||
local := getLocalServerProperty(globalEndpoints, r)
|
||||
servers := globalNotificationSys.ServerInfo()
|
||||
servers = append(servers, server)
|
||||
servers = append(servers, local)
|
||||
|
||||
assignPoolNumbers(servers)
|
||||
|
||||
var backend interface{}
|
||||
mode := madmin.ObjectLayerInitializing
|
||||
mode := madmin.ItemInitializing
|
||||
|
||||
buckets := madmin.Buckets{}
|
||||
objects := madmin.Objects{}
|
||||
@@ -1579,7 +1563,8 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI != nil {
|
||||
mode = madmin.ObjectLayerOnline
|
||||
mode = madmin.ItemOnline
|
||||
|
||||
// Load data usage
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err == nil {
|
||||
@@ -1594,7 +1579,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
|
||||
// Fetching the backend information
|
||||
backendInfo := objectAPI.BackendInfo()
|
||||
if backendInfo.Type == BackendType(madmin.Erasure) {
|
||||
if backendInfo.Type == madmin.Erasure {
|
||||
// Calculate the number of online/offline disks of all nodes
|
||||
var allDisks []madmin.Disk
|
||||
for _, s := range servers {
|
||||
@@ -1626,7 +1611,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
infoMsg := madmin.InfoMessage{
|
||||
Mode: mode,
|
||||
Mode: string(mode),
|
||||
Domain: domain,
|
||||
Region: globalServerRegion,
|
||||
SQSARN: globalNotificationSys.GetARNList(false),
|
||||
@@ -1651,6 +1636,22 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
writeSuccessResponseJSON(w, jsonBytes)
|
||||
}
|
||||
|
||||
func assignPoolNumbers(servers []madmin.ServerProperties) {
	for i := range servers {
		for idx, ge := range globalEndpoints {
			for _, endpoint := range ge.Endpoints {
				if servers[i].Endpoint == endpoint.Host {
					servers[i].PoolNumber = idx + 1
				} else if host, err := xnet.ParseHost(servers[i].Endpoint); err == nil {
					if host.Name == endpoint.Hostname() {
						servers[i].PoolNumber = idx + 1
					}
				}
			}
		}
	}
}
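assignPoolNumbers tags each reported server with a 1-based pool number by matching its endpoint against the configured endpoint pools, falling back to a hostname-only comparison when the ports differ. A rough standalone equivalent using only the standard library; the `hostOf` helper and the endpoint data are illustrative, not MinIO's:

```go
package main

import (
	"fmt"
	"net"
)

// hostOf strips an optional ":port" suffix from an endpoint string.
func hostOf(endpoint string) string {
	if host, _, err := net.SplitHostPort(endpoint); err == nil {
		return host
	}
	return endpoint
}

func main() {
	// Endpoints grouped per pool (pool 1 first, then pool 2, ...); illustrative only.
	pools := [][]string{
		{"node1:9000", "node2:9000"},
		{"node3:9000", "node4:9000"},
	}
	servers := []string{"node2:9000", "node4:9001", "node5:9000"}

	for _, srv := range servers {
		pool := 0 // 0 means "no pool matched"
		for idx, endpoints := range pools {
			for _, ep := range endpoints {
				// Exact endpoint match, or hostname-only match when ports differ.
				if srv == ep || hostOf(srv) == hostOf(ep) {
					pool = idx + 1
				}
			}
		}
		fmt.Printf("%s -> pool %d\n", srv, pool)
	}
}
```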
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
||||
|
||||
lambdaMap := make(map[string][]madmin.TargetIDStatus)
|
||||
@@ -1660,9 +1661,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
||||
active, _ := tgt.IsActive()
|
||||
targetID := tgt.ID()
|
||||
if active {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
|
||||
} else {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
|
||||
}
|
||||
list := lambdaMap[targetID.Name]
|
||||
list = append(list, targetIDStatus)
|
||||
@@ -1674,9 +1675,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
|
||||
active, _ := tgt.IsActive()
|
||||
targetID := tgt.ID()
|
||||
if active {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
|
||||
} else {
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
|
||||
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
|
||||
}
|
||||
list := lambdaMap[targetID.Name]
|
||||
list = append(list, targetIDStatus)
|
||||
@@ -1709,9 +1710,9 @@ func fetchKMSStatus() madmin.KMS {
|
||||
}
|
||||
|
||||
if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
|
||||
kmsStat.Status = "offline"
|
||||
kmsStat.Status = string(madmin.ItemOffline)
|
||||
} else {
|
||||
kmsStat.Status = "online"
|
||||
kmsStat.Status = string(madmin.ItemOnline)
|
||||
|
||||
kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
|
||||
// 1. Generate a new key using the KMS.
|
||||
@@ -1719,7 +1720,7 @@ func fetchKMSStatus() madmin.KMS {
|
||||
if err != nil {
|
||||
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
|
||||
} else {
|
||||
kmsStat.Encrypt = "Ok"
|
||||
kmsStat.Encrypt = "success"
|
||||
}
|
||||
|
||||
// 2. Verify that we can indeed decrypt the (encrypted) key
|
||||
@@ -1730,7 +1731,7 @@ func fetchKMSStatus() madmin.KMS {
|
||||
case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
|
||||
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
|
||||
default:
|
||||
kmsStat.Decrypt = "Ok"
|
||||
kmsStat.Decrypt = "success"
|
||||
}
|
||||
}
|
||||
return kmsStat
|
||||
@@ -1746,11 +1747,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
|
||||
err := checkConnection(target.Endpoint(), 15*time.Second)
|
||||
if err == nil {
|
||||
mapLog := make(map[string]madmin.Status)
|
||||
mapLog[tgt] = madmin.Status{Status: "Online"}
|
||||
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
|
||||
loggerInfo = append(loggerInfo, mapLog)
|
||||
} else {
|
||||
mapLog := make(map[string]madmin.Status)
|
||||
mapLog[tgt] = madmin.Status{Status: "offline"}
|
||||
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
|
||||
loggerInfo = append(loggerInfo, mapLog)
|
||||
}
|
||||
}
|
||||
@@ -1762,11 +1763,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
|
||||
err := checkConnection(target.Endpoint(), 15*time.Second)
|
||||
if err == nil {
|
||||
mapAudit := make(map[string]madmin.Status)
|
||||
mapAudit[tgt] = madmin.Status{Status: "Online"}
|
||||
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
|
||||
auditloggerInfo = append(auditloggerInfo, mapAudit)
|
||||
} else {
|
||||
mapAudit := make(map[string]madmin.Status)
|
||||
mapAudit[tgt] = madmin.Status{Status: "Offline"}
|
||||
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
|
||||
auditloggerInfo = append(auditloggerInfo, mapAudit)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -90,8 +89,9 @@ type allHealState struct {
|
||||
sync.RWMutex
|
||||
|
||||
// map of heal path to heal sequence
|
||||
healSeqMap map[string]*healSequence
|
||||
healSeqMap map[string]*healSequence // Indexed by endpoint
|
||||
healLocalDisks map[Endpoint]struct{}
|
||||
healStatus map[string]healingTracker // Indexed by disk ID
|
||||
}
|
||||
|
||||
// newHealState - initialize global heal state management
|
||||
@@ -99,6 +99,7 @@ func newHealState(cleanup bool) *allHealState {
|
||||
hstate := &allHealState{
|
||||
healSeqMap: make(map[string]*healSequence),
|
||||
healLocalDisks: map[Endpoint]struct{}{},
|
||||
healStatus: make(map[string]healingTracker),
|
||||
}
|
||||
if cleanup {
|
||||
go hstate.periodicHealSeqsClean(GlobalContext)
|
||||
@@ -113,7 +114,56 @@ func (ahs *allHealState) healDriveCount() int {
|
||||
return len(ahs.healLocalDisks)
|
||||
}
|
||||
|
||||
func (ahs *allHealState) getHealLocalDisks() Endpoints {
|
||||
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
|
||||
for _, ep := range healLocalDisks {
|
||||
delete(ahs.healLocalDisks, ep)
|
||||
}
|
||||
for id, disk := range ahs.healStatus {
|
||||
for _, ep := range healLocalDisks {
|
||||
if disk.Endpoint == ep.String() {
|
||||
delete(ahs.healStatus, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateHealStatus will update the heal status.
|
||||
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
ahs.healStatus[tracker.ID] = *tracker
|
||||
}
|
||||
|
||||
// Sort by zone, set and disk index
|
||||
func sortDisks(disks []madmin.Disk) {
|
||||
sort.Slice(disks, func(i, j int) bool {
|
||||
a, b := &disks[i], &disks[j]
|
||||
if a.PoolIndex != b.PoolIndex {
|
||||
return a.PoolIndex < b.PoolIndex
|
||||
}
|
||||
if a.SetIndex != b.SetIndex {
|
||||
return a.SetIndex < b.SetIndex
|
||||
}
|
||||
return a.DiskIndex < b.DiskIndex
|
||||
})
|
||||
}
|
||||
|
||||
// getLocalHealingDisks returns local healing disks indexed by endpoint.
|
||||
func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
|
||||
for _, v := range ahs.healStatus {
|
||||
dst[v.Endpoint] = v.toHealingDisk()
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
|
||||
@@ -124,15 +174,6 @@ func (ahs *allHealState) getHealLocalDisks() Endpoints {
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
|
||||
for _, ep := range healLocalDisks {
|
||||
delete(ahs.healLocalDisks, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
@@ -841,11 +882,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
			return errHealStopSignalled
		}

		// Skip metacache entries healing
		if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
			return nil
		}

		err := h.queueHealTask(healSource{
			bucket: bucket,
			object: object,
@@ -42,16 +42,16 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
|
||||
}
|
||||
if endpoint.IsLocal {
|
||||
// Only proceed for local endpoints
|
||||
network[nodeName] = "online"
|
||||
network[nodeName] = string(madmin.ItemOnline)
|
||||
localEndpoints = append(localEndpoints, endpoint)
|
||||
continue
|
||||
}
|
||||
_, present := network[nodeName]
|
||||
if !present {
|
||||
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
|
||||
network[nodeName] = "online"
|
||||
network[nodeName] = string(madmin.ItemOnline)
|
||||
} else {
|
||||
network[nodeName] = "offline"
|
||||
network[nodeName] = string(madmin.ItemOffline)
|
||||
// log once the error
|
||||
logger.LogOnceIf(context.Background(), err, nodeName)
|
||||
}
|
||||
@@ -59,35 +59,22 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
|
||||
}
|
||||
}
|
||||
|
||||
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
|
||||
defer closeStorageDisks(localDisks)
|
||||
|
||||
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
|
||||
|
||||
return madmin.ServerProperties{
|
||||
State: "ok",
|
||||
props := madmin.ServerProperties{
|
||||
State: string(madmin.ItemInitializing),
|
||||
Endpoint: addr,
|
||||
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
|
||||
Version: Version,
|
||||
CommitID: CommitID,
|
||||
Network: network,
|
||||
Disks: storageInfo.Disks,
|
||||
}
|
||||
}
|
||||
|
||||
func getLocalDisks(endpointServerPools EndpointServerPools) []madmin.Disk {
|
||||
var localEndpoints Endpoints
|
||||
for _, ep := range endpointServerPools {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if !endpoint.IsLocal {
|
||||
continue
|
||||
}
|
||||
localEndpoints = append(localEndpoints, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
|
||||
defer closeStorageDisks(localDisks)
|
||||
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
|
||||
return storageInfo.Disks
|
||||
objLayer := newObjectLayerFn()
|
||||
if objLayer != nil && !globalIsGateway {
|
||||
// only need Disks information in server mode.
|
||||
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
|
||||
props.State = string(madmin.ItemOnline)
|
||||
props.Disks = storageInfo.Disks
|
||||
}
|
||||
|
||||
return props
|
||||
}
|
||||
|
||||
@@ -179,22 +179,25 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
	if objInfo.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}

	if objInfo.ReplicationStatus.String() != "" {
		w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
	}

	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
		ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
			Name:             objInfo.Name,
			UserTags:         objInfo.UserTags,
			VersionID:        objInfo.VersionID,
			ModTime:          objInfo.ModTime,
			IsLatest:         objInfo.IsLatest,
			DeleteMarker:     objInfo.DeleteMarker,
			SuccessorModTime: objInfo.SuccessorModTime,
		})
		if !expiryTime.IsZero() {
			w.Header()[xhttp.AmzExpiration] = []string{
				fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
		if opts.VersionID == "" {
			if ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
				Name:             objInfo.Name,
				UserTags:         objInfo.UserTags,
				VersionID:        objInfo.VersionID,
				ModTime:          objInfo.ModTime,
				IsLatest:         objInfo.IsLatest,
				DeleteMarker:     objInfo.DeleteMarker,
				SuccessorModTime: objInfo.SuccessorModTime,
			}); !expiryTime.IsZero() {
				w.Header()[xhttp.AmzExpiration] = []string{
					fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
				}
			}
		}
	if objInfo.TransitionStatus == lifecycle.TransitionComplete {
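When a lifecycle rule predicts an expiry for an unversioned request, the handler advertises it through the `x-amz-expiration` response header in the `expiry-date="...", rule-id="..."` form shown above, with the date in RFC 1123 / `http.TimeFormat`. A tiny illustration of composing that value (the rule ID and time are made up):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	ruleID := "expire-old-logs" // illustrative rule ID
	expiryTime := time.Date(2021, time.June, 1, 0, 0, 0, 0, time.UTC)

	// Same layout the handler writes into the x-amz-expiration header.
	value := fmt.Sprintf(`expiry-date="%s", rule-id="%s"`,
		expiryTime.Format(http.TimeFormat), ruleID)

	h := http.Header{}
	h.Set("x-amz-expiration", value)
	fmt.Println(h.Get("x-amz-expiration"))
}
```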
@@ -36,8 +36,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
		maxkeys = maxObjectList
	}

	prefix = values.Get("prefix")
	marker = values.Get("marker")
	prefix = trimLeadingSlash(values.Get("prefix"))
	marker = trimLeadingSlash(values.Get("marker"))
	delimiter = values.Get("delimiter")
	encodingType = values.Get("encoding-type")
	return
@@ -56,8 +56,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
		maxkeys = maxObjectList
	}

	prefix = values.Get("prefix")
	marker = values.Get("key-marker")
	prefix = trimLeadingSlash(values.Get("prefix"))
	marker = trimLeadingSlash(values.Get("key-marker"))
	delimiter = values.Get("delimiter")
	encodingType = values.Get("encoding-type")
	versionIDMarker = values.Get("version-id-marker")
@@ -86,8 +86,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
		maxkeys = maxObjectList
	}

	prefix = values.Get("prefix")
	startAfter = values.Get("start-after")
	prefix = trimLeadingSlash(values.Get("prefix"))
	startAfter = trimLeadingSlash(values.Get("start-after"))
	delimiter = values.Get("delimiter")
	fetchOwner = values.Get("fetch-owner") == "true"
	encodingType = values.Get("encoding-type")
@@ -117,8 +117,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
		maxUploads = maxUploadsList
	}

	prefix = values.Get("prefix")
	keyMarker = values.Get("key-marker")
	prefix = trimLeadingSlash(values.Get("prefix"))
	keyMarker = trimLeadingSlash(values.Get("key-marker"))
	uploadIDMarker = values.Get("upload-id-marker")
	delimiter = values.Get("delimiter")
	encodingType = values.Get("encoding-type")
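All four list-argument parsers now pass user-supplied prefixes and markers through `trimLeadingSlash`. The helper's body is not part of this diff; assuming it simply normalizes away a leading slash (object keys are never rooted), an equivalent standalone sketch would look like this:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// trimLeadingSlash is an assumed equivalent of the helper referenced above:
// it drops a leading "/" so "/photos/2021" and "photos/2021" list the same keys.
func trimLeadingSlash(s string) string {
	return strings.TrimPrefix(s, "/")
}

func main() {
	values, _ := url.ParseQuery("prefix=/photos/2021&marker=/photos/2021/01.jpg")
	prefix := trimLeadingSlash(values.Get("prefix"))
	marker := trimLeadingSlash(values.Get("marker"))
	fmt.Println(prefix, marker) // photos/2021 photos/2021/01.jpg
}
```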
@@ -48,6 +48,12 @@ type LocationResponse struct {
|
||||
Location string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
|
||||
type PolicyStatus struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
|
||||
IsPublic string
|
||||
}
|
||||
|
||||
// ListVersionsResponse - format for list bucket versions response.
|
||||
type ListVersionsResponse struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
|
||||
@@ -410,9 +416,11 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
listbuckets := make([]Bucket, 0, len(buckets))
|
||||
var data = ListBucketsResponse{}
|
||||
var owner = Owner{}
|
||||
var owner = Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
|
||||
owner.ID = globalMinioDefaultOwnerID
|
||||
for _, bucket := range buckets {
|
||||
var listbucket = Bucket{}
|
||||
listbucket.Name = bucket.Name
|
||||
@@ -429,10 +437,12 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var owner = Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListVersionsResponse{}
|
||||
|
||||
owner.ID = globalMinioDefaultOwnerID
|
||||
for _, object := range resp.Objects {
|
||||
var content = ObjectVersion{}
|
||||
if object.Name == "" {
|
||||
@@ -485,10 +495,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
contents := make([]Object, 0, len(resp.Objects))
|
||||
var owner = Owner{}
|
||||
var owner = Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListObjectsResponse{}
|
||||
|
||||
owner.ID = globalMinioDefaultOwnerID
|
||||
for _, object := range resp.Objects {
|
||||
var content = Object{}
|
||||
if object.Name == "" {
|
||||
@@ -532,12 +544,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
|
||||
contents := make([]Object, 0, len(objects))
|
||||
var owner = Owner{}
|
||||
var data = ListObjectsV2Response{}
|
||||
|
||||
if fetchOwner {
|
||||
owner.ID = globalMinioDefaultOwnerID
|
||||
var owner = Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListObjectsV2Response{}
|
||||
|
||||
for _, object := range objects {
|
||||
var content = Object{}
|
||||
|
||||
@@ -185,6 +185,10 @@ func registerAPIRouter(router *mux.Router) {
		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))

		// PostRestoreObject
		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")

		/// Bucket operations
		// GetBucketLocation
		bucket.Methods(http.MethodGet).HandlerFunc(
@@ -262,9 +266,9 @@ func registerAPIRouter(router *mux.Router) {
		// ListObjectVersions
		bucket.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
		// ListObjectsV1 (Legacy)
		// GetBucketPolicyStatus
		bucket.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
			collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
		// PutBucketLifecycle
		bucket.Methods(http.MethodPut).HandlerFunc(
			collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
@@ -320,9 +324,9 @@ func registerAPIRouter(router *mux.Router) {
		// DeleteBucket
		bucket.Methods(http.MethodDelete).HandlerFunc(
			collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
		// PostRestoreObject
		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
		// ListObjectsV1 (Legacy)
		bucket.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
	}

	/// Root operation
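The router changes above hinge on registration order and query-parameter matchers: gorilla/mux dispatches to the first route that matches, so the query-constrained routes (`?restore`, `?policyStatus`, `?versions`, ...) must be registered before the catch-all ListObjectsV1 route. A small self-contained sketch of that ordering, assuming the gorilla/mux dependency the router above already uses; the handler names and bucket path are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func handler(name string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, name)
	}
}

func main() {
	router := mux.NewRouter()
	bucket := router.PathPrefix("/{bucket}").Subrouter()

	// More specific matchers first: POST /{bucket}/{object}?restore ...
	bucket.Methods(http.MethodPost).Path("/{object:.+}").
		HandlerFunc(handler("restore-object")).Queries("restore", "")
	// ... and the query-constrained bucket GET: /{bucket}?policyStatus
	bucket.Methods(http.MethodGet).
		HandlerFunc(handler("policy-status")).Queries("policyStatus", "")
	// The catch-all bucket GET (ListObjectsV1) must come last.
	bucket.Methods(http.MethodGet).HandlerFunc(handler("list-objects-v1"))

	for _, target := range []string{"/testbucket?policyStatus", "/testbucket"} {
		req := httptest.NewRequest(http.MethodGet, target, nil)
		rec := httptest.NewRecorder()
		router.ServeHTTP(rec, req)
		fmt.Printf("%-26s -> %s", target, rec.Body.String())
	}
}
```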
@@ -36,6 +36,7 @@ import (
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/etag"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
@@ -161,6 +162,7 @@ func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolic
|
||||
}
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.Action(action),
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
@@ -185,12 +187,12 @@ func getSessionToken(r *http.Request) (token string) {
|
||||
// Fetch claims in the security token returned by the client, doesn't return
|
||||
// errors - upon errors the returned claims map will be empty.
|
||||
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
|
||||
claims, _ := getClaimsFromToken(r, getSessionToken(r))
|
||||
claims, _ := getClaimsFromToken(getSessionToken(r))
|
||||
return claims
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client.
|
||||
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
|
||||
func getClaimsFromToken(token string) (map[string]interface{}, error) {
|
||||
claims := xjwt.NewMapClaims()
|
||||
if token == "" {
|
||||
return claims.Map(), nil
|
||||
@@ -237,7 +239,7 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
|
||||
if err != nil {
|
||||
// Base64 decoding fails, we should log to indicate
|
||||
// something is malforming the request sent by client.
|
||||
logger.LogIf(r.Context(), err, logger.Application)
|
||||
logger.LogIf(GlobalContext, err, logger.Application)
|
||||
return nil, errAuthentication
|
||||
}
|
||||
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
|
||||
@@ -258,7 +260,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
claims, err := getClaimsFromToken(r, token)
|
||||
claims, err := getClaimsFromToken(token)
|
||||
if err != nil {
|
||||
return nil, toAPIErrorCode(r.Context(), err)
|
||||
}
|
||||
@@ -271,7 +273,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
|
||||
// for authenticated requests validates IAM policies.
|
||||
// returns APIErrorCode if any to be replied to the client.
|
||||
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
|
||||
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
|
||||
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
|
||||
return s3Err
|
||||
}
|
||||
|
||||
@@ -281,14 +283,13 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
|
||||
// for authenticated requests validates IAM policies.
|
||||
// returns APIErrorCode if any to be replied to the client.
|
||||
// Additionally returns the accessKey used in the request, and if this request is by an admin.
|
||||
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
|
||||
switch getRequestAuthType(r) {
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return accessKey, owner, ErrSignatureVersionNotSupported
|
||||
return cred, owner, ErrSignatureVersionNotSupported
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
@@ -298,18 +299,18 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
region = ""
|
||||
}
|
||||
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
|
||||
var claims map[string]interface{}
|
||||
claims, s3Err = checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
|
||||
// LocationConstraint is valid only for CreateBucketAction.
|
||||
@@ -319,7 +320,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
return accessKey, owner, ErrMalformedXML
|
||||
return cred, owner, ErrMalformedXML
|
||||
}
|
||||
|
||||
// Populate payload to extract location constraint.
|
||||
@@ -328,7 +329,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
var s3Error APIErrorCode
|
||||
locationConstraint, s3Error = parseLocationConstraint(r)
|
||||
if s3Error != ErrNone {
|
||||
return accessKey, owner, s3Error
|
||||
return cred, owner, s3Error
|
||||
}
|
||||
|
||||
// Populate payload again to handle it in HTTP handler.
|
||||
@@ -349,7 +350,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
|
||||
if action == policy.ListBucketVersionsAction {
|
||||
@@ -364,15 +365,16 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
}
|
||||
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
return cred, owner, ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
@@ -381,7 +383,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
Claims: claims,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
|
||||
if action == policy.ListBucketVersionsAction {
|
||||
@@ -389,6 +391,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// verify as a fallback.
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.ListBucketAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
@@ -397,11 +400,11 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
Claims: claims,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
}
|
||||
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
return cred, owner, ErrAccessDenied
|
||||
}
|
||||
|
||||
// Verify if request has valid AWS Signature Version '2'.
|
||||
@@ -430,19 +433,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
|
||||
return errCode
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
contentMD5, contentSHA256 []byte
|
||||
)
|
||||
|
||||
// Extract 'Content-Md5' if present.
|
||||
contentMD5, err = checkValidMD5(r.Header)
|
||||
clientETag, err := etag.FromContentMD5(r.Header)
|
||||
if err != nil {
|
||||
return ErrInvalidDigest
|
||||
}
|
||||
|
||||
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
|
||||
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
|
||||
var contentSHA256 []byte
|
||||
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
|
||||
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
|
||||
contentSHA256, err = hex.DecodeString(sha256Sum[0])
|
||||
@@ -459,7 +457,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
|
||||
|
||||
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
|
||||
// The verification happens implicit during reading.
|
||||
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
|
||||
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
|
||||
if err != nil {
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
@@ -553,6 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.BypassGovernanceRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
@@ -562,6 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
}
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.PutObjectRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
@@ -585,6 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.BypassGovernanceRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ObjectName: objectName,
|
||||
@@ -595,6 +596,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
}
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.PutObjectRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
@@ -650,6 +652,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", "", nil),
|
||||
@@ -663,6 +666,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: action,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
|
||||
@@ -17,16 +17,23 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/color"
|
||||
"github.com/minio/minio/pkg/console"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -35,10 +42,200 @@ const (
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
type healingTracker struct {
|
||||
ID string
|
||||
|
||||
// future add more tracking capabilities
|
||||
// healingTracker is used to persist healing information during a heal.
|
||||
type healingTracker struct {
|
||||
disk StorageAPI `msg:"-"`
|
||||
|
||||
ID string
|
||||
PoolIndex int
|
||||
SetIndex int
|
||||
DiskIndex int
|
||||
Path string
|
||||
Endpoint string
|
||||
Started time.Time
|
||||
LastUpdate time.Time
|
||||
ObjectsHealed uint64
|
||||
ObjectsFailed uint64
|
||||
BytesDone uint64
|
||||
BytesFailed uint64
|
||||
|
||||
// Last object scanned.
|
||||
Bucket string `json:"-"`
|
||||
Object string `json:"-"`
|
||||
|
||||
// Numbers when current bucket started healing,
|
||||
// for resuming with correct numbers.
|
||||
ResumeObjectsHealed uint64 `json:"-"`
|
||||
ResumeObjectsFailed uint64 `json:"-"`
|
||||
ResumeBytesDone uint64 `json:"-"`
|
||||
ResumeBytesFailed uint64 `json:"-"`
|
||||
|
||||
// Filled on startup/restarts.
|
||||
QueuedBuckets []string
|
||||
|
||||
// Filled during heal.
|
||||
HealedBuckets []string
|
||||
// Add future tracking capabilities
|
||||
// Be sure that they are included in toHealingDisk
|
||||
}
|
||||
|
||||
// loadHealingTracker will load the healing tracker from the supplied disk.
|
||||
// The disk ID will be validated against the loaded one.
|
||||
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
|
||||
if disk == nil {
|
||||
return nil, errors.New("loadHealingTracker: nil disk given")
|
||||
}
|
||||
diskID, err := disk.GetDiskID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := disk.ReadAll(ctx, minioMetaBucket,
|
||||
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var h healingTracker
|
||||
_, err = h.UnmarshalMsg(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if h.ID != diskID && h.ID != "" {
|
||||
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
|
||||
}
|
||||
h.disk = disk
|
||||
h.ID = diskID
|
||||
return &h, nil
|
||||
}
|
||||
|
||||
// newHealingTracker will create a new healing tracker for the disk.
|
||||
func newHealingTracker(disk StorageAPI) *healingTracker {
|
||||
diskID, _ := disk.GetDiskID()
|
||||
h := healingTracker{
|
||||
disk: disk,
|
||||
ID: diskID,
|
||||
Path: disk.String(),
|
||||
Endpoint: disk.Endpoint().String(),
|
||||
Started: time.Now().UTC(),
|
||||
}
|
||||
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
|
||||
return &h
|
||||
}
|
||||
|
||||
// update will update the tracker on the disk.
|
||||
// If the tracker has been deleted an error is returned.
|
||||
func (h *healingTracker) update(ctx context.Context) error {
|
||||
if h.disk.Healing() == nil {
|
||||
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
|
||||
}
|
||||
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
|
||||
h.ID, _ = h.disk.GetDiskID()
|
||||
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
|
||||
}
|
||||
return h.save(ctx)
|
||||
}
|
||||
|
||||
// save will unconditionally save the tracker and will be created if not existing.
|
||||
func (h *healingTracker) save(ctx context.Context) error {
|
||||
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
|
||||
// Attempt to get location.
|
||||
if api := newObjectLayerFn(); api != nil {
|
||||
if ep, ok := api.(*erasureServerPools); ok {
|
||||
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
h.LastUpdate = time.Now().UTC()
|
||||
htrackerBytes, err := h.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
globalBackgroundHealState.updateHealStatus(h)
|
||||
return h.disk.WriteAll(ctx, minioMetaBucket,
|
||||
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
|
||||
htrackerBytes)
|
||||
}
|
||||
|
||||
// delete the tracker on disk.
|
||||
func (h *healingTracker) delete(ctx context.Context) error {
|
||||
return h.disk.Delete(ctx, minioMetaBucket,
|
||||
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
|
||||
false)
|
||||
}
|
||||
|
||||
func (h *healingTracker) isHealed(bucket string) bool {
|
||||
for _, v := range h.HealedBuckets {
|
||||
if v == bucket {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// resume will reset progress to the numbers at the start of the bucket.
|
||||
func (h *healingTracker) resume() {
|
||||
h.ObjectsHealed = h.ResumeObjectsHealed
|
||||
h.ObjectsFailed = h.ResumeObjectsFailed
|
||||
h.BytesDone = h.ResumeBytesDone
|
||||
h.BytesFailed = h.ResumeBytesFailed
|
||||
}
|
||||
|
||||
// bucketDone should be called when a bucket is done healing.
|
||||
// Adds the bucket to the list of healed buckets and updates resume numbers.
|
||||
func (h *healingTracker) bucketDone(bucket string) {
|
||||
h.ResumeObjectsHealed = h.ObjectsHealed
|
||||
h.ResumeObjectsFailed = h.ObjectsFailed
|
||||
h.ResumeBytesDone = h.BytesDone
|
||||
h.ResumeBytesFailed = h.BytesFailed
|
||||
h.HealedBuckets = append(h.HealedBuckets, bucket)
|
||||
for i, b := range h.QueuedBuckets {
|
||||
if b == bucket {
|
||||
// Delete...
|
||||
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setQueuedBuckets will add buckets, but exclude any that is already in h.HealedBuckets.
|
||||
// Order is preserved.
|
||||
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
|
||||
s := set.CreateStringSet(h.HealedBuckets...)
|
||||
h.QueuedBuckets = make([]string, 0, len(buckets))
|
||||
for _, b := range buckets {
|
||||
if !s.Contains(b.Name) {
|
||||
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *healingTracker) printTo(writer io.Writer) {
|
||||
b, err := json.MarshalIndent(h, "", " ")
|
||||
if err != nil {
|
||||
writer.Write([]byte(err.Error()))
|
||||
}
|
||||
writer.Write(b)
|
||||
}
|
||||
|
||||
// toHealingDisk converts the information to madmin.HealingDisk
|
||||
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
|
||||
return madmin.HealingDisk{
|
||||
ID: h.ID,
|
||||
Endpoint: h.Endpoint,
|
||||
PoolIndex: h.PoolIndex,
|
||||
SetIndex: h.SetIndex,
|
||||
DiskIndex: h.DiskIndex,
|
||||
Path: h.Path,
|
||||
Started: h.Started.UTC(),
|
||||
LastUpdate: h.LastUpdate.UTC(),
|
||||
ObjectsHealed: h.ObjectsHealed,
|
||||
ObjectsFailed: h.ObjectsFailed,
|
||||
BytesDone: h.BytesDone,
|
||||
BytesFailed: h.BytesFailed,
|
||||
Bucket: h.Bucket,
|
||||
Object: h.Object,
|
||||
QueuedBuckets: h.QueuedBuckets,
|
||||
HealedBuckets: h.HealedBuckets,
|
||||
}
|
||||
}
|
||||
|
||||
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
@@ -90,7 +287,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
|
||||
disk, _, err := connectEndpoint(endpoint)
|
||||
if errors.Is(err, errUnformattedDisk) {
|
||||
disksToHeal = append(disksToHeal, endpoint)
|
||||
} else if err == nil && disk != nil && disk.Healing() {
|
||||
} else if err == nil && disk != nil && disk.Healing() != nil {
|
||||
disksToHeal = append(disksToHeal, disk.Endpoint())
|
||||
}
|
||||
}
|
||||
@@ -114,7 +311,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
|
||||
// Perform automatic disk healing when a disk is replaced locally.
|
||||
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
|
||||
defer diskCheckTimer.Stop()
|
||||
wait:
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -125,7 +322,7 @@ wait:
|
||||
|
||||
var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
|
||||
|
||||
healDisks := globalBackgroundHealState.getHealLocalDisks()
|
||||
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
|
||||
if len(healDisks) > 0 {
|
||||
// Reformat disks
|
||||
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
|
||||
@@ -174,55 +371,76 @@ wait:
|
||||
|
||||
buckets, _ := z.ListBuckets(ctx)
|
||||
|
||||
buckets = append(buckets, BucketInfo{
|
||||
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
|
||||
})
|
||||
|
||||
// Buckets data are dispersed in multiple zones/sets, make
|
||||
// sure to heal all bucket metadata configuration.
|
||||
buckets = append(buckets, []BucketInfo{
|
||||
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
|
||||
}...)
|
||||
|
||||
// Heal latest buckets first.
|
||||
sort.Slice(buckets, func(i, j int) bool {
|
||||
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
|
||||
if a != b {
|
||||
return a
|
||||
}
|
||||
return buckets[i].Created.After(buckets[j].Created)
|
||||
})
|
||||
|
||||
// TODO(klauspost): This will block until all heals are done,
|
||||
// in the future this should be able to start healing other sets at once.
|
||||
var wg sync.WaitGroup
|
||||
for i, setMap := range erasureSetInPoolDisksToHeal {
|
||||
i := i
|
||||
for setIndex, disks := range setMap {
|
||||
for _, disk := range disks {
|
||||
logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
|
||||
if len(disks) == 0 {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(setIndex int, disks []StorageAPI) {
|
||||
defer wg.Done()
|
||||
for _, disk := range disks {
|
||||
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
|
||||
|
||||
// So someone changed the drives underneath, healing tracker missing.
|
||||
if !disk.Healing() {
|
||||
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
|
||||
diskID, err := disk.GetDiskID()
|
||||
// So someone changed the drives underneath, healing tracker missing.
|
||||
tracker, err := loadHealingTracker(ctx, disk)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
// reading format.json failed or not found, proceed to look
|
||||
// for new disks to be healed again, we cannot proceed further.
|
||||
goto wait
|
||||
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
|
||||
tracker = newHealingTracker(disk)
|
||||
}
|
||||
|
||||
if err := saveHealingTracker(disk, diskID); err != nil {
|
||||
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
|
||||
tracker.setQueuedBuckets(buckets)
|
||||
if err := tracker.save(ctx); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
// Unable to write healing tracker, permission denied or some
|
||||
// other unexpected error occurred. Proceed to look for new
|
||||
// disks to be healed again, we cannot proceed further.
|
||||
goto wait
|
||||
return
|
||||
}
|
||||
|
||||
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
|
||||
var buf bytes.Buffer
|
||||
tracker.printTo(&buf)
|
||||
logger.Info("Summary:\n%s", buf.String())
|
||||
logger.LogIf(ctx, tracker.delete(ctx))
|
||||
|
||||
// Only upon success pop the healed disk.
|
||||
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
|
||||
}
|
||||
|
||||
err := z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
|
||||
|
||||
if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
|
||||
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Only upon success pop the healed disk.
|
||||
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
|
||||
}
|
||||
}(setIndex, disks)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +30,146 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
case "PoolIndex":
|
||||
z.PoolIndex, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PoolIndex")
|
||||
return
|
||||
}
|
||||
case "SetIndex":
|
||||
z.SetIndex, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SetIndex")
|
||||
return
|
||||
}
|
||||
case "DiskIndex":
|
||||
z.DiskIndex, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DiskIndex")
|
||||
return
|
||||
}
|
||||
case "Path":
|
||||
z.Path, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Path")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Started":
|
||||
z.Started, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Started")
|
||||
return
|
||||
}
|
||||
case "LastUpdate":
|
||||
z.LastUpdate, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LastUpdate")
|
||||
return
|
||||
}
|
||||
case "ObjectsHealed":
|
||||
z.ObjectsHealed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsHealed")
|
||||
return
|
||||
}
|
||||
case "ObjectsFailed":
|
||||
z.ObjectsFailed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsFailed")
|
||||
return
|
||||
}
|
||||
case "BytesDone":
|
||||
z.BytesDone, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesDone")
|
||||
return
|
||||
}
|
||||
case "BytesFailed":
|
||||
z.BytesFailed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesFailed")
|
||||
return
|
||||
}
|
||||
case "Bucket":
|
||||
z.Bucket, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
case "Object":
|
||||
z.Object, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Object")
|
||||
return
|
||||
}
|
||||
case "ResumeObjectsHealed":
|
||||
z.ResumeObjectsHealed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsHealed")
|
||||
return
|
||||
}
|
||||
case "ResumeObjectsFailed":
|
||||
z.ResumeObjectsFailed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsFailed")
|
||||
return
|
||||
}
|
||||
case "ResumeBytesDone":
|
||||
z.ResumeBytesDone, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesDone")
|
||||
return
|
||||
}
|
||||
case "ResumeBytesFailed":
|
||||
z.ResumeBytesFailed, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesFailed")
|
||||
return
|
||||
}
|
||||
case "QueuedBuckets":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets")
|
||||
return
|
||||
}
|
||||
if cap(z.QueuedBuckets) >= int(zb0002) {
|
||||
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
|
||||
} else {
|
||||
z.QueuedBuckets = make([]string, zb0002)
|
||||
}
|
||||
for za0001 := range z.QueuedBuckets {
|
||||
z.QueuedBuckets[za0001], err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "HealedBuckets":
|
||||
var zb0003 uint32
|
||||
zb0003, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets")
|
||||
return
|
||||
}
|
||||
if cap(z.HealedBuckets) >= int(zb0003) {
|
||||
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
|
||||
} else {
|
||||
z.HealedBuckets = make([]string, zb0003)
|
||||
}
|
||||
for za0002 := range z.HealedBuckets {
|
||||
z.HealedBuckets[za0002], err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
@@ -42,10 +182,10 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 1
|
||||
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 20
|
||||
// write "ID"
|
||||
err = en.Append(0x81, 0xa2, 0x49, 0x44)
|
||||
err = en.Append(0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -54,16 +194,283 @@ func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
// write "PoolIndex"
|
||||
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.PoolIndex)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PoolIndex")
|
||||
return
|
||||
}
|
||||
// write "SetIndex"
|
||||
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.SetIndex)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SetIndex")
|
||||
return
|
||||
}
|
||||
// write "DiskIndex"
|
||||
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.DiskIndex)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DiskIndex")
|
||||
return
|
||||
}
|
||||
// write "Path"
|
||||
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Path)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Path")
|
||||
return
|
||||
}
|
||||
// write "Endpoint"
|
||||
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Endpoint)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
// write "Started"
|
||||
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteTime(z.Started)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Started")
|
||||
return
|
||||
}
|
||||
// write "LastUpdate"
|
||||
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteTime(z.LastUpdate)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LastUpdate")
|
||||
return
|
||||
}
|
||||
// write "ObjectsHealed"
|
||||
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ObjectsHealed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsHealed")
|
||||
return
|
||||
}
|
||||
// write "ObjectsFailed"
|
||||
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ObjectsFailed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsFailed")
|
||||
return
|
||||
}
|
||||
// write "BytesDone"
|
||||
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.BytesDone)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesDone")
|
||||
return
|
||||
}
|
||||
// write "BytesFailed"
|
||||
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.BytesFailed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesFailed")
|
||||
return
|
||||
}
|
||||
// write "Bucket"
|
||||
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Bucket)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
// write "Object"
|
||||
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Object)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Object")
|
||||
return
|
||||
}
|
||||
// write "ResumeObjectsHealed"
|
||||
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ResumeObjectsHealed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsHealed")
|
||||
return
|
||||
}
|
||||
// write "ResumeObjectsFailed"
|
||||
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ResumeObjectsFailed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsFailed")
|
||||
return
|
||||
}
|
||||
// write "ResumeBytesDone"
|
||||
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ResumeBytesDone)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesDone")
|
||||
return
|
||||
}
|
||||
// write "ResumeBytesFailed"
|
||||
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ResumeBytesFailed)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesFailed")
|
||||
return
|
||||
}
|
||||
// write "QueuedBuckets"
|
||||
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.QueuedBuckets {
|
||||
err = en.WriteString(z.QueuedBuckets[za0001])
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "HealedBuckets"
|
||||
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets")
|
||||
return
|
||||
}
|
||||
for za0002 := range z.HealedBuckets {
|
||||
err = en.WriteString(z.HealedBuckets[za0002])
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
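The hex literals in the generated encoder above are plain MessagePack prefixes rather than opaque magic: `0xde, 0x00, 0x14` is a map16 header declaring 20 fields (the replaced `0x81` was a fixmap holding a single field), and `0xa2, 0x49, 0x44` is the 2-byte fixstr `"ID"`. A minimal standalone sketch, assuming the same `github.com/tinylib/msgp` reader the generated code targets, decodes exactly those bytes:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// map16 header announcing 20 key/value pairs, followed by the
	// 2-byte fixstr key "ID" -- the same prefix EncodeMsg/MarshalMsg emit above.
	raw := []byte{0xde, 0x00, 0x14, 0xa2, 0x49, 0x44}
	dc := msgp.NewReader(bytes.NewReader(raw))

	fields, _ := dc.ReadMapHeader() // 20
	key, _ := dc.ReadString()       // "ID"
	fmt.Println(fields, key)
}
```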
// MarshalMsg implements msgp.Marshaler
|
||||
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 1
|
||||
// map header, size 20
|
||||
// string "ID"
|
||||
o = append(o, 0x81, 0xa2, 0x49, 0x44)
|
||||
o = append(o, 0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
|
||||
o = msgp.AppendString(o, z.ID)
|
||||
// string "PoolIndex"
|
||||
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
o = msgp.AppendInt(o, z.PoolIndex)
|
||||
// string "SetIndex"
|
||||
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
o = msgp.AppendInt(o, z.SetIndex)
|
||||
// string "DiskIndex"
|
||||
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
|
||||
o = msgp.AppendInt(o, z.DiskIndex)
|
||||
// string "Path"
|
||||
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
|
||||
o = msgp.AppendString(o, z.Path)
|
||||
// string "Endpoint"
|
||||
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
o = msgp.AppendString(o, z.Endpoint)
|
||||
// string "Started"
|
||||
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
|
||||
o = msgp.AppendTime(o, z.Started)
|
||||
// string "LastUpdate"
|
||||
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
|
||||
o = msgp.AppendTime(o, z.LastUpdate)
|
||||
// string "ObjectsHealed"
|
||||
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.ObjectsHealed)
|
||||
// string "ObjectsFailed"
|
||||
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.ObjectsFailed)
|
||||
// string "BytesDone"
|
||||
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
|
||||
o = msgp.AppendUint64(o, z.BytesDone)
|
||||
// string "BytesFailed"
|
||||
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.BytesFailed)
|
||||
// string "Bucket"
|
||||
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
o = msgp.AppendString(o, z.Bucket)
|
||||
// string "Object"
|
||||
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
|
||||
o = msgp.AppendString(o, z.Object)
|
||||
// string "ResumeObjectsHealed"
|
||||
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.ResumeObjectsHealed)
|
||||
// string "ResumeObjectsFailed"
|
||||
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.ResumeObjectsFailed)
|
||||
// string "ResumeBytesDone"
|
||||
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
|
||||
o = msgp.AppendUint64(o, z.ResumeBytesDone)
|
||||
// string "ResumeBytesFailed"
|
||||
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
|
||||
// string "QueuedBuckets"
|
||||
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
|
||||
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
|
||||
for za0001 := range z.QueuedBuckets {
|
||||
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
|
||||
}
|
||||
// string "HealedBuckets"
|
||||
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
|
||||
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
|
||||
for za0002 := range z.HealedBuckets {
|
||||
o = msgp.AppendString(o, z.HealedBuckets[za0002])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -91,6 +498,146 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "ID")
|
||||
return
|
||||
}
|
||||
case "PoolIndex":
|
||||
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PoolIndex")
|
||||
return
|
||||
}
|
||||
case "SetIndex":
|
||||
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "SetIndex")
|
||||
return
|
||||
}
|
||||
case "DiskIndex":
|
||||
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DiskIndex")
|
||||
return
|
||||
}
|
||||
case "Path":
|
||||
z.Path, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Path")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Started":
|
||||
z.Started, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Started")
|
||||
return
|
||||
}
|
||||
case "LastUpdate":
|
||||
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LastUpdate")
|
||||
return
|
||||
}
|
||||
case "ObjectsHealed":
|
||||
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsHealed")
|
||||
return
|
||||
}
|
||||
case "ObjectsFailed":
|
||||
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectsFailed")
|
||||
return
|
||||
}
|
||||
case "BytesDone":
|
||||
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesDone")
|
||||
return
|
||||
}
|
||||
case "BytesFailed":
|
||||
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "BytesFailed")
|
||||
return
|
||||
}
|
||||
case "Bucket":
|
||||
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Bucket")
|
||||
return
|
||||
}
|
||||
case "Object":
|
||||
z.Object, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Object")
|
||||
return
|
||||
}
|
||||
case "ResumeObjectsHealed":
|
||||
z.ResumeObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsHealed")
|
||||
return
|
||||
}
|
||||
case "ResumeObjectsFailed":
|
||||
z.ResumeObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeObjectsFailed")
|
||||
return
|
||||
}
|
||||
case "ResumeBytesDone":
|
||||
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesDone")
|
||||
return
|
||||
}
|
||||
case "ResumeBytesFailed":
|
||||
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ResumeBytesFailed")
|
||||
return
|
||||
}
|
||||
case "QueuedBuckets":
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets")
|
||||
return
|
||||
}
|
||||
if cap(z.QueuedBuckets) >= int(zb0002) {
|
||||
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
|
||||
} else {
|
||||
z.QueuedBuckets = make([]string, zb0002)
|
||||
}
|
||||
for za0001 := range z.QueuedBuckets {
|
||||
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QueuedBuckets", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "HealedBuckets":
|
||||
var zb0003 uint32
|
||||
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets")
|
||||
return
|
||||
}
|
||||
if cap(z.HealedBuckets) >= int(zb0003) {
|
||||
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
|
||||
} else {
|
||||
z.HealedBuckets = make([]string, zb0003)
|
||||
}
|
||||
for za0002 := range z.HealedBuckets {
|
||||
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "HealedBuckets", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
@@ -104,7 +651,14 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z healingTracker) Msgsize() (s int) {
|
||||
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
|
||||
func (z *healingTracker) Msgsize() (s int) {
|
||||
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.QueuedBuckets {
|
||||
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
|
||||
}
|
||||
s += 14 + msgp.ArrayHeaderSize
|
||||
for za0002 := range z.HealedBuckets {
|
||||
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func (err *errHashMismatch) Error() string {
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow *io.PipeWriter
|
||||
iow io.WriteCloser
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose chan struct{} // Needed to avoid race explained in Close() call.
|
||||
@@ -71,9 +71,10 @@ func (b *streamingBitrotWriter) Close() error {
|
||||
}
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
|
||||
r, w := io.Pipe()
|
||||
h := algo.New()
|
||||
|
||||
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
|
||||
go func() {
|
||||
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
|
||||
@@ -81,8 +82,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
|
||||
r.CloseWithError(err)
|
||||
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
|
||||
close(bw.canClose)
|
||||
}()
|
||||
return bw
|
||||
|
||||
@@ -96,9 +96,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
|
||||
return
|
||||
}
|
||||
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
|
||||
}
|
||||
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
|
||||
}
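The size bookkeeping in newStreamingBitrotWriter is worth a worked example: for a shard of known length, the on-disk file holds one checksum per shardSize chunk followed by the data itself. The sketch below uses the 35-byte/10-byte values from the test further down and assumes ceilFrac is the usual ceiling-division helper; the 32-byte hash size corresponds to HighwayHash-256.

```go
package main

import "fmt"

// ceilFrac is assumed to be ceiling division, i.e. ceil(n / d).
func ceilFrac(n, d int64) int64 {
	if d == 0 {
		return 0
	}
	return (n + d - 1) / d
}

func main() {
	const (
		length    int64 = 35 // shard length, as in the test below
		shardSize int64 = 10 // bitrot chunk size
		hashSize  int64 = 32 // HighwayHash-256 digest size in bytes
	)
	bitrotSumsTotalSize := ceilFrac(length, shardSize) * hashSize // 4 chunks * 32 = 128
	totalFileSize := bitrotSumsTotalSize + length                 // 128 + 35 = 163
	fmt.Println(bitrotSumsTotalSize, totalFileSize)
}
```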
@@ -41,7 +41,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
|
||||
|
||||
disk.MakeVol(context.Background(), volume)
|
||||
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)
|
||||
|
||||
_, err = writer.Write([]byte("aaaaaaaaaa"))
|
||||
if err != nil {
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
@@ -25,7 +27,6 @@ import (
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -291,7 +292,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
|
||||
|
||||
listBuckets := objectAPI.ListBuckets
|
||||
|
||||
accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
|
||||
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
|
||||
if s3Error != ErrNone && s3Error != ErrAccessDenied {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
@@ -337,16 +338,17 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
|
||||
|
||||
// err will be nil here as we already called this function
|
||||
// earlier in this request.
|
||||
claims, _ := getClaimsFromToken(r, getSessionToken(r))
|
||||
claims, _ := getClaimsFromToken(getSessionToken(r))
|
||||
n := 0
|
||||
// Use the following trick to filter in place
|
||||
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
|
||||
for _, bucketInfo := range bucketsInfo {
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: accessKey,
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: iampolicy.ListBucketAction,
|
||||
BucketName: bucketInfo.Name,
|
||||
ConditionValues: getConditionValues(r, "", accessKey, claims),
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
ObjectName: "",
|
||||
Claims: claims,
|
||||
@@ -411,6 +413,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
return
|
||||
}

// Trim the leading slash, if any, from each object name before deleting.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
}
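trimLeadingSlash itself is not shown in this diff; the following is only a minimal sketch of what such a normalization helper typically does, not the actual implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// trimLeadingSlash strips leading "/" characters from an object key so that
// "/photos/a.jpg" and "photos/a.jpg" refer to the same object.
// Sketch only; the real helper in this codebase may differ.
func trimLeadingSlash(key string) string {
	return strings.TrimLeft(key, "/")
}

func main() {
	fmt.Println(trimLeadingSlash("/photos/a.jpg")) // photos/a.jpg
}
```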
// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
|
||||
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
|
||||
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
|
||||
@@ -489,7 +496,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
object.PurgeTransitioned = goi.TransitionStatus
|
||||
}
|
||||
if replicateDeletes {
|
||||
delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
|
||||
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
|
||||
ObjectName: object.ObjectName,
|
||||
VersionID: object.VersionID,
|
||||
}, goi, gerr)
|
||||
@@ -504,9 +511,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
}
|
||||
if object.VersionID != "" {
|
||||
object.VersionPurgeStatus = Pending
|
||||
if delMarker {
|
||||
object.DeleteMarkerVersionID = object.VersionID
|
||||
}
|
||||
} else {
|
||||
object.DeleteMarkerReplicationStatus = string(replication.Pending)
|
||||
}
|
||||
@@ -550,13 +554,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
})
|
||||
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
|
||||
for i := range errs {
|
||||
dindex := objectsToDelete[ObjectToDelete{
|
||||
// DeleteMarkerVersionID is not used specifically to avoid
|
||||
// lookup errors, since DeleteMarkerVersionID is only
|
||||
// created during DeleteMarker creation when client didn't
|
||||
// specify a versionID.
|
||||
objToDel := ObjectToDelete{
|
||||
ObjectName: dObjects[i].ObjectName,
|
||||
VersionID: dObjects[i].VersionID,
|
||||
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
|
||||
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
|
||||
PurgeTransitioned: dObjects[i].PurgeTransitioned,
|
||||
}]
|
||||
}
|
||||
dindex := objectsToDelete[objToDel]
|
||||
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
|
||||
if replicateDeletes {
|
||||
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
|
||||
@@ -612,12 +621,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
|
||||
eventName := event.ObjectRemovedDelete
|
||||
objInfo := ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
VersionID: dobj.VersionID,
|
||||
Name: dobj.ObjectName,
|
||||
VersionID: dobj.VersionID,
|
||||
DeleteMarker: dobj.DeleteMarker,
|
||||
}
|
||||
|
||||
if dobj.DeleteMarker {
|
||||
objInfo.DeleteMarker = dobj.DeleteMarker
|
||||
if objInfo.DeleteMarker {
|
||||
objInfo.VersionID = dobj.DeleteMarkerVersionID
|
||||
eventName = event.ObjectRemovedDeleteMarkerCreated
|
||||
}
|
||||
@@ -797,13 +806,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// Make sure that the URL does not contain object name.
|
||||
if bucket != filepath.Clean(resource[1:]) {
|
||||
|
||||
// Make sure that the URL does not contain object name.
|
||||
if bucket != path.Clean(resource[1:]) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
@@ -846,13 +857,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
defer fileBody.Close()
|
||||
|
||||
formValues.Set("Bucket", bucket)
|
||||
|
||||
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
|
||||
// S3 feature to replace ${filename} found in Key form field
|
||||
// by the filename attribute passed in multipart
|
||||
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
|
||||
}
|
||||
object := formValues.Get("Key")
|
||||
object := trimLeadingSlash(formValues.Get("Key"))
|
||||
|
||||
successRedirect := formValues.Get("success_action_redirect")
|
||||
successStatus := formValues.Get("success_action_status")
|
||||
@@ -866,12 +876,51 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
}
|
||||
|
||||
// Verify policy signature.
|
||||
errCode := doesPolicySignatureMatch(formValues)
|
||||
cred, errCode := doesPolicySignatureMatch(formValues)
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Once signature is validated, check if the user has
|
||||
// explicit permissions for the user.
|
||||
{
|
||||
token := formValues.Get(xhttp.AmzSecurityToken)
|
||||
if token != "" && cred.AccessKey == "" {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if cred.IsServiceAccount() && token == "" {
|
||||
token = cred.SessionToken
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Extract claims if any.
|
||||
claims, err := getClaimsFromToken(token)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.PutObjectAction,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
BucketName: bucket,
|
||||
ObjectName: object,
|
||||
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
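The subtle.ConstantTimeCompare call above matters: a plain string comparison on tokens can leak, via response timing, how many leading bytes of an attacker-supplied token matched. A standalone sketch of the same check:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensEqual compares two session tokens in constant time (for equal-length
// inputs), so response timing does not reveal how close a guess was.
func tokensEqual(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(tokensEqual("token-a", "token-a")) // true
	fmt.Println(tokensEqual("token-a", "token-b")) // false
}
```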
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
@@ -880,10 +929,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
|
||||
// Handle policy if it is set.
|
||||
if len(policyBytes) > 0 {
|
||||
|
||||
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
|
||||
postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
|
||||
errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
|
||||
errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
|
||||
writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1030,6 +1080,64 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
}
|
||||
}
|
||||
|
||||
// GetBucketPolicyStatusHandler - Retrieves the policy status
// for a MinIO bucket, indicating whether the bucket is public.
func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketPolicyStatus")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyStatusAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if anonymous (non-owner) has access to list objects.
|
||||
readable := globalPolicySys.IsAllowed(policy.Args{
|
||||
Action: policy.ListBucketAction,
|
||||
BucketName: bucket,
|
||||
ConditionValues: getConditionValues(r, "", "", nil),
|
||||
IsOwner: false,
|
||||
})
|
||||
|
||||
// Check if anonymous (non-owner) has access to upload objects.
|
||||
writable := globalPolicySys.IsAllowed(policy.Args{
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucket,
|
||||
ConditionValues: getConditionValues(r, "", "", nil),
|
||||
IsOwner: false,
|
||||
})
|
||||
|
||||
encodedSuccessResponse := encodeResponse(PolicyStatus{
|
||||
IsPublic: func() string {
|
||||
// Silly to have special 'boolean' values yes
|
||||
// but complying with silly implementation
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
|
||||
if readable && writable {
|
||||
return "TRUE"
|
||||
}
|
||||
return "FALSE"
|
||||
}(),
|
||||
})
|
||||
|
||||
writeSuccessResponseXML(w, encodedSuccessResponse)
|
||||
}
|
||||
|
||||
// HeadBucketHandler - HEAD Bucket
|
||||
// ----------
|
||||
// This operation is useful to determine if a bucket exists.
|
||||
|
||||
@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
|
||||
}
|
||||
}
|
||||
|
||||
authType := getRequestAuthType(r)
|
||||
var signatureVersion string
|
||||
switch authType {
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
signatureVersion = signV2Algorithm
|
||||
case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
|
||||
signatureVersion = signV4Algorithm
|
||||
}
|
||||
|
||||
var authtype string
|
||||
switch authType {
|
||||
case authTypePresignedV2, authTypePresigned:
|
||||
authtype = "REST-QUERY-STRING"
|
||||
case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
|
||||
authtype = "REST-HEADER"
|
||||
case authTypePostPolicy:
|
||||
authtype = "POST"
|
||||
}
|
||||
|
||||
args := map[string][]string{
|
||||
"CurrentTime": {currTime.Format(time.RFC3339)},
|
||||
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
|
||||
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
|
||||
"SourceIp": {handlers.GetSourceIP(r)},
|
||||
"UserAgent": {r.UserAgent()},
|
||||
"Referer": {r.Referer()},
|
||||
"principaltype": {principalType},
|
||||
"userid": {username},
|
||||
"username": {username},
|
||||
"versionid": {vid},
|
||||
"CurrentTime": {currTime.Format(time.RFC3339)},
|
||||
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
|
||||
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
|
||||
"SourceIp": {handlers.GetSourceIP(r)},
|
||||
"UserAgent": {r.UserAgent()},
|
||||
"Referer": {r.Referer()},
|
||||
"principaltype": {principalType},
|
||||
"userid": {username},
|
||||
"username": {username},
|
||||
"versionid": {vid},
|
||||
"signatureversion": {signatureVersion},
|
||||
"authType": {authtype},
|
||||
}
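For a presigned Signature V4 request, the two keys added above would reach policy evaluation roughly as shown below. The literal values follow from the standard V4 algorithm string and the switch statements above; this is an illustration, not output captured from the server.

```go
package main

import "fmt"

func main() {
	conditions := map[string][]string{
		"signatureversion": {"AWS4-HMAC-SHA256"},  // signV4Algorithm
		"authType":         {"REST-QUERY-STRING"}, // authTypePresigned
	}
	fmt.Println(conditions)
}
```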
if lc != "" {
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
minio "github.com/minio/minio-go/v7"
|
||||
@@ -174,10 +175,10 @@ func isStandardHeader(matchHeaderKey string) bool {
|
||||
}
|
||||
|
||||
// returns whether the object version (possibly a delete marker) qualifies for
// delete replication and whether replication should be synchronous
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (replicate, sync bool) {
rcfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil || rcfg == nil {
|
||||
return false, false, sync
|
||||
return false, sync
|
||||
}
|
||||
opts := replication.ObjectOpts{
|
||||
Name: dobj.ObjectName,
|
||||
@@ -185,6 +186,7 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
|
||||
UserTags: oi.UserTags,
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
VersionID: dobj.VersionID,
|
||||
OpType: replication.DeleteReplicationType,
|
||||
}
|
||||
replicate = rcfg.Replicate(opts)
|
||||
// when incoming delete is removal of a delete marker( a.k.a versioned delete),
|
||||
@@ -196,19 +198,19 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
|
||||
validReplStatus = true
|
||||
}
|
||||
if oi.DeleteMarker && (validReplStatus || replicate) {
|
||||
return oi.DeleteMarker, true, sync
|
||||
return true, sync
|
||||
}
|
||||
// It can be the case that the other cluster is down and a duplicate `mc rm --vid`
// is issued - this still needs to be replicated back to the other target
|
||||
return oi.DeleteMarker, oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
|
||||
return oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
|
||||
// the target online status should not be used here while deciding
|
||||
// whether to replicate deletes as the target could be temporarily down
|
||||
if tgt == nil {
|
||||
return oi.DeleteMarker, false, false
|
||||
return false, false
|
||||
}
|
||||
return oi.DeleteMarker, replicate, tgt.replicateSync
|
||||
return replicate, tgt.replicateSync
|
||||
}
|
||||
|
||||
// replicate deletes to the designated replication target if replication configuration
|
||||
@@ -268,6 +270,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
|
||||
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
|
||||
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
|
||||
ReplicationStatus: miniogo.ReplicationStatusReplica,
|
||||
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
|
||||
},
|
||||
})
|
||||
|
||||
@@ -412,10 +415,11 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
|
||||
ContentEncoding: objInfo.ContentEncoding,
|
||||
StorageClass: sc,
|
||||
Internal: miniogo.AdvancedPutOptions{
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
ReplicationStatus: miniogo.ReplicationStatusReplica,
|
||||
SourceMTime: objInfo.ModTime,
|
||||
SourceETag: objInfo.ETag,
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
ReplicationStatus: miniogo.ReplicationStatusReplica,
|
||||
SourceMTime: objInfo.ModTime,
|
||||
SourceETag: objInfo.ETag,
|
||||
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
|
||||
},
|
||||
}
|
||||
if objInfo.UserTags != "" {
|
||||
@@ -653,7 +657,11 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
|
||||
Bucket: dest.Bucket,
|
||||
Object: object,
|
||||
VersionID: objInfo.VersionID}
|
||||
dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
|
||||
dstOpts := miniogo.PutObjectOptions{
|
||||
Internal: miniogo.AdvancedPutOptions{
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
|
||||
}}
|
||||
if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), srcOpts, dstOpts); err != nil {
|
||||
replicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
@@ -689,19 +697,25 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
|
||||
if totalNodesCount == 0 {
|
||||
totalNodesCount = 1 // For standalone erasure coding
|
||||
}
|
||||
b := target.BandwidthLimit / int64(totalNodesCount)
|
||||
|
||||
var headerSize int
|
||||
for k, v := range putOpts.Header() {
|
||||
headerSize += len(k) + len(v)
|
||||
}
|
||||
|
||||
// r takes over closing gr.
|
||||
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
|
||||
opts := &bandwidth.MonitorReaderOptions{
|
||||
Bucket: objInfo.Bucket,
|
||||
Object: objInfo.Name,
|
||||
HeaderSize: headerSize,
|
||||
BandwidthBytesPerSec: target.BandwidthLimit / int64(totalNodesCount),
|
||||
ClusterBandwidth: target.BandwidthLimit,
|
||||
}
|
||||
|
||||
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
|
||||
if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
|
||||
replicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
}
|
||||
defer r.Close()
|
||||
}
|
||||
|
||||
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
|
||||
@@ -764,81 +778,102 @@ type DeletedObjectVersionInfo struct {
|
||||
DeletedObject
|
||||
Bucket string
|
||||
}
|
||||
type replicationState struct {
|
||||
// add future metrics here
|
||||
replicaCh chan ObjectInfo
|
||||
replicaDeleteCh chan DeletedObjectVersionInfo
|
||||
}
|
||||
|
||||
func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case r.replicaCh <- oi:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case r.replicaDeleteCh <- doi:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
globalReplicationState *replicationState
|
||||
globalReplicationPool *ReplicationPool
|
||||
)
|
||||
|
||||
func newReplicationState() *replicationState {
|
||||
rs := &replicationState{
|
||||
replicaCh: make(chan ObjectInfo, 10000),
|
||||
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
|
||||
}
|
||||
go func() {
|
||||
<-GlobalContext.Done()
|
||||
close(rs.replicaCh)
|
||||
close(rs.replicaDeleteCh)
|
||||
}()
|
||||
return rs
|
||||
// ReplicationPool describes replication pool
|
||||
type ReplicationPool struct {
|
||||
mu sync.Mutex
|
||||
size int
|
||||
replicaCh chan ObjectInfo
|
||||
replicaDeleteCh chan DeletedObjectVersionInfo
|
||||
killCh chan struct{}
|
||||
wg sync.WaitGroup
|
||||
ctx context.Context
|
||||
objLayer ObjectLayer
|
||||
}
|
||||
|
||||
// addWorker creates a new worker to process tasks
|
||||
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
|
||||
// Add a new worker.
|
||||
// NewReplicationPool creates a pool of replication workers of specified size
|
||||
func NewReplicationPool(ctx context.Context, o ObjectLayer, sz int) *ReplicationPool {
|
||||
pool := &ReplicationPool{
|
||||
replicaCh: make(chan ObjectInfo, 10000),
|
||||
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
|
||||
ctx: ctx,
|
||||
objLayer: o,
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case oi, ok := <-r.replicaCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
replicateObject(ctx, oi, objectAPI)
|
||||
case doi, ok := <-r.replicaDeleteCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
replicateDelete(ctx, doi, objectAPI)
|
||||
}
|
||||
}
|
||||
<-ctx.Done()
|
||||
close(pool.replicaCh)
|
||||
close(pool.replicaDeleteCh)
|
||||
}()
|
||||
pool.Resize(sz)
|
||||
return pool
|
||||
}
|
||||
|
||||
// AddWorker adds a replication worker to the pool
|
||||
func (p *ReplicationPool) AddWorker() {
|
||||
defer p.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
return
|
||||
case oi, ok := <-p.replicaCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
replicateObject(p.ctx, oi, p.objLayer)
|
||||
case doi, ok := <-p.replicaDeleteCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
replicateDelete(p.ctx, doi, p.objLayer)
|
||||
case <-p.killCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}

// Resize the replication pool to the given number of workers.
func (p *ReplicationPool) Resize(n int) {
p.mu.Lock()
defer p.mu.Unlock()

for p.size < n {
p.size++
p.wg.Add(1)
go p.AddWorker()
}
for p.size > n {
p.size--
go func() { p.killCh <- struct{}{} }()
}
}
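The grow/shrink mechanics of Resize deserve a self-contained illustration: growing spawns extra workers, shrinking sends one signal per surplus worker on a kill channel, and every worker also honors context cancellation. The sketch below uses simplified, illustrative names, drops the WaitGroup bookkeeping, and is not the MinIO implementation.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type pool struct {
	mu     sync.Mutex
	size   int
	jobs   chan int
	killCh chan struct{}
	ctx    context.Context
}

// worker drains jobs until the context is cancelled or it is asked to exit.
func (p *pool) worker(id int) {
	for {
		select {
		case <-p.ctx.Done():
			return
		case <-p.killCh:
			return // shrink request
		case j := <-p.jobs:
			fmt.Printf("worker %d processed job %d\n", id, j)
		}
	}
}

// resize grows the pool by spawning workers, or shrinks it by signalling
// surplus workers to exit -- the same shape as Resize above.
func (p *pool) resize(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for p.size < n {
		p.size++
		go p.worker(p.size)
	}
	for p.size > n {
		p.size--
		go func() { p.killCh <- struct{}{} }()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	p := &pool{jobs: make(chan int, 16), killCh: make(chan struct{}), ctx: ctx}
	p.resize(4)
	for i := 0; i < 8; i++ {
		p.jobs <- i
	}
	p.resize(1)
	time.Sleep(200 * time.Millisecond) // let workers drain before exiting
}
```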
func (p *ReplicationPool) queueReplicaTask(oi ObjectInfo) {
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case p.replicaCh <- oi:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case p.replicaDeleteCh <- doi:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
|
||||
if globalReplicationState == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Start replication workers per count set in api config or MINIO_API_REPLICATION_WORKERS.
|
||||
for i := 0; i < globalAPIConfig.getReplicationWorkers(); i++ {
|
||||
globalReplicationState.addWorker(ctx, objectAPI)
|
||||
}
|
||||
globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationWorkers())
|
||||
}
|
||||
|
||||
// get Reader from replication target if active-active replication is in place and
|
||||
@@ -978,7 +1013,7 @@ func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer,
|
||||
if sync {
|
||||
replicateObject(ctx, objInfo, o)
|
||||
} else {
|
||||
globalReplicationState.queueReplicaTask(objInfo)
|
||||
globalReplicationPool.queueReplicaTask(objInfo)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -986,6 +1021,6 @@ func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo,
|
||||
if sync {
|
||||
replicateDelete(ctx, dv, o)
|
||||
} else {
|
||||
globalReplicationState.queueReplicaDeleteTask(dv)
|
||||
globalReplicationPool.queueReplicaDeleteTask(dv)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,7 +100,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
|
||||
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
|
||||
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
|
||||
}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
|
||||
}
|
||||
if tgt.Type == madmin.ReplicationService {
|
||||
if !globalIsErasure {
|
||||
@@ -111,7 +111,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
|
||||
}
|
||||
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
|
||||
if err != nil {
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
|
||||
}
|
||||
if vcfg.Status != string(versioning.Enabled) {
|
||||
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
|
||||
@@ -124,7 +124,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
|
||||
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
|
||||
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
|
||||
}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
|
||||
}
|
||||
if vcfg.Status != string(versioning.Enabled) {
|
||||
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
|
||||
|
||||
@@ -73,7 +73,6 @@ func init() {
|
||||
},
|
||||
})
|
||||
|
||||
globalReplicationState = newReplicationState()
|
||||
globalTransitionState = newTransitionState()
|
||||
|
||||
console.SetColor("Debug", color.New())
|
||||
|
||||
@@ -69,7 +69,7 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{})
|
||||
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -553,7 +553,7 @@ If you need help to migrate smoothly visit: https://min.io/pricing`
|
||||
http.WithAuthToken(l.AuthToken),
|
||||
http.WithUserAgent(loggerUserAgent),
|
||||
http.WithLogKind(string(logger.All)),
|
||||
http.WithTransport(NewGatewayHTTPTransport()),
|
||||
http.WithTransport(NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)),
|
||||
),
|
||||
); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
|
||||
|
||||
16
cmd/config/cache/help.go
vendored
@@ -40,19 +40,13 @@ var (
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: Exclude,
|
||||
Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
Description: `exclude cache for the following patterns e.g. "bucket/*.tmp,*.exe"`,
|
||||
Optional: true,
|
||||
Type: "csv",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: After,
|
||||
Description: `minimum accesses before caching an object`,
Description: `minimum number of accesses before caching an object`,
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
@@ -80,5 +74,11 @@ var (
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/minio/minio/pkg/env"
|
||||
@@ -113,3 +114,12 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||

// EnsureCertAndKey checks if both client certificate and key paths are provided
func EnsureCertAndKey(ClientCert, ClientKey string) error {
if (ClientCert != "" && ClientKey == "") ||
(ClientCert == "" && ClientKey != "") {
return errors.New("cert and key must be specified as a pair")
}
return nil
}
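A short usage sketch follows; the environment variable names and the re-declared helper are illustrative assumptions so the snippet compiles on its own, not code taken from this diff.

```go
package main

import (
	"errors"
	"log"
	"os"
)

// ensureCertAndKey mirrors EnsureCertAndKey above, re-declared here only so
// this sketch is self-contained.
func ensureCertAndKey(clientCert, clientKey string) error {
	if (clientCert != "" && clientKey == "") || (clientCert == "" && clientKey != "") {
		return errors.New("cert and key must be specified as a pair")
	}
	return nil
}

func main() {
	// Illustrative environment variable names, not taken from this diff.
	clientCert := os.Getenv("CLIENT_CERT_FILE")
	clientKey := os.Getenv("CLIENT_KEY_FILE")
	if err := ensureCertAndKey(clientCert, clientKey); err != nil {
		log.Fatalln("invalid client TLS configuration:", err)
	}
	log.Println("client TLS configuration OK (both set or both empty)")
}
```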
@@ -23,21 +23,23 @@ const (
|
||||
|
||||
// Top level common ENVs
|
||||
const (
|
||||
EnvAccessKey = "MINIO_ACCESS_KEY"
|
||||
EnvSecretKey = "MINIO_SECRET_KEY"
|
||||
EnvRootUser = "MINIO_ROOT_USER"
|
||||
EnvRootPassword = "MINIO_ROOT_PASSWORD"
|
||||
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
|
||||
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
|
||||
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
|
||||
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
|
||||
EnvBrowser = "MINIO_BROWSER"
|
||||
EnvDomain = "MINIO_DOMAIN"
|
||||
EnvRegionName = "MINIO_REGION_NAME"
|
||||
EnvPublicIPs = "MINIO_PUBLIC_IPS"
|
||||
EnvFSOSync = "MINIO_FS_OSYNC"
|
||||
EnvArgs = "MINIO_ARGS"
|
||||
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
|
||||
EnvAccessKey = "MINIO_ACCESS_KEY"
|
||||
EnvSecretKey = "MINIO_SECRET_KEY"
|
||||
EnvRootUser = "MINIO_ROOT_USER"
|
||||
EnvRootPassword = "MINIO_ROOT_PASSWORD"
|
||||
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
|
||||
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
|
||||
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
|
||||
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
|
||||
EnvBrowser = "MINIO_BROWSER"
|
||||
EnvDomain = "MINIO_DOMAIN"
|
||||
EnvRegionName = "MINIO_REGION_NAME"
|
||||
EnvPublicIPs = "MINIO_PUBLIC_IPS"
|
||||
EnvFSOSync = "MINIO_FS_OSYNC"
|
||||
EnvArgs = "MINIO_ARGS"
|
||||
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
|
||||
EnvLogPosixTimes = "MINIO_LOG_POSIX_TIMES"
|
||||
EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"
|
||||
|
||||
EnvUpdate = "MINIO_UPDATE"
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -180,10 +181,16 @@ func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
|
||||
}
|
||||
|
||||
func (l *Config) lookupBind(conn *ldap.Conn) error {
|
||||
var err error
|
||||
if l.LookupBindPassword == "" {
|
||||
return conn.UnauthenticatedBind(l.LookupBindDN)
|
||||
err = conn.UnauthenticatedBind(l.LookupBindDN)
|
||||
} else {
|
||||
err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
|
||||
}
|
||||
return conn.Bind(l.LookupBindDN, l.LookupBindPassword)
|
||||
if ldap.IsErrorWithCode(err, 49) {
|
||||
return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// usernameFormatsBind - Iterates over all given username formats and expects
|
||||
@@ -205,6 +212,8 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
|
||||
if errs[i] == nil {
|
||||
bindDistNames = append(bindDistNames, bindDN)
|
||||
successCount++
|
||||
} else if !ldap.IsErrorWithCode(errs[i], 49) {
|
||||
return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %v", errs[i])
|
||||
}
|
||||
}
|
||||
if successCount == 0 {
|
||||
@@ -214,7 +223,7 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
|
||||
errStrings = append(errStrings, err.Error())
|
||||
}
|
||||
}
|
||||
outErr := fmt.Sprintf("All username formats failed with: %s", strings.Join(errStrings, "; "))
|
||||
outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
|
||||
return "", errors.New(outErr)
|
||||
}
|
||||
if successCount > 1 {
|
||||
@@ -366,6 +375,38 @@ func (l Config) GetExpiryDuration() time.Duration {
|
||||
return l.stsExpiryDuration
|
||||
}
|
||||
|
||||
func (l Config) testConnection() error {
|
||||
conn, err := l.Connect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating connection to LDAP server: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if l.isUsingLookupBind {
|
||||
if err = l.lookupBind(conn); err != nil {
|
||||
return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generate some random user credentials for username formats mode test.
|
||||
username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
|
||||
charset := []byte("abcdefghijklmnopqrstuvwxyz" +
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
|
||||
rand.Shuffle(len(charset), func(i, j int) {
|
||||
charset[i], charset[j] = charset[j], charset[i]
|
||||
})
|
||||
password := string(charset[:20])
|
||||
_, err = l.usernameFormatsBind(conn, username, password)
|
||||
if err == nil {
|
||||
// We don't expect to successfully guess a credential in this
|
||||
// way.
|
||||
return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
|
||||
} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("LDAP connection test error: %v", err)
|
||||
}
|
||||
|
||||
// Enabled returns if jwks is enabled.
|
||||
func Enabled(kvs config.KVS) bool {
|
||||
return kvs.Get(ServerAddr) != ""
|
||||
@@ -459,6 +500,11 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
|
||||
return l, errors.New("Either Lookup Bind mode or Username Format mode is required.")
|
||||
}
|
||||
|
||||
// Test connection to LDAP server.
|
||||
if err := l.testConnection(); err != nil {
|
||||
return l, fmt.Errorf("Connection test for LDAP server failed: %v", err)
|
||||
}
|
||||
|
||||
// Group search params configuration
|
||||
grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
|
||||
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !fips
|
||||
|
||||
package openid
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
|
||||
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
|
||||
_ "golang.org/x/crypto/sha3"
|
||||
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
|
||||
)
|
||||
|
||||
// Specific instances for EC256 and company
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !fips
|
||||
|
||||
package openid
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
|
||||
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
|
||||
_ "golang.org/x/crypto/sha3"
|
||||
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
|
||||
)
|
||||
|
||||
// Specific instances for RS256 and company
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -410,6 +411,9 @@ func NewKMS(cfg KMSConfig) (kms KMS, err error) {
|
||||
} else if cfg.Vault.Enabled && cfg.Kes.Enabled {
|
||||
return kms, errors.New("Ambiguous KMS configuration: vault configuration and kes configuration are provided at the same time")
|
||||
} else if cfg.Vault.Enabled {
|
||||
if v, ok := os.LookupEnv("MINIO_KMS_VAULT_DEPRECATION"); !ok || v != "off" { // TODO(aead): Remove once Vault support has been removed
|
||||
return kms, errors.New("Hashicorp Vault is deprecated and will be removed Oct. 2021. To temporarily enable Hashicorp Vault support, set MINIO_KMS_VAULT_DEPRECATION=off")
|
||||
}
|
||||
kms, err = NewVault(cfg.Vault)
|
||||
if err != nil {
|
||||
return kms, err
|
||||
|
||||
@@ -81,8 +81,6 @@ var (
|
||||
|
||||
errInvalidInternalIV = Errorf("The internal encryption IV is malformed")
|
||||
errInvalidInternalSealAlgorithm = Errorf("The internal seal algorithm is invalid and not supported")
|
||||
|
||||
errMissingUpdatedKey = Errorf("The key update returned no error but also no sealed key")
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -181,7 +181,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
	context := ctx.AppendTo(make([]byte, 0, 128))
	context, err := ctx.MarshalText()
	if err != nil {
		return key, nil, err
	}

	var plainKey []byte
	plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)

@@ -203,7 +206,10 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
	context := ctx.AppendTo(make([]byte, 0, 128))
	context, err := ctx.MarshalText()
	if err != nil {
		return key, err
	}

	var plainKey []byte
	plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)

@@ -21,7 +21,6 @@ import (
	"crypto/rand"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"sort"

@@ -33,74 +32,29 @@ import (
// associated with a certain object.
type Context map[string]string

// WriteTo writes the context in a canonical from to w.
// It returns the number of bytes and the first error
// encounter during writing to w, if any.
//
// WriteTo sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object to w.
// Sort order is based on the un-escaped keys.
//
// Note that neither keys nor values are escaped for JSON.
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
	sortedKeys := make(sort.StringSlice, 0, len(c))
	for k := range c {
		sortedKeys = append(sortedKeys, k)
	}
	sort.Sort(sortedKeys)
// MarshalText returns a canonical text representation of
// the Context.

	escape := func(s string) string {
		buf := bytes.NewBuffer(make([]byte, 0, len(s)))
		EscapeStringJSON(buf, s)
		return buf.String()
	}

	nn, err := io.WriteString(w, "{")
	if err != nil {
		return n + int64(nn), err
	}
	n += int64(nn)
	for i, k := range sortedKeys {
		s := fmt.Sprintf("\"%s\":\"%s\",", escape(k), escape(c[k]))
		if i == len(sortedKeys)-1 {
			s = s[:len(s)-1] // remove last ','
		}

		nn, err = io.WriteString(w, s)
		if err != nil {
			return n + int64(nn), err
		}
		n += int64(nn)
	}
	nn, err = io.WriteString(w, "}")
	return n + int64(nn), err
}

// AppendTo appends the context in a canonical from to dst.
//
// AppendTo sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object to w.
// Sort order is based on the un-escaped keys.
//
// Note that neither keys nor values are escaped for JSON.
func (c Context) AppendTo(dst []byte) (output []byte) {
// MarshalText sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object. The sort order
// is based on the un-escaped keys.
func (c Context) MarshalText() ([]byte, error) {
	if len(c) == 0 {
		return append(dst, '{', '}')
		return []byte{'{', '}'}, nil
	}

	// out should not escape.
	out := bytes.NewBuffer(dst)

	// No need to copy+sort
	// Pre-allocate a buffer - 128 bytes is an arbitrary
	// heuristic value that seems like a good starting size.
	var b = bytes.NewBuffer(make([]byte, 0, 128))
	if len(c) == 1 {
		for k, v := range c {
			out.WriteString(`{"`)
			EscapeStringJSON(out, k)
			out.WriteString(`":"`)
			EscapeStringJSON(out, v)
			out.WriteString(`"}`)
			b.WriteString(`{"`)
			EscapeStringJSON(b, k)
			b.WriteString(`":"`)
			EscapeStringJSON(b, v)
			b.WriteString(`"}`)
		}
		return out.Bytes()
		return b.Bytes(), nil
	}

	sortedKeys := make([]string, 0, len(c))

@@ -109,19 +63,19 @@ func (c Context) AppendTo(dst []byte) (output []byte) {
	}
	sort.Strings(sortedKeys)

	out.WriteByte('{')
	b.WriteByte('{')
	for i, k := range sortedKeys {
		out.WriteByte('"')
		EscapeStringJSON(out, k)
		out.WriteString(`":"`)
		EscapeStringJSON(out, c[k])
		out.WriteByte('"')
		b.WriteByte('"')
		EscapeStringJSON(b, k)
		b.WriteString(`":"`)
		EscapeStringJSON(b, c[k])
		b.WriteByte('"')
		if i < len(sortedKeys)-1 {
			out.WriteByte(',')
			b.WriteByte(',')
		}
	}
	out.WriteByte('}')
	return out.Bytes()
	b.WriteByte('}')
	return b.Bytes(), nil
}

// KMS represents an active and authenticted connection

@@ -225,9 +179,11 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
	if context == nil {
		context = Context{}
	}
	ctxBytes, _ := context.MarshalText()

	mac := hmac.New(sha256.New, kms.masterKey[:])
	mac.Write([]byte(keyID))
	mac.Write(context.AppendTo(make([]byte, 0, 128)))
	mac.Write(ctxBytes)
	mac.Sum(key[:0])
	return key
}

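The hunk above swaps the AppendTo-based context serialization for MarshalText before HMAC-ing the result into a per-object key. A minimal standalone sketch of that derivation pattern, assuming a plain map in place of the package's Context type and illustrative masterKey/keyID values (encoding/json stands in here for the package's own escaping and key sorting):

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"encoding/json"
		"fmt"
	)

	// deriveKey mirrors the pattern in the hunk above: HMAC-SHA256 over the
	// key ID and the canonical JSON form of the context, keyed with the
	// master key.
	func deriveKey(masterKey [32]byte, keyID string, context map[string]string) (key [32]byte) {
		ctxJSON, _ := json.Marshal(context) // encoding/json sorts map keys, giving a stable representation
		mac := hmac.New(sha256.New, masterKey[:])
		mac.Write([]byte(keyID))
		mac.Write(ctxJSON)
		mac.Sum(key[:0]) // fills the 32-byte array in place
		return key
	}

	func main() {
		var master [32]byte // zero key, for illustration only
		fmt.Printf("%x\n", deriveKey(master, "my-minio-key", map[string]string{"bucket": "photos"}))
	}
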
@@ -18,7 +18,6 @@ import (
	"bytes"
	"fmt"
	"path"
	"strings"
	"testing"
)

@@ -61,7 +60,7 @@ func TestMasterKeyKMS(t *testing.T) {
	}
}

var contextWriteToTests = []struct {
var contextMarshalTextTests = []struct {
	Context      Context
	ExpectedJSON string
}{

@@ -76,43 +75,29 @@ var contextWriteToTests = []struct {
	6: {Context: Context{"a": "<>&"}, ExpectedJSON: `{"a":"\u003c\u003e\u0026"}`},
}

func TestContextWriteTo(t *testing.T) {
	for i, test := range contextWriteToTests {
		var jsonContext strings.Builder
		if _, err := test.Context.WriteTo(&jsonContext); err != nil {
			t.Errorf("Test %d: Failed to encode context: %v", i, err)
			continue
func TestContextMarshalText(t *testing.T) {
	for i, test := range contextMarshalTextTests {
		text, err := test.Context.MarshalText()
		if err != nil {
			t.Fatalf("Test %d: Failed to encode context: %v", i, err)
		}
		if s := jsonContext.String(); s != test.ExpectedJSON {
			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
		if string(text) != test.ExpectedJSON {
			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, string(text), test.ExpectedJSON)
		}
	}
}

func TestContextAppendTo(t *testing.T) {
	for i, test := range contextWriteToTests {
		dst := make([]byte, 0, 1024)
		dst = test.Context.AppendTo(dst)
		if s := string(dst); s != test.ExpectedJSON {
			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
		}
		// Append one more
		dst = test.Context.AppendTo(dst)
		if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
			t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
		}
	}
}

func BenchmarkContext_AppendTo(b *testing.B) {
func BenchmarkContext(b *testing.B) {
	tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%<>&"}}
	for _, test := range tests {
		b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
			dst := make([]byte, 0, 1024)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				dst = test.AppendTo(dst[:0])
				_, err := test.MarshalText()
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}

@@ -223,7 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
	context := ctx.AppendTo(make([]byte, 0, 128))
	context, err := ctx.MarshalText()
	if err != nil {
		return key, sealedKey, err
	}

	payload := map[string]interface{}{
		"context": base64.StdEncoding.EncodeToString(context),

@@ -258,7 +261,10 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
	context := ctx.AppendTo(make([]byte, 0, 128))
	context, err := ctx.MarshalText()
	if err != nil {
		return key, err
	}

	payload := map[string]interface{}{
		"ciphertext": string(sealedKey),

@@ -282,29 +288,3 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
	copy(key[:], plainKey)
	return key, nil
}

// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
// has been changed by the KMS operator - i.e. the master key has been rotated.
// If the master key hasn't changed since the sealedKey has been created / updated
// it may return the same sealedKey as rotatedKey.
//
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
	context := ctx.AppendTo(make([]byte, 0, 128))

	payload := map[string]interface{}{
		"ciphertext": string(sealedKey),
		"context":    base64.StdEncoding.EncodeToString(context),
	}
	s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
	if err != nil {
		return nil, Errorf("crypto: client error %w", err)
	}
	ciphertext, ok := s.Data["ciphertext"]
	if !ok {
		return nil, errMissingUpdatedKey
	}
	rotatedKey = []byte(ciphertext.(string))
	return rotatedKey, nil
}

@@ -70,11 +70,12 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
	var err error
	// Make sure only 1 scanner is running on the cluster.
	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		err := locker.GetLock(ctx, dataScannerLeaderLockTimeout)
		ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
		if err != nil {
			time.Sleep(time.Duration(r.Float64() * float64(dataScannerStartDelay)))
			continue

@@ -543,6 +544,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
	foundObjs := false
	dangling := false
	ctx, cancel := context.WithCancel(ctx)

	err := listPathRaw(ctx, listPathRawOptions{
		disks:  f.disks,
		bucket: bucket,

@@ -795,42 +797,39 @@ type actionMeta struct {

var applyActionsLogPrefix = color.Green("applyActions:")

// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
	if i.debug {
		if meta.oi.VersionID != "" {
			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
		} else {
			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
		}
	}
	healOpts := madmin.HealOpts{Remove: healDeleteDangling}
	if meta.bitRotScan {
		healOpts.ScanMode = madmin.HealDeepScan
	}
	res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
	if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
		return 0
	}
	if err != nil && !errors.Is(err, NotImplemented{}) {
		logger.LogIf(ctx, err)
		return 0
	}
	return res.ObjectSize
}

func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
	size, err := meta.oi.GetActualSize()
	if i.debug {
		logger.LogIf(ctx, err)
	}
	if i.heal {
		if i.debug {
			if meta.oi.VersionID != "" {
				console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
			} else {
				console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
			}
		}
		healOpts := madmin.HealOpts{Remove: healDeleteDangling}
		if meta.bitRotScan {
			healOpts.ScanMode = madmin.HealDeepScan
		}
		res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
			return 0
		}
		if err != nil && !errors.Is(err, NotImplemented{}) {
			logger.LogIf(ctx, err)
			return 0
		}
		size = res.ObjectSize
	}
	if i.lifeCycle == nil {
		if i.debug {
			console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
		}
		return size
		return false, size
	}

	versionID := meta.oi.VersionID

@@ -864,7 +863,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
		if i.debug {
			console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
		}
		return size
		return false, size
	}

	obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{

@@ -876,26 +875,46 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
			if !obj.DeleteMarker { // if this is not a delete marker log and return
				// Do nothing - heal in the future.
				logger.LogIf(ctx, err)
				return size
				return false, size
			}
		case ObjectNotFound, VersionNotFound:
			// object not found or version not found return 0
			return 0
			return false, 0
		default:
			// All other errors proceed.
			logger.LogIf(ctx, err)
			return size
			return false, size
		}
	}

	var applied bool
	action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
	if action != lifecycle.NoneAction {
		applied = applyLifecycleAction(ctx, action, o, obj)
	}

	if applied {
		return 0
		switch action {
		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
			return true, size
		}
		// For all other lifecycle actions that remove data
		return true, 0
	}

	return false, size
}

// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) int64 {
	applied, size := i.applyLifecycle(ctx, o, meta)
	// For instance, an applied lifecycle means we remove/transitioned an object
	// from the current deployment, which means we don't have to call healing
	// routine even if we are asked to do via heal flag.
	if !applied && i.heal {
		size = i.applyHealing(ctx, o, meta)
	}
	return size
}

@@ -1038,7 +1057,7 @@ func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo,
	return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion)
}

// Perform actions (removal of transitioning of objects), return true the action is successfully performed
// Perform actions (removal or transitioning of objects), return true the action is successfully performed
func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) {
	switch action {
	case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:

@@ -1070,10 +1089,10 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
	switch oi.ReplicationStatus {
	case replication.Pending:
		sizeS.pendingSize += oi.Size
		globalReplicationState.queueReplicaTask(oi)
		globalReplicationPool.queueReplicaTask(oi)
	case replication.Failed:
		sizeS.failedSize += oi.Size
		globalReplicationState.queueReplicaTask(oi)
		globalReplicationPool.queueReplicaTask(oi)
	case replication.Completed, "COMPLETE":
		sizeS.replicatedSize += oi.Size
	case replication.Replica:

@@ -1092,7 +1111,7 @@ func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer,
	} else {
		versionID = oi.VersionID
	}
	globalReplicationState.queueReplicaDeleteTask(DeletedObjectVersionInfo{
	globalReplicationPool.queueReplicaDeleteTask(DeletedObjectVersionInfo{
		DeletedObject: DeletedObject{
			ObjectName:            oi.Name,
			DeleteMarkerVersionID: dmVersionID,

@@ -522,7 +522,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
		dataUsageBucket,
		name,
		NewPutObjReader(r),
		ObjectOptions{NoLock: true})
		ObjectOptions{})
	if isErrBucketNotFound(err) {
		return nil
	}

@@ -437,7 +437,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {

	cLock := c.NewNSLockFn(cacheObjPath)
	if err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
	if ctx, err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
		return
	}

@@ -515,9 +515,11 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
// saves object metadata to disk cache
// incHitsOnly is true if metadata update is incrementing only the hit counter
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
	var err error
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachedPath)
	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
	ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	defer cLock.Unlock()

@@ -694,7 +696,8 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachePath)
	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
	ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	defer cLock.Unlock()

@@ -908,7 +911,8 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cacheObjPath)
	if err := cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
	ctx, err = cLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return nil, numHits, err
	}

@@ -972,7 +976,8 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
// Deletes the cached object
func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
	cLock := c.NewNSLockFn(cacheObjPath)
	if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
	_, err = cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	defer cLock.Unlock()

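Several hunks above change the namespace-lock calls so that GetLock/GetRLock now return a context tied to the acquired lock as well as an error. A minimal hedged sketch of that calling pattern, using an assumed locker interface and plain time.Duration timeouts rather than MinIO's actual RWLocker and timeout types:

	import (
		"context"
		"time"
	)

	// locker is an assumed interface mirroring the usage above: acquiring the
	// lock yields a lock-scoped context that callers should use afterwards.
	type locker interface {
		GetLock(ctx context.Context, timeout time.Duration) (context.Context, error)
		Unlock()
	}

	// withLock runs fn while holding lk, passing fn the lock-scoped context so
	// the work stops if the lock's context is cancelled.
	func withLock(ctx context.Context, lk locker, timeout time.Duration, fn func(context.Context) error) error {
		lctx, err := lk.GetLock(ctx, timeout)
		if err != nil {
			return err
		}
		defer lk.Unlock()
		return fn(lctx)
	}
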
@@ -144,12 +144,12 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs
	if err == errVolumeNotEmpty {
		// Attempt to delete bucket again.
		if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
			_ = cleanupDir(ctx, storageDisks[index], bucket, "")
			_ = storageDisks[index].Delete(ctx, bucket, "", true)

			_ = storageDisks[index].DeleteVol(ctx, bucket, false)

			// Cleanup all the previously incomplete multiparts.
			_ = cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
			_ = storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true)
		}
	}
}

@@ -170,8 +170,7 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
	if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
		return err
	}
	err := cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
	if err != nil && err != errVolumeNotFound {
	if err := storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true); err != errFileNotFound {
		return err
	}
	return nil

@@ -23,12 +23,22 @@ import (
	"github.com/minio/minio/pkg/sync/errgroup"
)

func (er erasureObjects) getLocalDisks() (localDisks []StorageAPI) {
	disks := er.getDisks()
	for _, disk := range disks {
		if disk != nil && disk.IsLocal() {
			localDisks = append(localDisks, disk)
		}
	}
	return localDisks
}

func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		if disks[i-1] != nil && disks[i-1].IsLocal() {
			if !disks[i-1].Healing() && disks[i-1].IsOnline() {
			if disks[i-1].Healing() == nil && disks[i-1].IsOnline() {
				newDisks = append(newDisks, disks[i-1])
			}
		}

@@ -161,7 +161,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
			// For the last shard, the shardsize might be less than previous shard sizes.
			// Hence the following statement ensures that the buffer size is reset to the right size.
			p.buf[bufIdx] = p.buf[bufIdx][:p.shardSize]
			_, err := rr.ReadAt(p.buf[bufIdx], p.offset)
			n, err := rr.ReadAt(p.buf[bufIdx], p.offset)
			if err != nil {
				if errors.Is(err, errFileNotFound) {
					atomic.StoreInt32(&missingPartsHeal, 1)

@@ -179,7 +179,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
				return
			}
			newBufLK.Lock()
			newBuf[bufIdx] = p.buf[bufIdx]
			newBuf[bufIdx] = p.buf[bufIdx][:n]
			newBufLK.Unlock()
			// Since ReadAt returned success, there is no need to trigger another read.
			readTriggerCh <- false

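The change above keeps only the n bytes that ReadAt actually filled (newBuf[bufIdx] = p.buf[bufIdx][:n]) instead of handing the whole shard-sized buffer to the decoder. A small illustrative snippet of the io.ReaderAt semantics behind that, using only the standard library (names are illustrative, not MinIO's):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// A 10-byte "shard" buffer, but the reader only has 4 bytes left at
		// offset 6 - ReadAt reports how much it really filled via n.
		r := strings.NewReader("0123456789")
		buf := make([]byte, 10)
		n, err := r.ReadAt(buf, 6)
		// Truncate to what was read; the tail of buf would be stale otherwise.
		fmt.Printf("n=%d err=%v data=%q\n", n, err, buf[:n]) // n=4 err=EOF data="6789"
	}
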
@@ -41,46 +41,46 @@ var erasureDecodeTests = []struct {
|
||||
algorithm BitrotAlgorithm
|
||||
shouldFail, shouldFailQuorum bool
|
||||
}{
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},
|
||||
// 4
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 6
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 7
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 8
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 9
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 10
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 16
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV1), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV2), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},
|
||||
// 19
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 20
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 21
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 20
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 21
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 9, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 22
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 23
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 24
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 25
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(blockSizeV1) + 1, offset: 0, length: int64(blockSizeV1) + 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 26
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 12, length: int64(blockSizeV1) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 27
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 1023, length: int64(blockSizeV1) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 28
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 11, length: int64(blockSizeV1) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 29
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 512, length: int64(blockSizeV1) + 8*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 30
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: int64(blockSizeV1), length: int64(blockSizeV1) - 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 31
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(oneMiByte), offset: -1, length: 3, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 32
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(oneMiByte), offset: 1024, length: -1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 33
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: int64(blockSizeV1), offset: 0, length: int64(blockSizeV1), algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 34
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 12, length: int64(blockSizeV1) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 35
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 3, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 1023, length: int64(blockSizeV1) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 36
|
||||
{dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 11, length: int64(blockSizeV1) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 23
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 24
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 25
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(blockSizeV2) + 1, offset: 0, length: int64(blockSizeV2) + 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 26
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 12, length: int64(blockSizeV2) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 27
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 1023, length: int64(blockSizeV2) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 28
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 11, length: int64(blockSizeV2) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 29
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 512, length: int64(blockSizeV2) + 8*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 30
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: int64(blockSizeV2), length: int64(blockSizeV2) - 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 31
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(oneMiByte), offset: -1, length: 3, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 32
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(oneMiByte), offset: 1024, length: -1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 33
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(blockSizeV2), offset: 0, length: int64(blockSizeV2), algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 34
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 12, length: int64(blockSizeV2) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 35
|
||||
{dataBlocks: 4, onDisks: 6, offDisks: 3, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 1023, length: int64(blockSizeV2) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 36
|
||||
{dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 11, length: int64(blockSizeV2) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37
|
||||
}
|
||||
|
||||
func TestErasureDecode(t *testing.T) {
|
||||
@@ -108,7 +108,8 @@ func TestErasureDecode(t *testing.T) {
|
||||
buffer := make([]byte, test.blocksize, 2*test.blocksize)
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object",
|
||||
erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
@@ -234,7 +235,8 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object",
|
||||
erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
|
||||
// 10000 iterations with random offsets and lengths.
|
||||
@@ -288,13 +290,13 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
// Benchmarks
|
||||
|
||||
func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
|
||||
setup, err := newErasureTestSetup(data, parity, blockSizeV1)
|
||||
setup, err := newErasureTestSetup(data, parity, blockSizeV2)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create test setup: %v", err)
|
||||
}
|
||||
defer setup.Remove()
|
||||
disks := setup.disks
|
||||
erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
|
||||
erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create ErasureStorage: %v", err)
|
||||
}
|
||||
@@ -304,11 +306,12 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object",
|
||||
erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
|
||||
content := make([]byte, size)
|
||||
buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
|
||||
buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
|
||||
_, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
if err != nil {
|
||||
|
||||
@@ -62,26 +62,26 @@ var erasureEncodeTests = []struct {
|
||||
algorithm BitrotAlgorithm
|
||||
shouldFail, shouldFailQuorum bool
|
||||
}{
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 1, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 4
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 5, blocksize: int64(blockSizeV1), data: 0, offset: 0, shouldFail: false, algorithm: SHA256, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV1), data: 0, offset: 0, shouldFail: false, algorithm: DefaultBitrotAlgorithm, shouldFailQuorum: false}, // 6
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: true}, // 7
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: SHA256, shouldFail: false, shouldFailQuorum: true}, // 8
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 9
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 10
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 1, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 4
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 5, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: SHA256, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: DefaultBitrotAlgorithm, shouldFailQuorum: false}, // 6
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: true}, // 7
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: SHA256, shouldFail: false, shouldFailQuorum: true}, // 8
|
||||
{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 9
|
||||
{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 10
|
||||
{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 11
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 3, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
|
||||
{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(oneMiByte / 2), data: oneMiByte, offset: oneMiByte/2 + 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
|
||||
{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(oneMiByte - 1), data: oneMiByte, offset: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 14
|
||||
{dataBlocks: 8, onDisks: 12, offDisks: 2, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
|
||||
{dataBlocks: 8, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 16
|
||||
{dataBlocks: 10, onDisks: 14, offDisks: 0, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 17, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 8, onDisks: 12, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 15
|
||||
{dataBlocks: 8, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 16
|
||||
{dataBlocks: 10, onDisks: 14, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 17, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 2, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 19
|
||||
{dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 19
|
||||
}
|
||||
|
||||
func TestErasureEncode(t *testing.T) {
|
||||
@@ -108,7 +108,7 @@ func TestErasureEncode(t *testing.T) {
|
||||
if disk == OfflineDisk {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
@@ -132,14 +132,14 @@ func TestErasureEncode(t *testing.T) {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
for j := range disks[:test.offDisks] {
|
||||
switch w := writers[j].(type) {
|
||||
case *wholeBitrotWriter:
|
||||
w.disk = badDisk{nil}
|
||||
case *streamingBitrotWriter:
|
||||
w.iow.CloseWithError(errFaultyDisk)
|
||||
w.iow.(*io.PipeWriter).CloseWithError(errFaultyDisk)
|
||||
}
|
||||
}
|
||||
if test.offDisks > 0 {
|
||||
@@ -166,17 +166,17 @@ func TestErasureEncode(t *testing.T) {
|
||||
// Benchmarks
|
||||
|
||||
func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
|
||||
setup, err := newErasureTestSetup(data, parity, blockSizeV1)
|
||||
setup, err := newErasureTestSetup(data, parity, blockSizeV2)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create test setup: %v", err)
|
||||
}
|
||||
defer setup.Remove()
|
||||
erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
|
||||
erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create ErasureStorage: %v", err)
|
||||
}
|
||||
disks := setup.disks
|
||||
buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
|
||||
buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
|
||||
content := make([]byte, size)
|
||||
|
||||
for i := 0; i < dataDown; i++ {
|
||||
@@ -196,7 +196,8 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
|
||||
continue
|
||||
}
|
||||
disk.Delete(context.Background(), "testbucket", "object", false)
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, "testbucket", "object",
|
||||
erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
|
||||
closeBitrotWriters(writers)
|
||||
|
||||
@@ -39,26 +39,26 @@ var erasureHealTests = []struct {
|
||||
algorithm BitrotAlgorithm
|
||||
shouldFail bool
|
||||
}{
|
||||
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: SHA256, shouldFail: false}, // 0
|
||||
{dataBlocks: 3, disks: 6, offDisks: 2, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 1
|
||||
{dataBlocks: 4, disks: 8, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 2
|
||||
{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 3
|
||||
{dataBlocks: 6, disks: 12, offDisks: 2, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: SHA256, shouldFail: false}, // 4
|
||||
{dataBlocks: 7, disks: 14, offDisks: 4, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 5
|
||||
{dataBlocks: 8, disks: 16, offDisks: 6, badDisks: 1, badStaleDisks: 1, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 6
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: SHA256, shouldFail: false}, // 0
{dataBlocks: 3, disks: 6, offDisks: 2, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 1
{dataBlocks: 4, disks: 8, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 2
{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 3
{dataBlocks: 6, disks: 12, offDisks: 2, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: SHA256, shouldFail: false}, // 4
{dataBlocks: 7, disks: 14, offDisks: 4, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 5
{dataBlocks: 8, disks: 16, offDisks: 6, badDisks: 1, badStaleDisks: 1, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 6
{dataBlocks: 7, disks: 14, offDisks: 2, badDisks: 3, badStaleDisks: 0, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 7
{dataBlocks: 6, disks: 12, offDisks: 1, badDisks: 0, badStaleDisks: 1, blocksize: int64(oneMiByte - 1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 8
{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 0, badStaleDisks: 3, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: SHA256, shouldFail: true}, // 9
{dataBlocks: 4, disks: 8, offDisks: 1, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 10
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 1, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 11
{dataBlocks: 6, disks: 12, offDisks: 8, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 12
{dataBlocks: 7, disks: 14, offDisks: 3, badDisks: 4, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 13
{dataBlocks: 7, disks: 14, offDisks: 6, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 14
{dataBlocks: 8, disks: 16, offDisks: 4, badDisks: 5, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 15
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 16
{dataBlocks: 12, disks: 16, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 17
{dataBlocks: 6, disks: 8, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 18
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false}, // 19
{dataBlocks: 4, disks: 8, offDisks: 1, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 10
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 1, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 11
{dataBlocks: 6, disks: 12, offDisks: 8, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 12
{dataBlocks: 7, disks: 14, offDisks: 3, badDisks: 4, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 13
{dataBlocks: 7, disks: 14, offDisks: 6, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 14
{dataBlocks: 8, disks: 16, offDisks: 4, badDisks: 5, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 15
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 16
{dataBlocks: 12, disks: 16, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 17
{dataBlocks: 6, disks: 8, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false}, // 18
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false}, // 19
}

func TestErasureHeal(t *testing.T) {
@@ -87,7 +87,8 @@ func TestErasureHeal(t *testing.T) {
buffer := make([]byte, test.blocksize, 2*test.blocksize)
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
writers[i] = newBitrotWriter(disk, "testbucket", "testobject",
erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
}
_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
@@ -130,7 +131,8 @@ func TestErasureHeal(t *testing.T) {
continue
}
os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject",
erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
}

// test case setup is complete - now call Heal()

@@ -239,13 +239,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
storageDisks := er.getDisks()
|
||||
storageEndpoints := er.getEndpoints()
|
||||
|
||||
// List of disks having latest version of the object er.meta
|
||||
// (by modtime).
|
||||
latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
|
||||
|
||||
// List of disks having all parts as per latest er.meta.
|
||||
availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)
|
||||
|
||||
// Initialize heal result object
|
||||
result = madmin.HealResultItem{
|
||||
Type: madmin.HealItemObject,
|
||||
@@ -256,6 +249,21 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
DataBlocks: len(storageDisks) - er.defaultParityCount,
|
||||
}
|
||||
|
||||
if !opts.NoLock {
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
}
|
||||
|
||||
// List of disks having latest version of the object er.meta
|
||||
// (by modtime).
|
||||
latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
|
||||
|
||||
// List of disks having all parts as per latest er.meta.
|
||||
availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)
|
||||
|
||||
// Loop to find number of disks with valid data, per-drive
|
||||
// data state and a list of outdated disks on which data needs
|
||||
// to be healed.
|
||||
@@ -414,7 +422,8 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
continue
|
||||
}
|
||||
partPath := pathJoin(tmpID, dataDir, fmt.Sprintf("part.%d", partNumber))
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
|
||||
tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize(), true)
|
||||
}
|
||||
err = erasure.Heal(ctx, readers, writers, partSize)
|
||||
closeBitrotReaders(readers)
|
||||
|
||||
@@ -19,6 +19,7 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -134,7 +135,8 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
|
||||
errFileVersionNotFound,
|
||||
errDiskNotFound,
|
||||
}...) {
|
||||
logger.LogOnceIf(ctx, err, disks[index].String())
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s returned an error (%w)", disks[index], err),
|
||||
disks[index].String())
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -145,10 +147,11 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
|
||||
return metadataArray, g.Wait()
|
||||
}
|
||||
|
||||
func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, distribution []int) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, fi FileInfo) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
shuffledDisks = make([]StorageAPI, len(disks))
shuffledPartsMetadata = make([]FileInfo, len(disks))
var inconsistent int
distribution := fi.Erasure.Distribution
for i, meta := range metaArr {
if disks[i] == nil {
// Assuming offline drives as inconsistent,
@@ -171,7 +174,7 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
// Inconsistent meta info is with in the limit of
// expected quorum, proceed with EcIndex based
// disk order.
if inconsistent < len(disks)/2 {
if inconsistent < fi.Erasure.ParityBlocks {
return shuffledDisks, shuffledPartsMetadata
}

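The rewritten helper above derives the drive order from fi.Erasure.Distribution and tolerates metadata inconsistencies only up to the parity count, where it previously allowed up to half the drives. A minimal, runnable sketch of that idea follows; the disk and fileInfo types are simplified stand-ins for MinIO's StorageAPI and FileInfo, not the real structures.

```go
package main

import "fmt"

// Simplified stand-ins for StorageAPI and FileInfo.
type disk struct{ name string }

type fileInfo struct {
	ecIndex      int // 1-based position of this drive in the erasure set
	parityBlocks int
}

// shuffleByECIndex reorders disks so that slot ecIndex-1 holds the drive whose
// metadata claims that erasure index. Drives with missing metadata count as
// inconsistent; once that count reaches the parity count, the caller can no
// longer trust the index-based order and should fall back to a slower path.
func shuffleByECIndex(disks []*disk, meta []fileInfo, fi fileInfo) ([]*disk, bool) {
	shuffled := make([]*disk, len(disks))
	inconsistent := 0
	for i, m := range meta {
		if disks[i] == nil || m.ecIndex == 0 {
			inconsistent++
			continue
		}
		shuffled[m.ecIndex-1] = disks[i]
	}
	return shuffled, inconsistent < fi.parityBlocks
}

func main() {
	disks := []*disk{{"d1"}, {"d2"}, {"d3"}, {"d4"}}
	meta := []fileInfo{{ecIndex: 2}, {ecIndex: 1}, {ecIndex: 4}, {ecIndex: 3}}
	fi := fileInfo{parityBlocks: 2}
	ordered, ok := shuffleByECIndex(disks, meta, fi)
	fmt.Println(ok, ordered[0].name, ordered[1].name) // true d2 d1
}
```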
@@ -18,6 +18,9 @@ package cmd

import (
"context"
"encoding/hex"
"fmt"
"math/rand"
"reflect"
"testing"
)
@@ -199,3 +202,22 @@ func TestEvalDisks(t *testing.T) {
z := objLayer.(*erasureServerPools)
testShuffleDisks(t, z)
}

func Test_hashOrder(t *testing.T) {
for x := 1; x < 17; x++ {
t.Run(fmt.Sprintf("%d", x), func(t *testing.T) {
var first [17]int
rng := rand.New(rand.NewSource(0))
var tmp [16]byte
rng.Read(tmp[:])
prefix := hex.EncodeToString(tmp[:])
for i := 0; i < 10000; i++ {
rng.Read(tmp[:])

y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x)
first[y[0]]++
}
t.Log("first:", first[:x])
})
}
}

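The new test above only checks how evenly hashOrder spreads the first placement across slots; the hash itself is not shown in this hunk. The sketch below is therefore an assumption for illustration, a CRC32-of-key variant, and may differ from the production helper.

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// hashOrder returns a permutation of 1..cardinality that starts at a position
// derived from the key. The CRC32 choice here is an illustrative assumption,
// not taken from this diff.
func hashOrder(key string, cardinality int) []int {
	if cardinality <= 0 {
		return nil
	}
	start := int(crc32.ChecksumIEEE([]byte(key)) % uint32(cardinality))
	order := make([]int, cardinality)
	for i := range order {
		order[i] = 1 + (start+i)%cardinality
	}
	return order
}

func main() {
	// Count where each key lands first, mirroring what the test logs: a
	// roughly even spread over the slots indicates a usable distribution.
	var first [8]int
	for i := 0; i < 10000; i++ {
		y := hashOrder(fmt.Sprintf("bucket/object-%d", i), 8)
		first[y[0]-1]++
	}
	fmt.Println("first:", first)
}
```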
@@ -276,7 +276,6 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
|
||||
// disks. `uploads.json` carries metadata regarding on-going multipart
|
||||
// operation(s) on the object.
|
||||
func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
|
||||
|
||||
onlineDisks := er.getDisks()
|
||||
parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
|
||||
if parityBlocks <= 0 {
|
||||
@@ -317,7 +316,12 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
|
||||
// Delete the tmp path later in case we fail to commit (ignore
|
||||
// returned errors) - this will be a no-op in case of a commit
|
||||
// success.
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
|
||||
var online int
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
var partsMetadata = make([]FileInfo, len(onlineDisks))
|
||||
for i := range onlineDisks {
|
||||
@@ -339,6 +343,8 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
|
||||
return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
}
|
||||
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
// Return success.
|
||||
return uploadID, nil
|
||||
}
|
||||
@@ -378,7 +384,8 @@ func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObjec
|
||||
// Implements S3 compatible Upload Part API.
|
||||
func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
|
||||
uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
|
||||
if err = uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return PartInfo{}, err
|
||||
}
|
||||
readLocked := true
|
||||
@@ -440,7 +447,12 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
tmpPartPath := pathJoin(tmpPart, partSuffix)
|
||||
|
||||
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tmpPart, writeQuorum)
|
||||
var online int
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tmpPart, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
@@ -452,7 +464,14 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
switch size := data.Size(); {
case size == 0:
buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
case size == -1 || size >= fi.Erasure.BlockSize:
case size == -1:
if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
} else {
buffer = er.bp.Get()
defer er.bp.Put(buffer)
}
case size >= fi.Erasure.BlockSize:
buffer = er.bp.Get()
defer er.bp.Put(buffer)
case size < fi.Erasure.BlockSize:
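The reworked switch above stops borrowing a full pooled block for streaming parts (size == -1) whose actual size is known to be smaller than the erasure block size, and right-sizes the allocation instead. A self-contained sketch of that selection logic follows; blockSize, pool and pickBuffer are illustrative names, not identifiers from this diff.

```go
package main

import "fmt"

const blockSize = 1 << 20 // assume a 1 MiB erasure block for the example

// pool is a minimal stand-in for a byte-buffer pool.
type pool struct{ buf []byte }

func (p *pool) Get() []byte { return p.buf }

// pickBuffer sketches the decision above: zero-length objects get a one-byte
// buffer just to reach EOF, small streaming payloads get a right-sized
// allocation, and everything else borrows a pooled, block-sized buffer.
func pickBuffer(size, actualSize int64, bp *pool) []byte {
	switch {
	case size == 0:
		return make([]byte, 1)
	case size == -1: // streaming upload, total size unknown up front
		if actualSize > 0 && actualSize < blockSize {
			return make([]byte, actualSize+256, actualSize*2+512)
		}
		return bp.Get()
	case size >= blockSize:
		return bp.Get()
	default: // 0 < size < blockSize (assumed right-sized allocation)
		return make([]byte, size, 2*size)
	}
}

func main() {
	bp := &pool{buf: make([]byte, blockSize, 2*blockSize)}
	fmt.Println(cap(pickBuffer(-1, 4096, bp)))       // small streaming part: 2*4096+512
	fmt.Println(cap(pickBuffer(-1, -1, bp)))         // unknown actual size: pooled block
	fmt.Println(cap(pickBuffer(5*blockSize, 0, bp))) // large object: pooled block
}
```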
@@ -468,7 +487,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath,
|
||||
erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
|
||||
n, err := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
|
||||
@@ -493,7 +513,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
// PutObjectParts would serialize here updating `xl.meta`
|
||||
uploadIDLock.RUnlock()
|
||||
readLocked = false
|
||||
if err = uploadIDLock.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = uploadIDLock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return PartInfo{}, err
|
||||
}
|
||||
defer uploadIDLock.Unlock()
|
||||
@@ -553,6 +574,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
}
|
||||
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
// Return success.
|
||||
return PartInfo{
|
||||
PartNumber: partID,
|
||||
@@ -574,8 +597,10 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
|
||||
UploadID: uploadID,
|
||||
}
|
||||
|
||||
var err error
|
||||
uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
|
||||
if err := uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return MultipartInfo{}, err
|
||||
}
|
||||
defer uploadIDLock.RUnlock()
|
||||
@@ -621,9 +646,10 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
|
||||
// Implements S3 compatible ListObjectParts API. The resulting
|
||||
// ListPartsInfo structure is marshaled directly into XML and
|
||||
// replied back to the client.
|
||||
func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
|
||||
func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
|
||||
uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
|
||||
if err := uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return ListPartsInfo{}, err
|
||||
}
|
||||
defer uploadIDLock.RUnlock()
|
||||
@@ -716,7 +742,8 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
// Hold read-locks to verify uploaded parts, also disallows
|
||||
// parallel part uploads as well.
|
||||
uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
|
||||
if err = uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
defer uploadIDLock.RUnlock()
|
||||
@@ -770,7 +797,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
|
||||
// Order online disks in accordance with distribution order.
|
||||
// Order parts metadata in accordance with distribution order.
|
||||
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi.Erasure.Distribution)
|
||||
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)
|
||||
|
||||
// Save current erasure metadata for validation.
|
||||
var currentFI = fi
|
||||
@@ -871,7 +898,8 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
|
||||
// Hold namespace to complete the transaction
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
@@ -911,9 +939,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
// All parts are purged from all disks and reference to the uploadID
|
||||
// would be removed from the system, rollback is not possible on this
|
||||
// operation.
|
||||
func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
|
||||
func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) {
|
||||
lk := er.NewNSLock(bucket, pathJoin(object, uploadID))
|
||||
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
|
||||
@@ -42,10 +42,19 @@ var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform

/// Object Operations

func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
for _, onlineDisk := range onlineDisks {
if onlineDisk != nil && onlineDisk.IsOnline() {
online++
}
}
return online
}

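countOnlineDisks supports the deferred-cleanup pattern used throughout this changeset: the temporary object is removed only when the write did not land on every drive, instead of unconditionally on every return path. A runnable sketch with a trimmed-down StorageAPI interface is below.

```go
package main

import "fmt"

// StorageAPI is reduced to the one method this sketch needs.
type StorageAPI interface {
	IsOnline() bool
}

type fakeDisk struct{ online bool }

func (d *fakeDisk) IsOnline() bool { return d.online }

func countOnlineDisks(disks []StorageAPI) (online int) {
	for _, d := range disks {
		if d != nil && d.IsOnline() {
			online++
		}
	}
	return online
}

// writeObject mimics the deferred-cleanup pattern from the hunks above: the
// temporary upload data is cleaned up only when not every disk ended up
// online. The cleanup callback stands in for deleting the tmp object.
func writeObject(disks []StorageAPI, cleanup func()) {
	var online int
	defer func() {
		if online != len(disks) {
			cleanup()
		}
	}()

	// ... erasure-encode and persist data here ...

	online = countOnlineDisks(disks)
}

func main() {
	disks := []StorageAPI{&fakeDisk{true}, &fakeDisk{false}, nil}
	writeObject(disks, func() { fmt.Println("cleaning up temporary object") })
}
```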
// CopyObject - copy object source object to destination object.
|
||||
// if source object and destination object are same we only
|
||||
// update metadata.
|
||||
func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
|
||||
func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
|
||||
// This call shouldn't be used for anything other than metadata updates or adding self referential versions.
|
||||
if !srcInfo.metadataOnly {
|
||||
return oi, NotImplemented{}
|
||||
@@ -54,7 +63,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
defer ObjectPathUpdated(pathJoin(dstBucket, dstObject))
|
||||
|
||||
lk := er.NewNSLock(dstBucket, dstObject)
|
||||
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
@@ -84,7 +94,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
|
||||
}
|
||||
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
|
||||
|
||||
versionID := srcInfo.VersionID
|
||||
if srcInfo.versionOnly {
|
||||
@@ -115,8 +125,13 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
|
||||
tempObj := mustGetUUID()
|
||||
|
||||
var online int
|
||||
// Cleanup in case of xl.meta writing failure
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
// Write unique `xl.meta` for each disk.
|
||||
if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
|
||||
@@ -128,6 +143,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
return oi, toObjectErr(err, srcBucket, srcObject)
|
||||
}
|
||||
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
return fi.ToObjectInfo(srcBucket, srcObject), nil
|
||||
}
|
||||
|
||||
@@ -147,12 +164,14 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
|
||||
lock := er.NewNSLock(bucket, object)
|
||||
switch lockType {
|
||||
case writeLock:
|
||||
if err = lock.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nsUnlocker = lock.Unlock
|
||||
case readLock:
|
||||
if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nsUnlocker = lock.RUnlock
|
||||
@@ -208,10 +227,11 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
|
||||
//
|
||||
// startOffset indicates the starting read location of the object.
|
||||
// length indicates the total length of the object.
|
||||
func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
|
||||
func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
|
||||
// Lock the object before reading.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lk.RUnlock()
|
||||
@@ -234,7 +254,7 @@ func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, s
|
||||
func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
|
||||
// Reorder online disks based on erasure distribution order.
|
||||
// Reorder parts metadata based on erasure distribution order.
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
|
||||
|
||||
// For negative length read everything.
|
||||
if length < 0 {
|
||||
@@ -375,7 +395,8 @@ func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object strin
|
||||
if !opts.NoLock {
|
||||
// Lock the object before reading.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
defer lk.RUnlock()
|
||||
@@ -619,10 +640,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
|
||||
storageDisks := er.getDisks()
|
||||
|
||||
// Get parity and data drive count based on storage class metadata
|
||||
parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
|
||||
if parityDrives <= 0 {
|
||||
parityDrives = er.defaultParityCount
|
||||
parityDrives := len(storageDisks) / 2
|
||||
if !opts.MaxParity {
|
||||
// Get parity and data drive count based on storage class metadata
|
||||
parityDrives = globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
|
||||
if parityDrives <= 0 {
|
||||
parityDrives = er.defaultParityCount
|
||||
}
|
||||
}
|
||||
dataDrives := len(storageDisks) - parityDrives
|
||||
|
||||
@@ -633,11 +657,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
writeQuorum++
|
||||
}
|
||||
|
||||
// Delete temporary object in the event of failure.
|
||||
// If PutObject succeeded there would be no temporary
|
||||
// object to delete.
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
|
||||
|
||||
// Validate input data size and it can never be less than zero.
|
||||
if data.Size() < -1 {
|
||||
logger.LogIf(ctx, errInvalidArgument, logger.Application)
|
||||
@@ -683,7 +702,14 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
switch size := data.Size(); {
|
||||
case size == 0:
|
||||
buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
|
||||
case size == -1 || size >= fi.Erasure.BlockSize:
|
||||
case size == -1:
|
||||
if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
|
||||
buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
|
||||
} else {
|
||||
buffer = er.bp.Get()
|
||||
defer er.bp.Put(buffer)
|
||||
}
|
||||
case size >= fi.Erasure.BlockSize:
|
||||
buffer = er.bp.Get()
|
||||
defer er.bp.Put(buffer)
|
||||
case size < fi.Erasure.BlockSize:
|
||||
@@ -698,12 +724,23 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
partName := "part.1"
|
||||
tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)
|
||||
|
||||
// Delete temporary object in the event of failure.
|
||||
// If PutObject succeeded there would be no temporary
|
||||
// object to delete.
|
||||
var online int
|
||||
defer func() {
|
||||
if online != len(onlineDisks) {
|
||||
er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
|
||||
}
|
||||
}()
|
||||
|
||||
writers := make([]io.Writer, len(onlineDisks))
|
||||
for i, disk := range onlineDisks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
|
||||
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj,
|
||||
erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
|
||||
}
|
||||
|
||||
n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
|
||||
@@ -719,8 +756,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
}
|
||||
|
||||
if !opts.NoLock {
|
||||
var err error
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
@@ -788,6 +827,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
break
|
||||
}
|
||||
}
|
||||
online = countOnlineDisks(onlineDisks)
|
||||
|
||||
return fi.ToObjectInfo(bucket, object), nil
|
||||
}
|
||||
@@ -841,8 +881,8 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
|
||||
var err error
|
||||
defer ObjectPathUpdated(pathJoin(bucket, object))
|
||||
|
||||
tmpObj := mustGetUUID()
|
||||
disks := er.getDisks()
|
||||
tmpObj := mustGetUUID()
|
||||
if bucket == minioMetaTmpBucket {
|
||||
tmpObj = object
|
||||
} else {
|
||||
@@ -863,7 +903,7 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
}
|
||||
return cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj)
|
||||
return disks[index].Delete(ctx, minioMetaTmpBucket, tmpObj, true)
|
||||
}, index)
|
||||
}
|
||||
|
||||
@@ -1029,7 +1069,8 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
|
||||
}
|
||||
// Acquire a write lock before deleting the object.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err = lk.GetLock(ctx, globalDeleteOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalDeleteOperationTimeout)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
@@ -1138,9 +1179,11 @@ func (er erasureObjects) addPartial(bucket, object, versionID string) {
|
||||
|
||||
// PutObjectTags - replace or add tags to an existing object
|
||||
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
|
||||
var err error
|
||||
// Lock the object before updating tags.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
@@ -1170,7 +1213,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
|
||||
return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
|
||||
}
|
||||
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
|
||||
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
|
||||
for i, metaFi := range metaArr {
|
||||
if metaFi.IsValid() {
|
||||
// clean fi.Meta of tag key, before updating the new tags
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
type erasureServerPools struct {
|
||||
@@ -106,11 +107,10 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
|
||||
return nil, fmt.Errorf("All serverPools should have same deployment ID expected %s, got %s", deploymentID, formats[i].ID)
|
||||
}
|
||||
|
||||
z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i], commonParityDrives)
|
||||
z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i], commonParityDrives, i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
z.serverPools[i].poolNumber = i
|
||||
}
|
||||
ctx, z.shutdown = context.WithCancel(ctx)
|
||||
go intDataUpdateTracker.start(ctx, localDrives...)
|
||||
@@ -244,6 +244,50 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, s
|
||||
return serverPools
|
||||
}
|
||||
|
||||
// getPoolIdxExisting returns the (first) found object pool index containing an object.
// If the object exists, but the latest version is a delete marker, the index with it is still returned.
// If the object does not exist ObjectNotFound error is returned.
// If any other error is found, it is returned.
// The check is skipped if there is only one zone, and 0, nil is always returned in that case.
func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, object string) (idx int, err error) {
if z.SinglePool() {
return 0, nil
}

errs := make([]error, len(z.serverPools))
objInfos := make([]ObjectInfo, len(z.serverPools))

var wg sync.WaitGroup
for i, pool := range z.serverPools {
wg.Add(1)
go func(i int, pool *erasureSets) {
defer wg.Done()
objInfos[i], errs[i] = pool.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
}(i, pool)
}
wg.Wait()

for i, err := range errs {
if err == nil {
return i, nil
}
if isErrObjectNotFound(err) {
// No object exists or its a delete marker,
// check objInfo to confirm.
if objInfos[i].DeleteMarker && objInfos[i].Name != "" {
return i, nil
}

// objInfo is not valid, truly the object doesn't
// exist proceed to next pool.
continue
}
return -1, err
}

return -1, toObjectErr(errFileNotFound, bucket, object)
}

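getPoolIdxExisting fans a metadata lookup out to every pool in parallel and then picks the first pool that actually holds the object (or a delete marker for it). The sketch below reproduces that fan-out shape with plain goroutines and index-ordered result slices; the pool type and lookup call are stand-ins, and delete-marker handling is omitted.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("object not found")

// pool is a stand-in for an erasure-set pool; lookup plays the role of
// GetObjectInfo and reports whether it holds the object.
type pool struct {
	name    string
	objects map[string]bool
}

func (p *pool) lookup(object string) error {
	if p.objects[object] {
		return nil
	}
	return errNotFound
}

// findPool queries every pool concurrently, keeping results in index order so
// the first pool that answers positively wins.
func findPool(pools []*pool, object string) (int, error) {
	errs := make([]error, len(pools))
	var wg sync.WaitGroup
	for i, p := range pools {
		wg.Add(1)
		go func(i int, p *pool) {
			defer wg.Done()
			errs[i] = p.lookup(object)
		}(i, p)
	}
	wg.Wait()

	for i, err := range errs {
		if err == nil {
			return i, nil
		}
		if errors.Is(err, errNotFound) {
			continue // try the next pool
		}
		return -1, err
	}
	return -1, errNotFound
}

func main() {
	pools := []*pool{
		{name: "pool-0", objects: map[string]bool{}},
		{name: "pool-1", objects: map[string]bool{"bucket/obj": true}},
	}
	idx, err := findPool(pools, "bucket/obj")
	fmt.Println(idx, err) // 1 <nil>
}
```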
// getPoolIdx returns the found previous object and its corresponding pool idx,
|
||||
// if none are found falls back to most available space pool.
|
||||
func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
|
||||
@@ -311,8 +355,8 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) BackendInfo() (b BackendInfo) {
|
||||
b.Type = BackendErasure
|
||||
func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
|
||||
b.Type = madmin.Erasure
|
||||
|
||||
scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
|
||||
if scParity <= 0 {
|
||||
@@ -331,6 +375,35 @@ func (z *erasureServerPools) BackendInfo() (b BackendInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
|
||||
var storageInfo StorageInfo
|
||||
|
||||
storageInfos := make([]StorageInfo, len(z.serverPools))
|
||||
storageInfosErrs := make([][]error, len(z.serverPools))
|
||||
g := errgroup.WithNErrs(len(z.serverPools))
|
||||
for index := range z.serverPools {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
storageInfos[index], storageInfosErrs[index] = z.serverPools[index].LocalStorageInfo(ctx)
|
||||
return nil
|
||||
}, index)
|
||||
}
|
||||
|
||||
// Wait for the go routines.
|
||||
g.Wait()
|
||||
|
||||
storageInfo.Backend = z.BackendInfo()
|
||||
for _, lstorageInfo := range storageInfos {
|
||||
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for i := range z.serverPools {
|
||||
errs = append(errs, storageInfosErrs[i]...)
|
||||
}
|
||||
return storageInfo, errs
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []error) {
|
||||
var storageInfo StorageInfo
|
||||
|
||||
@@ -541,12 +614,14 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
|
||||
lock := z.NewNSLock(bucket, object)
|
||||
switch lockType {
|
||||
case writeLock:
|
||||
if err = lock.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nsUnlocker = lock.Unlock
|
||||
case readLock:
|
||||
if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nsUnlocker = lock.RUnlock
|
||||
@@ -608,7 +683,8 @@ func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object s
|
||||
|
||||
// Lock the object before reading.
|
||||
lk := z.NewNSLock(bucket, object)
|
||||
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
defer lk.RUnlock()
|
||||
@@ -683,8 +759,7 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
|
||||
return z.serverPools[0].DeleteObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// We don't know the size here set 1GiB atleast.
|
||||
idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30)
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return objInfo, err
|
||||
}
|
||||
@@ -707,7 +782,11 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
|
||||
origIndexMap := map[int][]int{}
|
||||
if !z.SinglePool() {
|
||||
for j, obj := range objects {
|
||||
idx, err := z.getPoolIdx(ctx, bucket, obj.ObjectName, 1<<30)
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, obj.ObjectName)
|
||||
if isErrObjectNotFound(err) {
|
||||
derrs[j] = err
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
// Unhandled errors return right here.
|
||||
for i := range derrs {
|
||||
@@ -720,9 +799,11 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
// Acquire a bulk write lock across 'objects'
|
||||
multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
|
||||
if err := multiDeleteLock.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = multiDeleteLock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
for i := range derrs {
|
||||
derrs[i] = err
|
||||
}
|
||||
@@ -823,7 +904,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
|
||||
Bucket: bucket,
|
||||
Prefix: prefix,
|
||||
Separator: delimiter,
|
||||
Limit: maxKeys,
|
||||
Limit: maxKeysPlusOne(maxKeys, marker != ""),
|
||||
Marker: marker,
|
||||
InclDeleted: true,
|
||||
AskDisks: globalAPIConfig.getListQuorum(),
|
||||
@@ -843,6 +924,11 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
|
||||
if err != nil && err != io.EOF {
|
||||
return loi, err
|
||||
}
|
||||
if versionMarker == "" {
|
||||
// If we are not looking for a specific version skip it.
|
||||
marker, _ = parseMarker(marker)
|
||||
merged.forwardPast(marker)
|
||||
}
|
||||
objects := merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
|
||||
loi.IsTruncated = err == nil && len(objects) > 0
|
||||
if maxKeys > 0 && len(objects) > maxKeys {
|
||||
@@ -864,6 +950,16 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
|
||||
return loi, nil
|
||||
}
|
||||
|
||||
func maxKeysPlusOne(maxKeys int, addOne bool) int {
if maxKeys < 0 || maxKeys > maxObjectList {
maxKeys = maxObjectList
}
if addOne {
maxKeys++
}
return maxKeys
}

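maxKeysPlusOne exists because a continued listing also returns the marker entry itself, which forwardPast then drops; without one extra slot a full page would come back one item short. A small demonstration follows; the maxObjectList value is an assumed page cap, not taken from this diff.

```go
package main

import "fmt"

const maxObjectList = 1000 // assumed cap on a single listing page

func maxKeysPlusOne(maxKeys int, addOne bool) int {
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}
	if addOne {
		maxKeys++
	}
	return maxKeys
}

func main() {
	// When continuing from a marker, the marker entry comes back from the
	// lister and is dropped afterwards, so one extra slot is requested.
	fmt.Println(maxKeysPlusOne(1000, false)) // 1000: fresh listing
	fmt.Println(maxKeysPlusOne(1000, true))  // 1001: continuation with a marker
	fmt.Println(maxKeysPlusOne(-5, true))    // 1001: invalid input clamped first
}
```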
func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
|
||||
var loi ListObjectsInfo
|
||||
|
||||
@@ -871,7 +967,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
|
||||
Bucket: bucket,
|
||||
Prefix: prefix,
|
||||
Separator: delimiter,
|
||||
Limit: maxKeys,
|
||||
Limit: maxKeysPlusOne(maxKeys, marker != ""),
|
||||
Marker: marker,
|
||||
InclDeleted: false,
|
||||
AskDisks: globalAPIConfig.getListQuorum(),
|
||||
@@ -880,6 +976,8 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
|
||||
logger.LogIf(ctx, err)
|
||||
return loi, err
|
||||
}
|
||||
marker, _ = parseMarker(marker)
|
||||
merged.forwardPast(marker)
|
||||
|
||||
// Default is recursive, if delimiter is set then list non recursive.
|
||||
objects := merged.fileInfos(bucket, prefix, delimiter)
|
||||
@@ -1266,9 +1364,11 @@ func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketI
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
|
||||
var err error
|
||||
// Acquire lock on format.json
|
||||
formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
|
||||
if err := formatLock.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
ctx, err = formatLock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
defer formatLock.Unlock()
|
||||
@@ -1398,90 +1498,107 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
|
||||
type HealObjectFn func(bucket, object, versionID string) error
|
||||
|
||||
func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error {
|
||||
// If listing did not return any entries upon first attempt, we
|
||||
// return `ObjectNotFound`, to indicate the caller for any
|
||||
// actions they may want to take as if `prefix` is missing.
|
||||
err := toObjectErr(errFileNotFound, bucket, prefix)
|
||||
for _, erasureSet := range z.serverPools {
|
||||
for _, set := range erasureSet.sets {
|
||||
var entryChs []FileInfoVersionsCh
|
||||
var mu sync.Mutex
|
||||
errCh := make(chan error)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
defer cancel()
|
||||
|
||||
for _, erasureSet := range z.serverPools {
|
||||
var wg sync.WaitGroup
|
||||
for _, disk := range set.getOnlineDisks() {
|
||||
disk := disk
|
||||
for _, set := range erasureSet.sets {
|
||||
set := set
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
entryCh, err := disk.WalkVersions(ctx, bucket, prefix, "", true, ctx.Done())
|
||||
if err != nil {
|
||||
// Disk walk returned error, ignore it.
|
||||
|
||||
disks, _ := set.getOnlineDisksWithHealing()
|
||||
if len(disks) == 0 {
|
||||
errCh <- errors.New("HealObjects: No non-healing disks found")
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
healEntry := func(entry metaCacheEntry) {
|
||||
if entry.isDir() {
|
||||
return
|
||||
}
|
||||
// We might land at .metacache, .trash, .multipart
|
||||
// no need to heal them skip, only when bucket
|
||||
// is '.minio.sys'
|
||||
if bucket == minioMetaBucket {
|
||||
if wildcard.Match("buckets/*/.metacache/*", entry.name) {
|
||||
return
|
||||
}
|
||||
if wildcard.Match("tmp/*", entry.name) {
|
||||
return
|
||||
}
|
||||
if wildcard.Match("multipart/*", entry.name) {
|
||||
return
|
||||
}
|
||||
if wildcard.Match("tmp-old/*", entry.name) {
|
||||
return
|
||||
}
|
||||
}
|
||||
fivs, err := entry.fileInfoVersions(bucket)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)
|
||||
for _, version := range fivs.Versions {
|
||||
if err := healObject(bucket, version.Name, version.VersionID); err != nil {
|
||||
errCh <- err
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// How to resolve partial results.
|
||||
resolver := metadataResolutionParams{
|
||||
dirQuorum: 1,
|
||||
objQuorum: 1,
|
||||
bucket: bucket,
|
||||
}
|
||||
|
||||
path := baseDirFromPrefix(prefix)
|
||||
if path == "" {
|
||||
path = prefix
|
||||
}
|
||||
|
||||
if err := listPathRaw(ctx, listPathRawOptions{
|
||||
disks: disks,
|
||||
bucket: bucket,
|
||||
path: path,
|
||||
recursive: true,
|
||||
forwardTo: "",
|
||||
minDisks: 1,
|
||||
reportNotFound: false,
|
||||
agreed: healEntry,
|
||||
partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
|
||||
entry, ok := entries.resolve(&resolver)
|
||||
if ok {
|
||||
healEntry(*entry)
|
||||
}
|
||||
},
|
||||
finished: nil,
|
||||
}); err != nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
entryChs = append(entryChs, FileInfoVersionsCh{
|
||||
Ch: entryCh,
|
||||
})
|
||||
mu.Unlock()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
entriesValid := make([]bool, len(entryChs))
|
||||
entries := make([]FileInfoVersions, len(entryChs))
|
||||
|
||||
for {
|
||||
entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
// Remove empty directories if found - they have no meaning.
|
||||
// Can be left over from highly concurrent put/remove.
|
||||
if quorumCount > set.setDriveCount/2 && entry.IsEmptyDir {
|
||||
if !opts.DryRun && opts.Remove {
|
||||
set.deleteEmptyDir(ctx, bucket, entry.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Indicate that first attempt was a success and subsequent loop
|
||||
// knows that its not our first attempt at 'prefix'
|
||||
err = nil
|
||||
|
||||
if quorumCount == set.setDriveCount && opts.ScanMode == madmin.HealNormalScan {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, version := range entry.Versions {
|
||||
if err := healObject(bucket, version.Name, version.VersionID); err != nil {
|
||||
return toObjectErr(err, bucket, version.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}()
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
object = encodeDirObject(object)
|
||||
|
||||
lk := z.NewNSLock(bucket, object)
|
||||
if bucket == minioMetaBucket {
|
||||
// For .minio.sys bucket heals we should hold write locks.
|
||||
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
} else {
|
||||
// Lock the object before healing. Use read lock since healing
|
||||
// will only regenerate parts & xl.meta of outdated disks.
|
||||
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
defer lk.RUnlock()
|
||||
}
|
||||
|
||||
for _, pool := range z.serverPools {
|
||||
result, err := pool.HealObject(ctx, bucket, object, versionID, opts)
|
||||
result.Object = decodeDirObject(result.Object)
|
||||
@@ -1506,24 +1623,24 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetrics - no op
|
||||
// GetMetrics - returns metrics of local disks
|
||||
func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
|
||||
logger.LogIf(ctx, NotImplemented{})
|
||||
return &BackendMetrics{}, NotImplemented{}
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) getPoolAndSet(id string) (int, int, error) {
func (z *erasureServerPools) getPoolAndSet(id string) (poolIdx, setIdx, diskIdx int, err error) {
for poolIdx := range z.serverPools {
format := z.serverPools[poolIdx].format
for setIdx, set := range format.Erasure.Sets {
for _, diskID := range set {
for i, diskID := range set {
if diskID == id {
return poolIdx, setIdx, nil
return poolIdx, setIdx, i, nil
}
}
}
}
return 0, 0, fmt.Errorf("DiskID(%s) %w", id, errDiskNotFound)
return -1, -1, -1, fmt.Errorf("DiskID(%s) %w", id, errDiskNotFound)
}

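The lookup now reports the disk's position inside its set as well, with -1 sentinels on a miss. A compact sketch of the same nested pool, set, disk search over plain string IDs:

```go
package main

import (
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// findDisk mirrors the extended lookup above: given the formats of every pool
// (each pool holds sets, each set holds disk IDs), it returns the pool, set
// and disk position of an ID, or -1 sentinels plus an error when it is absent.
func findDisk(pools [][][]string, id string) (poolIdx, setIdx, diskIdx int, err error) {
	for p := range pools {
		for s, set := range pools[p] {
			for d, diskID := range set {
				if diskID == id {
					return p, s, d, nil
				}
			}
		}
	}
	return -1, -1, -1, fmt.Errorf("DiskID(%s) %w", id, errDiskNotFound)
}

func main() {
	pools := [][][]string{
		{{"a", "b"}, {"c", "d"}}, // pool 0: two sets of two drives
		{{"e", "f"}, {"g", "h"}}, // pool 1
	}
	fmt.Println(findDisk(pools, "g")) // 1 1 0 <nil>
	fmt.Println(findDisk(pools, "x")) // -1 -1 -1 DiskID(x) disk not found
}
```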
// HealthOptions takes input options to return sepcific information
|
||||
@@ -1553,7 +1670,7 @@ func (z *erasureServerPools) ReadHealth(ctx context.Context) bool {
|
||||
|
||||
for _, localDiskIDs := range diskIDs {
|
||||
for _, id := range localDiskIDs {
|
||||
poolIdx, setIdx, err := z.getPoolAndSet(id)
|
||||
poolIdx, setIdx, _, err := z.getPoolAndSet(id)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -1592,7 +1709,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
|
||||
|
||||
for _, localDiskIDs := range diskIDs {
|
||||
for _, id := range localDiskIDs {
|
||||
poolIdx, setIdx, err := z.getPoolAndSet(id)
|
||||
poolIdx, setIdx, _, err := z.getPoolAndSet(id)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
@@ -1615,7 +1732,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
|
||||
// we need to tell healthy status as 'false' so that this server
|
||||
// is not taken down for maintenance
|
||||
var err error
|
||||
aggHealStateResult, err = getAggregatedBackgroundHealState(ctx)
|
||||
aggHealStateResult, err = getAggregatedBackgroundHealState(ctx, nil)
|
||||
if err != nil {
|
||||
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Unable to verify global heal status: %w", err))
|
||||
return HealthResult{
|
||||
@@ -1669,7 +1786,7 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s
|
||||
}
|
||||
|
||||
// We don't know the size here set 1GiB atleast.
|
||||
idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30)
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
@@ -1684,8 +1801,7 @@ func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, objec
|
||||
return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// We don't know the size here set 1GiB atleast.
|
||||
idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30)
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
@@ -1700,8 +1816,7 @@ func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object s
|
||||
return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// We don't know the size here set 1GiB atleast.
|
||||
idx, err := z.getPoolIdx(ctx, bucket, object, 1<<30)
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/dchest/siphash"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
@@ -36,6 +37,7 @@ import (
|
||||
"github.com/minio/minio/pkg/bpool"
|
||||
"github.com/minio/minio/pkg/console"
|
||||
"github.com/minio/minio/pkg/dsync"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
)
|
||||
@@ -43,10 +45,7 @@ import (
|
||||
// setsDsyncLockers is encapsulated type for Close()
|
||||
type setsDsyncLockers [][]dsync.NetLocker
|
||||
|
||||
// Information of a new disk connection
|
||||
type diskConnectInfo struct {
|
||||
setIndex int
|
||||
}
|
||||
const envMinioDeleteCleanupInterval = "MINIO_DELETE_CLEANUP_INTERVAL"
|
||||
|
||||
// erasureSets implements ObjectLayer combining a static list of erasure coded
|
||||
// object sets. NOTE: There is no dynamic scaling allowed or intended in
|
||||
@@ -83,9 +82,11 @@ type erasureSets struct {
|
||||
setCount, setDriveCount int
|
||||
defaultParityCount int
|
||||
|
||||
poolNumber int
|
||||
poolIndex int
|
||||
|
||||
disksConnectEvent chan diskConnectInfo
|
||||
// A channel to send the set index to the MRF when
|
||||
// any disk belonging to that set is connected
|
||||
setReconnectEvent chan int
|
||||
|
||||
// Distribution algorithm of choice.
|
||||
distributionAlgo string
|
||||
@@ -195,6 +196,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
|
||||
// and re-arranges the disks in proper position.
|
||||
func (s *erasureSets) connectDisks() {
|
||||
var wg sync.WaitGroup
|
||||
var setsJustConnected = make([]bool, s.setCount)
|
||||
diskMap := s.getDiskMap()
|
||||
for _, endpoint := range s.endpoints {
|
||||
diskPath := endpoint.String()
|
||||
@@ -217,7 +219,7 @@ func (s *erasureSets) connectDisks() {
|
||||
}
|
||||
return
|
||||
}
|
||||
if disk.IsLocal() && disk.Healing() {
|
||||
if disk.IsLocal() && disk.Healing() != nil {
|
||||
globalBackgroundHealState.pushHealLocalDisks(disk.Endpoint())
|
||||
logger.Info(fmt.Sprintf("Found the drive %s that needs healing, attempting to heal...", disk))
|
||||
}
|
||||
@@ -246,21 +248,32 @@ func (s *erasureSets) connectDisks() {
|
||||
disk.SetDiskID(format.Erasure.This)
|
||||
s.erasureDisks[setIndex][diskIndex] = disk
|
||||
}
|
||||
disk.SetDiskLoc(s.poolIndex, setIndex, diskIndex)
|
||||
s.endpointStrings[setIndex*s.setDriveCount+diskIndex] = disk.String()
|
||||
setsJustConnected[setIndex] = true
|
||||
s.erasureDisksMu.Unlock()
|
||||
go func(setIndex int) {
|
||||
idler := time.NewTimer(100 * time.Millisecond)
|
||||
defer idler.Stop()
|
||||
|
||||
// Send a new disk connect event with a timeout
|
||||
select {
|
||||
case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
|
||||
case <-idler.C:
|
||||
}
|
||||
}(setIndex)
|
||||
}(endpoint)
|
||||
}
|
||||
|
||||
wg.Wait()

go func() {
idler := time.NewTimer(100 * time.Millisecond)
defer idler.Stop()

for setIndex, justConnected := range setsJustConnected {
if !justConnected {
continue
}

// Send a new set connect event with a timeout
idler.Reset(100 * time.Millisecond)
select {
case s.setReconnectEvent <- setIndex:
case <-idler.C:
}
}
}()
}

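Connect events are now batched per scan: the loop records which sets regained a drive and a single goroutine afterwards emits one event per set, each send bounded by a short timeout so a missing listener cannot block reconnection. A sketch of that pattern, with illustrative channel and timeout values:

```go
package main

import (
	"fmt"
	"time"
)

// notifyReconnectedSets emits exactly one event per set that regained a drive
// during a scan; each send gives up after a short timeout so an unresponsive
// listener cannot stall the connect loop.
func notifyReconnectedSets(setsJustConnected []bool, events chan<- int) {
	go func() {
		idler := time.NewTimer(100 * time.Millisecond)
		defer idler.Stop()

		for setIndex, justConnected := range setsJustConnected {
			if !justConnected {
				continue
			}
			idler.Reset(100 * time.Millisecond)
			select {
			case events <- setIndex:
			case <-idler.C: // nobody listening right now, drop the event
			}
		}
	}()
}

func main() {
	events := make(chan int)
	notifyReconnectedSets([]bool{false, true, true, false}, events)
	fmt.Println(<-events) // 1
	fmt.Println(<-events) // 2
}
```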
// monitorAndConnectEndpoints this is a monitoring loop to keep track of disconnected
|
||||
@@ -331,7 +344,7 @@ func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI {
|
||||
const defaultMonitorConnectEndpointInterval = defaultMonitorNewDiskInterval + time.Second*5
|
||||
|
||||
// Initialize new set of erasure coded sets.
|
||||
func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3, defaultParityCount int) (*erasureSets, error) {
|
||||
func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3, defaultParityCount, poolIdx int) (*erasureSets, error) {
|
||||
setCount := len(format.Erasure.Sets)
|
||||
setDriveCount := len(format.Erasure.Sets[0])
|
||||
|
||||
@@ -349,22 +362,21 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
|
||||
setDriveCount: setDriveCount,
|
||||
defaultParityCount: defaultParityCount,
|
||||
format: format,
|
||||
disksConnectEvent: make(chan diskConnectInfo),
|
||||
setReconnectEvent: make(chan int),
|
||||
distributionAlgo: format.Erasure.DistributionAlgo,
|
||||
deploymentID: uuid.MustParse(format.ID),
|
||||
mrfOperations: make(map[healSource]int),
|
||||
poolIndex: poolIdx,
|
||||
}
|
||||
|
||||
mutex := newNSLock(globalIsDistErasure)
|
||||
|
||||
// Number of buffers, max 2GB.
|
||||
n := setCount * setDriveCount
|
||||
if n > 100 {
|
||||
n = 100
|
||||
}
|
||||
// Number of buffers, max 2GB
|
||||
n := (2 * humanize.GiByte) / (blockSizeV2 * 2)
|
||||
|
||||
// Initialize byte pool once for all sets, bpool size is set to
|
||||
// setCount * setDriveCount with each memory upto blockSizeV1.
|
||||
bp := bpool.NewBytePoolCap(n, blockSizeV1, blockSizeV1*2)
|
||||
// setCount * setDriveCount with each memory upto blockSizeV2.
|
||||
bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
|
||||
@@ -398,27 +410,34 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
disk.SetDiskLoc(s.poolIndex, m, n)
|
||||
s.endpointStrings[m*setDriveCount+n] = disk.String()
|
||||
s.erasureDisks[m][n] = disk
|
||||
}
|
||||
|
||||
// Initialize erasure objects for a given set.
|
||||
s.sets[i] = &erasureObjects{
|
||||
setNumber: i,
|
||||
setIndex: i,
|
||||
poolIndex: poolIdx,
|
||||
setDriveCount: setDriveCount,
|
||||
defaultParityCount: defaultParityCount,
|
||||
getDisks: s.GetDisks(i),
|
||||
getLockers: s.GetLockers(i),
|
||||
getEndpoints: s.GetEndpoints(i),
|
||||
deletedCleanupSleeper: newDynamicSleeper(10, 10*time.Second),
|
||||
deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second),
|
||||
nsMutex: mutex,
|
||||
bp: bp,
|
||||
mrfOpCh: make(chan partialOperation, 10000),
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup ".trash/" folder every 30 minutes with sufficient sleep cycles.
const deletedObjectsCleanupInterval = 30 * time.Minute
// cleanup ".trash/" folder every 5m minutes with sufficient sleep cycles, between each
// deletes a dynamic sleeper is used with a factor of 10 ratio with max delay between
// deletes to be 2 seconds.
deletedObjectsCleanupInterval, err := time.ParseDuration(env.Get(envMinioDeleteCleanupInterval, "5m"))
if err != nil {
return nil, err
}

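The trash-cleanup interval is no longer a compile-time constant; it is read from MINIO_DELETE_CLEANUP_INTERVAL with a 5m default and parsed as a Go duration. A minimal sketch of that lookup, with envGet standing in for MinIO's env.Get helper:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// envGet returns the environment value for key, or the fallback when unset.
func envGet(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

// getDuration parses the configured interval; values like "5m", "90s" or
// "1h30m" all work because time.ParseDuration is used.
func getDuration(key, fallback string) (time.Duration, error) {
	return time.ParseDuration(envGet(key, fallback))
}

func main() {
	os.Setenv("MINIO_DELETE_CLEANUP_INTERVAL", "15m")
	d, err := getDuration("MINIO_DELETE_CLEANUP_INTERVAL", "5m")
	fmt.Println(d, err) // 15m0s <nil>
}
```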
// start cleanup stale uploads go-routine.
|
||||
go s.cleanupStaleUploads(ctx, GlobalStaleUploadsCleanupInterval, GlobalStaleUploadsExpiry)
|
||||
@@ -480,7 +499,7 @@ type auditObjectOp struct {
Disks []string `json:"disks"`
}

func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects, poolNum int) {
func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) {
if len(logger.AuditTargets) == 0 {
return
}
@@ -488,8 +507,8 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjec
object = decodeDirObject(object)

op := auditObjectOp{
Pool: poolNum + 1,
Set: set.setNumber + 1,
Pool: set.poolIndex + 1,
Set: set.setIndex + 1,
Disks: set.getEndpoints(),
}

@@ -537,7 +556,7 @@ func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {
storageUsageInfo := func() StorageInfo {
var storageInfo StorageInfo
storageInfos := make([]StorageInfo, len(s.sets))
storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.Type = madmin.Erasure

g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets {
@@ -572,6 +591,37 @@ func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {

// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
var storageInfo madmin.StorageInfo

storageInfos := make([]madmin.StorageInfo, len(s.sets))
storageInfoErrs := make([][]error, len(s.sets))

g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets {
index := index
g.Go(func() error {
storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx)
return nil
}, index)
}

// Wait for the go routines.
g.Wait()

for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
}

var errs []error
for i := range s.sets {
errs = append(errs, storageInfoErrs[i]...)
}

return storageInfo, errs
}

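StorageInfo fans out one goroutine per erasure set and then merges the per-set results by index. A self-contained sketch of that fan-out/fan-in shape, using a plain sync.WaitGroup in place of MinIO's errgroup.WithNErrs helper:

package main

import (
	"fmt"
	"sync"
)

type setInfo struct{ disks int }

// storageInfo is a placeholder per-set query; the real code would
// contact the set's disks and may also return per-disk errors.
func storageInfo(i int) (setInfo, []error) {
	return setInfo{disks: 4}, nil
}

func main() {
	sets := 3
	infos := make([]setInfo, sets)
	errs := make([][]error, sets)

	var wg sync.WaitGroup
	for i := 0; i < sets; i++ {
		wg.Add(1)
		go func(i int) { // capture the index; each goroutine owns one slot
			defer wg.Done()
			infos[i], errs[i] = storageInfo(i)
		}(i)
	}
	wg.Wait()

	total := 0
	for _, in := range infos {
		total += in.disks
	}
	fmt.Println("total disks:", total)
}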
// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
var storageInfo StorageInfo

storageInfos := make([]StorageInfo, len(s.sets))
@@ -581,7 +631,7 @@ func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
for index := range s.sets {
index := index
g.Go(func() error {
storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx)
storageInfos[index], storageInfoErrs[index] = s.sets[index].LocalStorageInfo(ctx)
return nil
}, index)
}
@@ -619,12 +669,12 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
}
}
select {
case _, ok := <-s.disksConnectEvent:
case _, ok := <-s.setReconnectEvent:
if ok {
close(s.disksConnectEvent)
close(s.setReconnectEvent)
}
default:
close(s.disksConnectEvent)
close(s.setReconnectEvent)
}
return nil
}
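Shutdown closes the renamed setReconnectEvent channel using a select with a default case, so a pending value is drained first and a channel that is already closed is left alone. A small standalone sketch of that same select shape (it mirrors the code above; it is not a general-purpose double-close guard):

package main

import "fmt"

func closeOnce(ch chan int) {
	select {
	case _, ok := <-ch:
		if ok {
			// A value was pending; the channel is still open, safe to close.
			close(ch)
		}
		// !ok means another goroutine already closed it.
	default:
		// Nothing pending and not closed yet.
		close(ch)
	}
}

func main() {
	ch := make(chan int, 1)
	closeOnce(ch)
	_, ok := <-ch
	fmt.Println("open:", ok) // false: the channel is closed
}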
@@ -808,7 +858,7 @@ func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, er
// GetObjectNInfo - returns object info and locked object ReadCloser
func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}

@@ -822,7 +872,7 @@ func (s *erasureSets) parentDirIsObject(ctx context.Context, bucket, parent stri
// PutObject - writes an object to hashedSet based on the object name.
func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
opts.ParentIsObject = s.parentDirIsObject
return set.PutObject(ctx, bucket, object, data, opts)
}
@@ -830,14 +880,14 @@ func (s *erasureSets) PutObject(ctx context.Context, bucket string, object strin
// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *erasureSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.GetObjectInfo(ctx, bucket, object, opts)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *erasureSets) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.DeleteObject(ctx, bucket, object, opts)
}

@@ -889,7 +939,7 @@ func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects
delErrs[obj.origIndex] = errs[i]
delObjects[obj.origIndex] = dobjects[i]
if errs[i] == nil {
auditObjectErasureSet(ctx, obj.object.ObjectName, set, s.poolNumber)
auditObjectErasureSet(ctx, obj.object.ObjectName, set)
}
}
}
@@ -902,7 +952,7 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
srcSet := s.getHashedSet(srcObject)
dstSet := s.getHashedSet(dstObject)

auditObjectErasureSet(ctx, dstObject, dstSet, s.poolNumber)
auditObjectErasureSet(ctx, dstObject, dstSet)

cpSrcDstSame := srcSet == dstSet
// Check if this request is only metadata update.
@@ -938,162 +988,18 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}

// FileInfoVersionsCh - file info versions channel
type FileInfoVersionsCh struct {
Ch chan FileInfoVersions
Prev FileInfoVersions
Valid bool
}

// Pop - pops a cached entry if any, or from the cached channel.
func (f *FileInfoVersionsCh) Pop() (fi FileInfoVersions, ok bool) {
if f.Valid {
f.Valid = false
return f.Prev, true
} // No cached entries found, read from channel
f.Prev, ok = <-f.Ch
return f.Prev, ok
}

// Push - cache an entry, for Pop() later.
func (f *FileInfoVersionsCh) Push(fi FileInfoVersions) {
f.Prev = fi
f.Valid = true
}

// FileInfoCh - file info channel
type FileInfoCh struct {
Ch chan FileInfo
Prev FileInfo
Valid bool
}

// Pop - pops a cached entry if any, or from the cached channel.
func (f *FileInfoCh) Pop() (fi FileInfo, ok bool) {
if f.Valid {
f.Valid = false
return f.Prev, true
} // No cached entries found, read from channel
f.Prev, ok = <-f.Ch
return f.Prev, ok
}

// Push - cache an entry, for Pop() later.
func (f *FileInfoCh) Push(fi FileInfo) {
f.Prev = fi
f.Valid = true
}

// Calculate lexically least entry across multiple FileInfo channels,
// returns the lexically common entry and the total number of times
// we found this entry. Additionally also returns a boolean
// to indicate if the caller needs to call this function
// again to list the next entry. It is callers responsibility
// if the caller wishes to list N entries to call lexicallySortedEntry
// N times until this boolean is 'false'.
func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileInfoVersions, entriesValid []bool) (FileInfoVersions, int, bool) {
for j := range entryChs {
entries[j], entriesValid[j] = entryChs[j].Pop()
}

var isTruncated = false
for _, valid := range entriesValid {
if !valid {
continue
}
isTruncated = true
break
}

var lentry FileInfoVersions
var found bool
for i, valid := range entriesValid {
if !valid {
continue
}
if !found {
lentry = entries[i]
found = true
continue
}
if entries[i].Name < lentry.Name {
lentry = entries[i]
}
}

// We haven't been able to find any lexically least entry,
// this would mean that we don't have valid entry.
if !found {
return lentry, 0, isTruncated
}

lexicallySortedEntryCount := 0
for i, valid := range entriesValid {
if !valid {
continue
}

// Entries are duplicated across disks,
// we should simply skip such entries.
if lentry.Name == entries[i].Name && lentry.LatestModTime.Equal(entries[i].LatestModTime) {
lexicallySortedEntryCount++
continue
}

// Push all entries which are lexically higher
// and will be returned later in Pop()
entryChs[i].Push(entries[i])
}

return lentry, lexicallySortedEntryCount, isTruncated
}

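The removed FileInfoVersionsCh type is a one-entry pushback wrapper so the merge can "peek" each channel, and lexicallySortedEntryVersions then repeatedly picks the least name across channels while counting duplicates. A compact sketch of the same merge idea over plain string channels (illustrative types, not the MinIO ones):

package main

import "fmt"

// peekCh wraps a channel with a one-entry pushback buffer,
// mirroring the Pop/Push pair on FileInfoVersionsCh.
type peekCh struct {
	ch    chan string
	prev  string
	valid bool
}

func (p *peekCh) pop() (string, bool) {
	if p.valid {
		p.valid = false
		return p.prev, true
	}
	v, ok := <-p.ch
	return v, ok
}

func (p *peekCh) push(v string) { p.prev, p.valid = v, true }

// leastEntry pops one entry per channel, returns the lexically least
// one plus how many channels agreed on it, and pushes everything else
// back so the next round sees it again.
func leastEntry(chs []*peekCh) (least string, count int, more bool) {
	entries := make([]string, len(chs))
	valid := make([]bool, len(chs))
	for i, c := range chs {
		entries[i], valid[i] = c.pop()
		more = more || valid[i]
	}
	found := false
	for i, ok := range valid {
		if ok && (!found || entries[i] < least) {
			least, found = entries[i], true
		}
	}
	if !found {
		return "", 0, false
	}
	for i, ok := range valid {
		if !ok {
			continue
		}
		if entries[i] == least {
			count++ // duplicate across channels, consume it
			continue
		}
		chs[i].push(entries[i]) // lexically higher, keep for later rounds
	}
	return least, count, more
}

func main() {
	mk := func(vals ...string) *peekCh {
		c := &peekCh{ch: make(chan string, len(vals))}
		for _, v := range vals {
			c.ch <- v
		}
		close(c.ch)
		return c
	}
	chs := []*peekCh{mk("a", "c"), mk("a", "b")}
	for {
		entry, n, more := leastEntry(chs)
		if !more {
			break
		}
		fmt.Println(entry, "seen on", n, "channel(s)")
	}
}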
func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
return s.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
}

// Starts a walk versions channel across N number of disks and returns a slice.
// FileInfoCh which can be read from.
func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
var entryChs []FileInfoVersionsCh
var wg sync.WaitGroup
var mutex sync.Mutex
for _, set := range s.sets {
// Reset for the next erasure set.
for _, disk := range set.getLoadBalancedNDisks(ndisks) {
wg.Add(1)
go func(disk StorageAPI) {
defer wg.Done()

entryCh, err := disk.WalkVersions(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
if err != nil {
return
}

mutex.Lock()
entryChs = append(entryChs, FileInfoVersionsCh{
Ch: entryCh,
})
mutex.Unlock()
}(disk)
}
}
wg.Wait()
return entryChs
}

func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
// In list multipart uploads we are going to treat input prefix as the object,
// this means that we are not supporting directory navigation.
set := s.getHashedSet(prefix)
auditObjectErasureSet(ctx, prefix, set, s.poolNumber)
auditObjectErasureSet(ctx, prefix, set)
return set.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.NewMultipartUpload(ctx, bucket, object, opts)
}

@@ -1101,42 +1007,42 @@ func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object str
func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
destSet := s.getHashedSet(destObject)
auditObjectErasureSet(ctx, destObject, destSet, s.poolNumber)
auditObjectErasureSet(ctx, destObject, destSet)
return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

// GetMultipartInfo - return multipart metadata info uploaded at hashedSet.
func (s *erasureSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *erasureSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
return set.AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set, s.poolNumber)
auditObjectErasureSet(ctx, object, set)
opts.ParentIsObject = s.parentDirIsObject
return set.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
@@ -1320,8 +1226,8 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
// Copy "after" drive state too from before.
for k, v := range beforeDrives {
res.Before.Drives[k] = madmin.HealDriveInfo(v)
res.After.Drives[k] = madmin.HealDriveInfo(v)
res.Before.Drives[k] = v
res.After.Drives[k] = v
}

if countErrs(sErrs, errUnformattedDisk) == 0 {
@@ -1364,7 +1270,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
if s.erasureDisks[m][n] != nil {
s.erasureDisks[m][n].Close()
}

storageDisks[index].SetDiskLoc(s.poolIndex, m, n)
s.erasureDisks[m][n] = storageDisks[index]
s.endpointStrings[m*s.setDriveCount+n] = storageDisks[index].String()
}
@@ -1457,47 +1363,25 @@ func (s *erasureSets) maintainMRFList() {
bucket: fOp.bucket,
object: fOp.object,
versionID: fOp.versionID,
opts: &madmin.HealOpts{Remove: true},
}] = fOp.failedSet
s.mrfMU.Unlock()
}
}

func toSourceChTimed(t *time.Timer, sourceCh chan healSource, u healSource) {
t.Reset(100 * time.Millisecond)

// No defer, as we don't know which
// case will be selected

select {
case sourceCh <- u:
case <-t.C:
return
}

// We still need to check the return value
// of Stop, because t could have fired
// between the send on sourceCh and this line.
if !t.Stop() {
<-t.C
}
}
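toSourceChTimed, removed above in favor of a plain blocking send, bounds how long the MRF routine waits to hand an item to the healer; because it reuses one timer across calls it must Stop and drain t.C after a successful send. A standalone sketch of the timed-send idiom, simplified to a fresh timer per call:

package main

import (
	"fmt"
	"time"
)

// sendTimed tries to send v on ch, giving up after d. With a fresh
// timer per call the cleanup reduces to a single deferred Stop; the
// drain dance in the removed helper exists only because it reuses
// one timer across iterations.
func sendTimed(ch chan<- int, v int, d time.Duration) bool {
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case ch <- v:
		return true
	case <-t.C:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(sendTimed(ch, 1, 50*time.Millisecond)) // true, buffer had room
	fmt.Println(sendTimed(ch, 2, 50*time.Millisecond)) // false, nobody receiving
}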

// healMRFRoutine monitors new disks connection, sweep the MRF list
// to find objects related to the new disk that needs to be healed.
func (s *erasureSets) healMRFRoutine() {
// Wait until background heal state is initialized
bgSeq := mustGetHealSequence(GlobalContext)

idler := time.NewTimer(100 * time.Millisecond)
defer idler.Stop()

for e := range s.disksConnectEvent {
for setIndex := range s.setReconnectEvent {
// Get the list of objects related the er.set
// to which the connected disk belongs.
var mrfOperations []healSource
s.mrfMU.Lock()
for k, v := range s.mrfOperations {
if v == e.setIndex {
if v == setIndex {
mrfOperations = append(mrfOperations, k)
}
}
@@ -1505,8 +1389,10 @@ func (s *erasureSets) healMRFRoutine() {

// Heal objects
for _, u := range mrfOperations {
waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)

// Send an object to background heal
toSourceChTimed(idler, bgSeq.sourceCh, u)
bgSeq.sourceCh <- u

s.mrfMU.Lock()
delete(s.mrfOperations, u)

@@ -189,7 +189,7 @@ func TestNewErasureSets(t *testing.T) {
t.Fatalf("Unable to format disks for erasure, %s", err)
}

if _, err := newErasureSets(ctx, endpoints, storageDisks, format, ecDrivesNoConfig(16)); err != nil {
if _, err := newErasureSets(ctx, endpoints, storageDisks, format, ecDrivesNoConfig(16), 0); err != nil {
t.Fatalf("Unable to initialize erasure")
}
}

@@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"fmt"
"io"

"github.com/klauspost/reedsolomon"
@@ -46,13 +47,13 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data

// Do we have enough blocks?
if len(enBlocks) < dataBlocks {
logger.LogIf(ctx, reedsolomon.ErrTooFewShards)
logger.LogIf(ctx, fmt.Errorf("diskBlocks(%d)/dataBlocks(%d) - %w", len(enBlocks), dataBlocks, reedsolomon.ErrTooFewShards))
return 0, reedsolomon.ErrTooFewShards
}

// Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
logger.LogIf(ctx, reedsolomon.ErrShortData)
logger.LogIf(ctx, fmt.Errorf("getDataBlockLen(enBlocks, dataBlocks)(%d)/length(%d) - %w", getDataBlockLen(enBlocks, dataBlocks), length, reedsolomon.ErrShortData))
return 0, reedsolomon.ErrShortData
}


@@ -53,7 +53,8 @@ type erasureObjects struct {
setDriveCount int
defaultParityCount int

setNumber int
setIndex int
poolIndex int

// getDisks returns list of storageAPIs.
getDisks func() []StorageAPI
@@ -186,7 +187,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
}
info, err := disks[index].DiskInfo(context.TODO())
di := madmin.Disk{
Endpoint: endpoints[index],
Endpoint: info.Endpoint,
DrivePath: info.MountPath,
TotalSpace: info.Total,
UsedSpace: info.Used,
@@ -196,6 +197,23 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
Healing: info.Healing,
State: diskErrToDriveState(err),
}
di.PoolIndex, di.SetIndex, di.DiskIndex = disks[index].GetDiskLoc()
if info.Healing {
if hi := disks[index].Healing(); hi != nil {
hd := hi.toHealingDisk()
di.HealInfo = &hd
}
}
di.Metrics = &madmin.DiskMetrics{
APILatencies: make(map[string]string),
APICalls: make(map[string]uint64),
}
for k, v := range info.Metrics.APILatencies {
di.Metrics.APILatencies[k] = v
}
for k, v := range info.Metrics.APICalls {
di.Metrics.APICalls[k] = v
}
if info.Total > 0 {
di.Utilization = float64(info.Used / info.Total * 100)
}
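One thing worth noting in the new utilization line: info.Used and info.Total are unsigned integers, so info.Used / info.Total truncates before the float conversion and yields 0 for any partially used disk. A float-safe version of the same calculation would look like this (a sketch, not the committed code):

package main

import "fmt"

// utilization converts to float64 before dividing, avoiding the
// unsigned integer truncation in the expression above.
func utilization(used, total uint64) float64 {
	if total == 0 {
		return 0
	}
	return float64(used) / float64(total) * 100
}

func main() {
	fmt.Printf("%.1f%%\n", utilization(30, 100))         // 30.0%
	fmt.Println(float64(uint64(30) / uint64(100) * 100)) // 0 - the integer form
}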
@@ -218,7 +236,7 @@ func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []erro
Disks: disksInfo,
}

storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.Type = madmin.Erasure
return storageInfo, errs
}

@@ -229,6 +247,18 @@ func (er erasureObjects) StorageInfo(ctx context.Context) (StorageInfo, []error)
return getStorageInfo(disks, endpoints)
}

// LocalStorageInfo - returns underlying local storage statistics.
func (er erasureObjects) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
disks := er.getLocalDisks()
endpoints := make([]string, len(disks))
for i, disk := range disks {
if disk != nil {
endpoints[i] = disk.String()
}
}
return getStorageInfo(disks, endpoints)
}

func (er erasureObjects) getOnlineDisksWithHealing() (newDisks []StorageAPI, healing bool) {
var wg sync.WaitGroup
disks := er.getDisks()

@@ -52,7 +52,7 @@ func TestErasureEncodeDecode(t *testing.T) {
buffer := make([]byte, len(data), 2*len(data))
copy(buffer, data)

erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV1)
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV2)
if err != nil {
t.Fatalf("Test %d: failed to create erasure: %v", i, err)
}

@@ -340,19 +340,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
return formats, g.Wait()
}

func saveHealingTracker(disk StorageAPI, diskID string) error {
htracker := healingTracker{
ID: diskID,
}
htrackerBytes, err := htracker.MarshalMsg(nil)
if err != nil {
return err
}
return disk.WriteAll(context.TODO(), minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
htrackerBytes)
}

func saveFormatErasure(disk StorageAPI, format *formatErasureV3, heal bool) error {
if disk == nil || format == nil {
return errDiskNotFound
@@ -387,7 +374,9 @@ func saveFormatErasure(disk StorageAPI, format *formatErasureV3, heal bool) erro

disk.SetDiskID(diskID)
if heal {
return saveHealingTracker(disk, diskID)
ctx := context.Background()
ht := newHealingTracker(disk)
return ht.save(ctx)
}
return nil
}

@@ -709,7 +709,8 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,

// Hold write lock on the object.
destLock := fs.NewNSLock(bucket, object)
if err = destLock.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = destLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return oi, err
}
defer destLock.Unlock()

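Throughout the FS code paths below, the lock API changes shape: GetLock/GetRLock now return a context along with the error instead of only an error, and callers thread that context onward. A hedged sketch of what such an interface change looks like from the caller's side (the locker type here is invented for illustration, not MinIO's NSLock):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// locker is a stand-in for a namespace lock whose acquisition returns
// a context tied to the lock's lifetime.
type locker struct{}

func (l *locker) GetLock(ctx context.Context, timeout time.Duration) (context.Context, error) {
	if timeout <= 0 {
		return ctx, errors.New("lock timeout")
	}
	// A real implementation could return a derived context that is
	// canceled if the lock is lost, so in-flight work stops early.
	return ctx, nil
}

func (l *locker) Unlock() {}

func doWithLock(ctx context.Context) error {
	lk := &locker{}
	ctx, err := lk.GetLock(ctx, time.Second) // new style: a context comes back
	if err != nil {
		return err
	}
	defer lk.Unlock()
	_ = ctx // subsequent operations would use the lock-scoped context
	return nil
}

func main() {
	fmt.Println(doWithLock(context.Background()))
}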
38
cmd/fs-v1.go
@@ -202,8 +202,13 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
}

// BackendInfo - returns backend information
func (fs *FSObjects) BackendInfo() BackendInfo {
return BackendInfo{Type: BackendFS}
func (fs *FSObjects) BackendInfo() madmin.BackendInfo {
return madmin.BackendInfo{Type: madmin.FS}
}

// LocalStorageInfo - returns underlying storage statistics.
func (fs *FSObjects) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
return fs.StorageInfo(ctx)
}

// StorageInfo - returns underlying storage statistics.
@@ -227,7 +232,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
},
},
}
storageInfo.Backend.Type = BackendFS
storageInfo.Backend.Type = madmin.FS
return storageInfo, nil
}

@@ -590,7 +595,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, forceDelet
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
return oi, VersionNotFound{
Bucket: srcBucket,
@@ -604,7 +609,8 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu

if !cpSrcDstSame {
objectDWLock := fs.NewNSLock(dstBucket, dstObject)
if err := objectDWLock.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = objectDWLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return oi, err
}
defer objectDWLock.Unlock()
@@ -697,12 +703,14 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
lock := fs.NewNSLock(bucket, object)
switch lockType {
case writeLock:
if err = lock.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return nil, err
}
nsUnlocker = lock.Unlock
case readLock:
if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return nil, err
}
nsUnlocker = lock.RUnlock
@@ -971,10 +979,11 @@ func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (
}

// getObjectInfoWithLock - reads object metadata and replies back ObjectInfo.
func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
// Lock the object before reading.
lk := fs.NewNSLock(bucket, object)
if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return oi, err
}
defer lk.RUnlock()
@@ -1012,7 +1021,8 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
if err == errCorruptedFormat || err == io.EOF {
lk := fs.NewNSLock(bucket, object)
if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return oi, toObjectErr(err, bucket, object)
}

@@ -1052,7 +1062,7 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
// for future object operations.
func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if opts.Versioned {
return objInfo, NotImplemented{}
}
@@ -1063,7 +1073,8 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string

// Lock the object.
lk := fs.NewNSLock(bucket, object)
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, err
}
@@ -1238,7 +1249,8 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, op

// Acquire a write lock before deleting the object.
lk := fs.NewNSLock(bucket, object)
if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return objInfo, err
}
defer lk.Unlock()

@@ -35,8 +35,15 @@ import (
type GatewayUnsupported struct{}

// BackendInfo returns the underlying backend information
func (a GatewayUnsupported) BackendInfo() BackendInfo {
return BackendInfo{Type: BackendGateway}
func (a GatewayUnsupported) BackendInfo() madmin.BackendInfo {
return madmin.BackendInfo{Type: madmin.Gateway}
}

// LocalStorageInfo returns the local disks information, mainly used
// in prometheus - for gateway this just a no-op
func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
logger.CriticalIf(ctx, errors.New("not implemented"))
return StorageInfo{}, nil
}

// NSScanner - scanner is not implemented for gateway

@@ -36,19 +36,18 @@ import (
"strings"
"time"

"github.com/minio/minio/pkg/env"

"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"

minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

const (
@@ -545,7 +544,7 @@ func (a *azureObjects) Shutdown(ctx context.Context) error {

// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway
si.Backend.Type = madmin.Gateway
host := a.endpoint.Host
if a.endpoint.Port() == "" {
host = a.endpoint.Host + ":" + a.endpoint.Scheme

@@ -29,9 +29,8 @@ import (
"net/http"
"os"
"path"
"strconv"

"regexp"
"strconv"
"strings"
"time"

@@ -39,17 +38,16 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
"github.com/minio/minio/pkg/env"

"github.com/minio/minio/pkg/madmin"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"

minio "github.com/minio/minio/cmd"
)

var (
@@ -413,7 +411,7 @@ func (l *gcsGateway) Shutdown(ctx context.Context) error {

// StorageInfo - Not relevant to GCS backend.
func (l *gcsGateway) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway
si.Backend.Type = madmin.Gateway
si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, "storage.googleapis.com:443")
return si, nil
}

@@ -232,6 +232,10 @@ func (n *hdfsObjects) Shutdown(ctx context.Context) error {
return n.clnt.Close()
}

func (n *hdfsObjects) LocalStorageInfo(ctx context.Context) (si minio.StorageInfo, errs []error) {
return n.StorageInfo(ctx)
}

func (n *hdfsObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, errs []error) {
fsInfo, err := n.clnt.StatFs()
if err != nil {
@@ -240,7 +244,7 @@ func (n *hdfsObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, er
si.Disks = []madmin.Disk{{
UsedSpace: fsInfo.Used,
}}
si.Backend.Type = minio.BackendGateway
si.Backend.Type = madmin.Gateway
si.Backend.GatewayOnline = true
return si, nil
}

@@ -22,6 +22,7 @@ import (
"github.com/minio/cli"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
)

func init() {
@@ -106,8 +107,8 @@ func (n *nasObjects) IsListenSupported() bool {

func (n *nasObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si, errs := n.ObjectLayer.StorageInfo(ctx)
si.Backend.GatewayOnline = si.Backend.Type == minio.BackendFS
si.Backend.Type = minio.BackendGateway
si.Backend.GatewayOnline = si.Backend.Type == madmin.FS
si.Backend.Type = madmin.Gateway
return si, errs
}


@@ -29,15 +29,15 @@ import (
"github.com/minio/cli"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/tags"
minio "github.com/minio/minio/cmd"

"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/tags"
minio "github.com/minio/minio/cmd"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/madmin"
)

func init() {
@@ -276,7 +276,7 @@ func (l *s3Objects) Shutdown(ctx context.Context) error {

// StorageInfo is not relevant to S3 backend.
func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway
si.Backend.Type = madmin.Gateway
host := l.Client.EndpointURL().Host
if l.Client.EndpointURL().Port() == "" {
host = l.Client.EndpointURL().Host + ":" + l.Client.EndpointURL().Scheme

@@ -369,13 +369,17 @@ var supportedDummyBucketAPIs = map[string][]string{

// List of not implemented bucket queries
var notImplementedBucketResourceNames = map[string]struct{}{
"cors": {},
"metrics": {},
"website": {},
"logging": {},
"inventory": {},
"accelerate": {},
"requestPayment": {},
"cors": {},
"metrics": {},
"website": {},
"logging": {},
"inventory": {},
"accelerate": {},
"requestPayment": {},
"analytics": {},
"intelligent-tiering": {},
"ownershipControls": {},
"publicAccessBlock": {},
}

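The notImplementedBucketResourceNames set gains several newer bucket sub-resources (analytics, intelligent-tiering, ownershipControls, publicAccessBlock). A short sketch of how such a set is typically consulted against a request's query parameters; the handler shape here is assumed, not copied from MinIO:

package main

import (
	"fmt"
	"net/url"
)

var notImplemented = map[string]struct{}{
	"analytics":           {},
	"intelligent-tiering": {},
	"ownershipControls":   {},
	"publicAccessBlock":   {},
}

// firstNotImplemented reports the first query parameter that names an
// unsupported bucket sub-resource, if any.
func firstNotImplemented(q url.Values) (string, bool) {
	for name := range q {
		if _, ok := notImplemented[name]; ok {
			return name, true
		}
	}
	return "", false
}

func main() {
	u, _ := url.Parse("https://example.invalid/bucket?publicAccessBlock=")
	if name, ok := firstNotImplemented(u.Query()); ok {
		fmt.Println("would return NotImplemented for:", name)
	}
}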
// Checks requests for not implemented Bucket resources

@@ -19,12 +19,15 @@ package cmd
import (
"context"
"errors"
"fmt"
"sort"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/wildcard"
)

const (
@@ -64,7 +67,8 @@ func newBgHealSequence() *healSequence {
}
}

func getLocalBackgroundHealStatus() (madmin.BgHealState, bool) {
// getBackgroundHealStatus will return the
func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealState, bool) {
if globalBackgroundHealState == nil {
return madmin.BgHealState{}, false
}
@@ -78,24 +82,51 @@ func getLocalBackgroundHealStatus() (madmin.BgHealState, bool) {
for _, ep := range getLocalDisksToHeal() {
healDisksMap[ep.String()] = struct{}{}
}

for _, ep := range globalBackgroundHealState.getHealLocalDisks() {
if _, ok := healDisksMap[ep.String()]; !ok {
healDisksMap[ep.String()] = struct{}{}
}
}

var healDisks []string
for disk := range healDisksMap {
healDisks = append(healDisks, disk)
}

return madmin.BgHealState{
status := madmin.BgHealState{
ScannedItemsCount: bgSeq.getScannedItemsCount(),
LastHealActivity: bgSeq.lastHealActivity,
HealDisks: healDisks,
NextHealRound: UTCNow(),
}, true
}

if o == nil {
healing := globalBackgroundHealState.getLocalHealingDisks()
for _, disk := range healing {
status.HealDisks = append(status.HealDisks, disk.Endpoint)
}

return status, true
}

// ignores any errors here.
si, _ := o.StorageInfo(ctx)

indexed := make(map[string][]madmin.Disk)
for _, disk := range si.Disks {
setIdx := fmt.Sprintf("%d-%d", disk.PoolIndex, disk.SetIndex)
indexed[setIdx] = append(indexed[setIdx], disk)
}

for id, disks := range indexed {
ss := madmin.SetStatus{
ID: id,
SetIndex: disks[0].SetIndex,
PoolIndex: disks[0].PoolIndex,
}
for _, disk := range disks {
ss.Disks = append(ss.Disks, disk)
if disk.Healing {
ss.HealStatus = "Healing"
ss.HealPriority = "high"
status.HealDisks = append(status.HealDisks, disk.Endpoint)
}
}
sortDisks(ss.Disks)
status.Sets = append(status.Sets, ss)
}
sort.Slice(status.Sets, func(i, j int) bool {
return status.Sets[i].ID < status.Sets[j].ID
})

return status, true

}

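getBackgroundHealStatus now groups the reported disks by a pool-index/set-index key before building per-set heal status. The grouping itself is just a composite map key plus a stable sort; a tiny sketch with illustrative data:

package main

import (
	"fmt"
	"sort"
)

type disk struct {
	PoolIndex, SetIndex int
	Endpoint            string
	Healing             bool
}

func main() {
	disks := []disk{
		{PoolIndex: 0, SetIndex: 0, Endpoint: "http://n1/d1"},
		{PoolIndex: 0, SetIndex: 1, Endpoint: "http://n1/d2", Healing: true},
		{PoolIndex: 0, SetIndex: 0, Endpoint: "http://n2/d1"},
	}
	indexed := make(map[string][]disk)
	for _, d := range disks {
		key := fmt.Sprintf("%d-%d", d.PoolIndex, d.SetIndex) // composite pool-set key
		indexed[key] = append(indexed[key], d)
	}
	keys := make([]string, 0, len(indexed))
	for k := range indexed {
		keys = append(keys, k)
	}
	sort.Strings(keys) // stable ordering, like the sort on status.Sets by ID
	for _, k := range keys {
		fmt.Println(k, "->", len(indexed[k]), "disks")
	}
}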
func mustGetHealSequence(ctx context.Context) *healSequence {
@@ -120,7 +151,7 @@ func mustGetHealSequence(ctx context.Context) *healSequence {
}

// healErasureSet lists and heals all objects in a specific erasure set
func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketInfo) error {
func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketInfo, tracker *healingTracker) error {
bgSeq := mustGetHealSequence(ctx)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
@@ -135,6 +166,21 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn

// Heal all buckets with all objects
for _, bucket := range buckets {
if tracker.isHealed(bucket.Name) {
continue
}
var forwardTo string
// If we resume to the same bucket, forward to last known item.
if tracker.Bucket != "" {
if tracker.Bucket == bucket.Name {
forwardTo = tracker.Bucket
} else {
// Reset to where last bucket ended if resuming.
tracker.resume()
}
}
tracker.Object = ""
tracker.Bucket = bucket.Name
// Heal current bucket
if _, err := er.HealBucket(ctx, bucket.Name, madmin.HealOpts{}); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
@@ -143,21 +189,37 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
}

if serverDebugLog {
console.Debugf(color.Green("healDisk:")+" healing bucket %s content on erasure set %d\n", bucket.Name, er.setNumber+1)
console.Debugf(color.Green("healDisk:")+" healing bucket %s content on erasure set %d\n", bucket.Name, tracker.SetIndex+1)
}

disks, _ := er.getOnlineDisksWithHealing()
if len(disks) == 0 {
return errors.New("healErasureSet: No non-healing disks found")
}

// Limit listing to 3 drives.
if len(disks) > 3 {
disks = disks[:3]
}

healEntry := func(entry metaCacheEntry) {
if entry.isDir() {
return
}
// We might land at .metacache, .trash, .multipart
// no need to heal them skip, only when bucket
// is '.minio.sys'
if bucket.Name == minioMetaBucket {
if wildcard.Match("buckets/*/.metacache/*", entry.name) {
return
}
if wildcard.Match("tmp/.trash/*", entry.name) {
return
}
if wildcard.Match("multipart/*", entry.name) {
return
}
}
fivs, err := entry.fileInfoVersions(bucket.Name)
if err != nil {
logger.LogIf(ctx, err)
@@ -165,32 +227,62 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
}
waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)
for _, version := range fivs.Versions {
if _, err := er.HealObject(ctx, bucket.Name, version.Name, version.VersionID, madmin.HealOpts{ScanMode: madmin.HealNormalScan, Remove: healDeleteDangling}); err != nil {
if _, err := er.HealObject(ctx, bucket.Name, version.Name, version.VersionID, madmin.HealOpts{
ScanMode: madmin.HealNormalScan, Remove: healDeleteDangling}); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
// If not deleted, assume they failed.
tracker.ObjectsFailed++
tracker.BytesFailed += uint64(version.Size)
logger.LogIf(ctx, err)
}
} else {
tracker.ObjectsHealed++
tracker.BytesDone += uint64(version.Size)
}
bgSeq.logHeal(madmin.HealItemObject)
}
tracker.Object = entry.name
if time.Since(tracker.LastUpdate) > time.Minute {
logger.LogIf(ctx, tracker.update(ctx))
}
}

// How to resolve partial results.
resolver := metadataResolutionParams{
dirQuorum: 1,
objQuorum: 1,
bucket: bucket.Name,
}

err := listPathRaw(ctx, listPathRawOptions{
disks: disks,
bucket: bucket.Name,
recursive: true,
forwardTo: "", //TODO(klauspost): Set this to last known offset when resuming.
forwardTo: forwardTo,
minDisks: 1,
reportNotFound: false,
agreed: healEntry,
partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
entry, _ := entries.firstFound()
if entry != nil && !entry.isDir() {
entry, ok := entries.resolve(&resolver)
if ok {
healEntry(*entry)
}
},
finished: nil,
})
logger.LogIf(ctx, err)

select {
// If context is canceled don't mark as done...
case <-ctx.Done():
return ctx.Err()
default:
logger.LogIf(ctx, err)
tracker.bucketDone(bucket.Name)
logger.LogIf(ctx, tracker.update(ctx))
}
}
tracker.Object = ""
tracker.Bucket = ""

return nil
}
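The heal loop above only records a bucket as finished (tracker.bucketDone plus a persisted update) when the context has not been canceled, so an interrupted heal resumes from the same bucket on the next run. The guard boils down to this select shape (a standalone sketch, not MinIO's tracker):

package main

import (
	"context"
	"fmt"
)

// finishBucket commits progress only if the context is still live;
// on cancellation the bucket stays unmarked and will be redone later.
func finishBucket(ctx context.Context, markDone func()) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		markDone()
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err := finishBucket(ctx, func() { fmt.Println("marked done") })
	fmt.Println("err:", err) // context.Canceled, nothing was marked
}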