Compare commits: RELEASE.20...release

396 commits

| SHA1 |
| --- |
| 404d2ebe3f |
| 46964eb764 |
| bfab990c33 |
| 94018588fe |
| 8b76ba8d5d |
| 7eb7f65e48 |
| c608c0688a |
| 41a9d1d778 |
| e21e80841e |
| 98c792bbeb |
| f687ba53bc |
| e3da59c923 |
| 781b9b051c |
| 438becfde8 |
| 16ef338649 |
| 3242847ec0 |
| cf87303094 |
| 90d8ec6310 |
| b383522743 |
| 6d42036dd4 |
| b4d8bcf644 |
| d7f32ad649 |
| 906d68c356 |
| 749e9c5771 |
| 410e84d273 |
| 75741dbf4a |
| fad7b27f15 |
| 79564656eb |
| 21cfc4aa49 |
| e80239a661 |
| 6a2ed44095 |
| 8adfeb0d84 |
| d23485e571 |
| da70e6ddf6 |
| 922c7b57f5 |
| 726d80dbb7 |
| 23b03dadb8 |
| 7b3719c17b |
| 9a6487319a |
| 94ff624242 |
| 98ff91b484 |
| 4d86384dc7 |
| 27eb4ae3bc |
| b5dcaaccb4 |
| 0843280dc3 |
| 61a1ea60c2 |
| b92a220db1 |
| be5910b87e |
| 51a8619a79 |
| d46c3c07a8 |
| 14d89eaae4 |
| 32b088a2ff |
| eed3b66d98 |
| add3cd4e44 |
| 60b0f2324e |
| 0eb146e1b2 |
| b379ca3bb0 |
| e197800f90 |
| 980311fdfd |
| 771dea175c |
| fa94682e83 |
| 6160188bf3 |
| e5a1a2a974 |
| 642ba3f2d6 |
| 4d80de899a |
| afbd3e41eb |
| 5e003549cc |
| 7fa3e4106b |
| 75db500e85 |
| 910097bbbc |
| afd346417d |
| feafccf007 |
| 9b54fcdf12 |
| f92b7a5621 |
| c25e75f0b5 |
| 3ffe520643 |
| 952b0f111d |
| 777344a594 |
| 9d118b372e |
| 878bc6c72b |
| fdc2f69218 |
| 0d124095ea |
| 691035832a |
| eac66e67ec |
| 57f3ed22d4 |
| 2f29719e6b |
| 209fe61dcc |
| 8ecffdb7a7 |
| 806df164b2 |
| 4ac9ed4248 |
| 3d8c512bba |
| 3ff5f55dcb |
| 097e5eba9f |
| ba6930bb13 |
| 64662a49ff |
| 78e867e145 |
| 9ccc483df6 |
| abce040088 |
| 558762bdf6 |
| d971061305 |
| 509bcc01ad |
| 7ea95fcec8 |
| 79b0d056a2 |
| ec547c0fa8 |
| bcf9825082 |
| 651487507a |
| 124816f6a6 |
| fa9cf1251b |
| 97e7a902d0 |
| d73d756a80 |
| 7488c77e7c |
| 786585009e |
| 7be7109471 |
| 464fa08f2e |
| c3217bd6eb |
| f14cc6c943 |
| 2c198ae7b6 |
| 690434514d |
| 039f59b552 |
| c6a120df0e |
| cd9e30c0f4 |
| f96d4cf7d3 |
| 879599b0cf |
| b1bb3f7016 |
| e8d8dfa3ae |
| bbd1244a88 |
| 10bdb78699 |
| 289b22d911 |
| 0b9c17443e |
| 23f7ab40b3 |
| e3f8830ab7 |
| 2f4af09c01 |
| 37960cbc2f |
| c67d1bf120 |
| c5b3a675fa |
| b690304eed |
| 9171d6ef65 |
| 6386b45c08 |
| 1f659204a2 |
| f9f6fd0421 |
| 85620dfe93 |
| a8e4f64ff3 |
| ca5c6e3160 |
| b23659927c |
| b912e9ab41 |
| c1a49be639 |
| 03172b89e2 |
| b517c791e9 |
| 14aef52004 |
| 67b20125e4 |
| d4b822d697 |
| 1b63291ee2 |
| aa7244a9a4 |
| 2a79ea0332 |
| 6e5c61d917 |
| 02e7de6367 |
| cec12f4c76 |
| da676ac298 |
| 18ec933085 |
| c31d2c3fdc |
| 8778828a03 |
| 48b212dd8e |
| be7de911c4 |
| 8cad407e0b |
| 85d2187c20 |
| 98d3f94996 |
| 173284903b |
| c70240b893 |
| 8ba2136e06 |
| 2dce5d9442 |
| f28b063091 |
| 90abea5b7a |
| c5b2a8441b |
| 0f5ca83418 |
| 8a6b13c239 |
| 8e8a792d9d |
| 95e0acbb26 |
| 55037e6e54 |
| 289e1d8b2a |
| e07918abe3 |
| ffea6fcf09 |
| 11b2220696 |
| aa8450a2a1 |
| 87cce344f6 |
| 7d4a2d2b68 |
| cfc8b92dff |
| c4e12dc846 |
| a94a9c37fa |
| 79b6a43467 |
| 928de04f7a |
| 93fd248b52 |
| 2a7b123895 |
| b3c56b53fb |
| 0ef3e359d8 |
| f24d8127ab |
| 7875d472bc |
| 711adb9652 |
| e6b4ea7618 |
| 466e95bb59 |
| 881f98e511 |
| cbf4bb62e0 |
| 682482459d |
| b87fae0049 |
| b8b44c879f |
| f53d1de87f |
| 5a18d437ce |
| 93eb549a83 |
| fe3c39b583 |
| 84d400487f |
| 3afa499885 |
| 13d015cf93 |
| 876b79b8d8 |
| 3d74efa6b1 |
| 68d299e719 |
| f9c5636c2d |
| 9b10118d34 |
| 0e3211f4ad |
| 2e4d9124ad |
| 6fef4c21b9 |
| 8e1bbd989a |
| 152d7cd95b |
| 74080bf108 |
| 647a209c73 |
| 0d057c777a |
| 275f7a63e8 |
| 97fe57bba9 |
| 88c1bb0720 |
| 1fdafaf72f |
| 5fe4bb6b36 |
| 99b733d44c |
| b4ac05523b |
| c7eacba41c |
| 1887c25279 |
| c9b0f595b9 |
| 8bb580abfc |
| af9cb5f5f2 |
| 9497dfd804 |
| da55a05587 |
| 3fc4d6f620 |
| 67a8f37df0 |
| 075c429021 |
| df0c678167 |
| f108873c48 |
| 871b450dbd |
| f71e192343 |
| b3f81e75f6 |
| a71e0483c9 |
| f2d49ec21a |
| 4a9d9c8585 |
| c885777ac6 |
| fe3aca70c3 |
| c4848f9b4f |
| 838d4dafbd |
| 8c663f93f7 |
| b4cb7edf85 |
| e96fdcd5ec |
| 6ef678663e |
| f737a027cf |
| 547efecd82 |
| 65aa2bc614 |
| de4421d6a3 |
| 6c3467e300 |
| 33554651e9 |
| 451d9057f3 |
| c82aef0a56 |
| 1e53bf2789 |
| d48ff93ba3 |
| c8489a8f0c |
| 2680772d4b |
| 567f7bdd05 |
| 6cd255d516 |
| e79829b5b3 |
| fd3f02637a |
| e019f21bda |
| e9ac7b0fb7 |
| 1debd722b5 |
| e7f6051f19 |
| 6717295e18 |
| 00cff1aac5 |
| 9722531817 |
| 5c6bfae4c7 |
| 5f51ef0b40 |
| 7e266293e6 |
| eb6871ecd9 |
| 9cdd981ce7 |
| bd8020aba8 |
| 09bc49bd51 |
| 82f0471d1b |
| 0bf2d84f96 |
| 6a95f412c9 |
| 7575c24037 |
| 43f973c4cf |
| 1b453728a3 |
| a6c146bd00 |
| a35cbb3ff3 |
| c080f04e66 |
| 2167ba0111 |
| 4e6d717f39 |
| 845e251fa9 |
| d1a8f0b786 |
| dac19d7272 |
| 7624c8b9bb |
| 19fb1086b2 |
| a5e23a40ff |
| 1ad2b7b699 |
| b5049d541f |
| e0055609bb |
| e8ce348da1 |
| 6bfa162342 |
| b4add82bb6 |
| 3bda8f755c |
| 3ca6330661 |
| 3d9000d5b5 |
| 3163a660aa |
| 0dadfd1b3d |
| 98f76008c7 |
| 8da0b7cf03 |
| 4315f93421 |
| ddb5d7043a |
| f903cae6ff |
| 40d59c1961 |
| 7090bcc8e0 |
| c222bde14b |
| 4e06a72632 |
| 16040bc544 |
| cc2d887e0e |
| c1b4b24236 |
| feaf8dfb9a |
| 628ef081d1 |
| 44dff36ff7 |
| b97d53b29c |
| 00af9881b0 |
| e09196d626 |
| 1a5775e2e8 |
| e2579b1f5a |
| 7824e19d20 |
| 317305d5f9 |
| e4e117faab |
| e8176fe978 |
| 828602d672 |
| d9224fbc65 |
| 51dad1d130 |
| 4593b146be |
| f21d650ed4 |
| a4f6705874 |
| b35b537e3f |
| 5c52d5ffc7 |
| f0808bb2e5 |
| a6dee21092 |
| 6f781c5e7a |
| f8ca859790 |
| b78521cd69 |
| 76e2713ffe |
| b5d291ea88 |
| eb9172eecb |
| 97a4c120e9 |
| 64bddf47d8 |
| 4ed45ce543 |
| ad511b0eb8 |
| cb0eaeaad8 |
| f3f0041ad0 |
| d0027c3c41 |
| cb7fc99368 |
| a4383051d9 |
| e7ae49f9c9 |
| 153d4be032 |
| dfd99b6d8f |
| c4b1d394d6 |
| c4131c2798 |
| c9d502e6fa |
| 677e80c0f8 |
| aa85af4d1a |
| ae731d232f |
| a317d220ed |
| 3e1221a01c |
| c1f6ca6697 |
| 36fc2f98ed |
| 556524c715 |
| 428f288379 |
| cde801282d |
| 6cf0008469 |
| 7b0330a98c |
| cc457f1798 |
| ca0d31b09a |
| 445a9bd827 |
| d8d25a308f |
| c19e6ce773 |
| d3c853a3be |
| 59d3639396 |
| 027e17468a |
| 45ea161f8d |
| 7b8a456f68 |
| b43906f6ee |
| 6a66f142d4 |
| 5982965839 |
| bfb92a27b7 |
````diff
@@ -1,2 +1,9 @@
 .git
 .github
+docs
+default.etcd
+browser
+*.gz
+*.tar.gz
+*.bzip2
+*.zip
````
**.github/PULL_REQUEST_TEMPLATE.md** (5 changes, vendored)

````diff
@@ -10,9 +10,10 @@
 ## Types of changes
 - [ ] Bug fix (non-breaking change which fixes an issue)
 - [ ] New feature (non-breaking change which adds functionality)
+- [ ] Optimization (provides speedup with no functional changes)
 - [ ] Breaking change (fix or feature that would cause existing functionality to change)
 
 ## Checklist:
 - [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
-- [ ] Documentation needed
-- [ ] Unit tests needed
+- [ ] Documentation updated
+- [ ] Unit tests added/updated
````
**.github/workflows/go.yml** (16 changes, vendored)

````diff
@@ -11,8 +11,8 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.14.x, 1.15.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.16.x]
+        os: [ubuntu-latest, windows-latest, macos-latest]
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-node@v1
@@ -21,6 +21,14 @@ jobs:
       - uses: actions/setup-go@v2
         with:
          go-version: ${{ matrix.go-version }}
+      - name: Build on ${{ matrix.os }}
+        if: matrix.os == 'macos-latest'
+        env:
+          CGO_ENABLED: 0
+          GO111MODULE: on
+        run: |
+          make
+          make test-race
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'windows-latest'
         env:
@@ -39,11 +47,9 @@ jobs:
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          sudo apt-get install devscripts shellcheck
          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
-         curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
+         curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
          go list -m all | ./nancy sleuth
          make
-         diff -au <(gofmt -s -d cmd) <(printf "")
-         diff -au <(gofmt -s -d pkg) <(printf "")
          make test-race
          make crosscompile
          make verify
````
````diff
@@ -17,6 +17,8 @@ linters:
   - gosimple
   - deadcode
   - structcheck
+  - gomodguard
+  - gofmt
 
 issues:
   exclude-use-default: false
````
````diff
@@ -1,4 +1,4 @@
-FROM golang:1.15-alpine as builder
+FROM golang:1.16-alpine as builder
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
@@ -15,6 +15,8 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
 
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_ROOT_USER_FILE=access_key \
+    MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
     MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
````
````diff
@@ -1,4 +1,4 @@
-FROM golang:1.15-alpine as builder
+FROM golang:1.16-alpine as builder
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
@@ -17,6 +17,8 @@ ARG TARGETARCH
 
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_ROOT_USER_FILE=access_key \
+    MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
     MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
````
````diff
@@ -6,19 +6,18 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/
 COPY minio /usr/bin/
 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
 
 ENV MINIO_UPDATE=off \
     MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
     MINIO_ROOT_USER_FILE=access_key \
     MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
     MINIO_SSE_MASTER_KEY_FILE=sse_master_key
 
-RUN \
-    microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
-    microdnf clean all && \
+RUN microdnf update --nodocs
+RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
+RUN microdnf clean all && \
     chmod +x /usr/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh
````
````diff
@@ -2,19 +2,23 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
 
 ARG TARGETARCH
 
+ARG RELEASE
+
 LABEL name="MinIO" \
       vendor="MinIO Inc <dev@min.io>" \
       maintainer="MinIO Inc <dev@min.io>" \
-      version="RELEASE.2020-11-25T22-36-25Z" \
-      release="RELEASE.2020-11-25T22-36-25Z" \
+      version="${RELEASE}" \
+      release="${RELEASE}" \
       summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
       description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
 
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_ROOT_USER_FILE=access_key \
+    MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
     MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
-    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
+    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
 
 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -26,9 +30,9 @@ RUN \
     microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
     microdnf clean all && \
     chmod +x /usr/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
````
**Makefile** (35 changes)

````diff
@@ -17,38 +17,28 @@ checks:
 getdeps:
 	@mkdir -p ${GOPATH}/bin
 	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
-	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
-	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)
+	@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
+	@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)
 
 crosscompile:
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)
 
-verifiers: getdeps fmt lint ruleguard check-gen
+verifiers: getdeps lint check-gen
 
 check-gen:
 	@go generate ./... >/dev/null
 	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
 
-fmt:
-	@echo "Running $@ check"
-	@GO111MODULE=on gofmt -d cmd/
-	@GO111MODULE=on gofmt -d pkg/
-
 lint:
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
-
-ruleguard:
-	@echo "Running $@ check"
-	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
 
 # Builds minio, runs the verifiers then runs the tests.
 check: test
 test: verifiers build
 	@echo "Running unit tests"
-	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
 
 test-race: verifiers build
 	@echo "Running unit tests under -race"
@@ -71,13 +61,18 @@ build: checks
 	@echo "Building minio binary to './minio'"
 	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
-hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
-    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
-hotfix: install
+hotfix-vars:
+	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
+    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
+	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
+hotfix: hotfix-vars install
 
-docker: checks
+docker-hotfix: hotfix checks
+	@echo "Building minio docker image '$(TAG)'"
+	@docker build -t $(TAG) . -f Dockerfile.dev
+
+docker: build checks
 	@echo "Building minio docker image '$(TAG)'"
-	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@docker build -t $(TAG) . -f Dockerfile.dev
 
 # Builds minio and installs it to $GOPATH/bin.
````
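The new `hotfix-vars` target moves the `LDFLAGS` and `TAG` computation into `$(eval ...)` calls that run only when a hotfix build is requested. The strings it assembles reach the binary through Go's `-X` linker flag; below is a minimal stand-alone sketch of that mechanism (the `main.releaseTag` variable name is illustrative, not what `buildscripts/gen-ldflags.go` actually emits):

```go
// version.go - a stand-alone sketch of -X ldflags injection.
package main

import "fmt"

// releaseTag is overwritten at link time, e.g.:
//   go build -ldflags "-X main.releaseTag=RELEASE.2021-01-05T05-22-38Z.hotfix.abc1234"
var releaseTag = "DEVELOPMENT"

func main() {
	fmt.Println("version:", releaseTag)
}
```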
**README.md** (222 changes)

````diff
@@ -5,80 +5,175 @@
 MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
 
-## Docker Container
-### Stable
-```
-docker run -p 9000:9000 \
-  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
-  minio/minio server /data
+This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
+use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
+
+# Docker Installation
+
+Use the following commands to run a standalone MinIO server on a Docker container.
+
+Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.
+
+## Stable
+
+Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:
+
+```sh
+docker run -p 9000:9000 minio/minio server /data
 ```
 
-### Edge
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
+`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
+
+## Edge
+
+Run the following command to run the bleeding-edge image of MinIO on a Docker container using an ephemeral data volume:
+
 ```
-docker run -p 9000:9000 \
-  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
-  minio/minio:edge server /data
+docker run -p 9000:9000 minio/minio:edge server /data
 ```
 
-> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+> NOTE: To deploy MinIO on Docker with persistent storage, you must map local persistent directories from the host OS to the container using the
+`docker -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the Docker container.
+
+# macOS
+
+Use the following commands to run a standalone MinIO server on macOS.
+
+Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.
+
+## Homebrew (recommended)
+
+Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
+
-## macOS
-### Homebrew (recommended)
-Install minio packages using [Homebrew](https://brew.sh/)
 ```sh
 brew install minio/stable/minio
 minio server /data
 ```
 
 > NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
 
 ```sh
 brew uninstall minio
 brew install minio/stable/minio
 ```
 
-### Binary Download
-| Platform | Architecture | URL |
-| ---------- | -------- | ------ |
-| Apple macOS | 64-bit Intel | https://dl.min.io/server/minio/release/darwin-amd64/minio |
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+## Binary Download
+
+Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
+
 ```sh
-chmod 755 minio
+wget https://dl.min.io/server/minio/release/darwin-amd64/minio
+chmod +x minio
 ./minio server /data
 ```
 
-## GNU/Linux
-### Binary Download
-| Platform | Architecture | URL |
-| ---------- | -------- | ------ |
-| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+# GNU/Linux
+
+Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
+
 ```sh
 wget https://dl.min.io/server/minio/release/linux-amd64/minio
 chmod +x minio
 ./minio server /data
 ```
 
-| Platform | Architecture | URL |
-| ---------- | -------- | ------ |
-| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
+Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
+
+The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
+
+| Architecture | URL |
+| -------- | ------ |
+| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
+| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
+| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
+| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
+
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.
+
+# Microsoft Windows
+
+To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
+
 ```sh
-wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
-chmod +x minio
-./minio server /data
+https://dl.min.io/server/minio/release/windows-amd64/minio.exe
 ```
 
-## Microsoft Windows
-### Binary Download
-| Platform | Architecture | URL |
-| ---------- | -------- | ------ |
-| Microsoft Windows | 64-bit | https://dl.min.io/server/minio/release/windows-amd64/minio.exe |
+Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
+
 ```sh
-minio.exe server D:\Photos
+minio.exe server D:\
 ```
 
-## FreeBSD
-### Port
-Install minio packages using [pkg](https://github.com/freebsd/pkg), MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.
+
+# FreeBSD
+
+MinIO does not provide an official FreeBSD binary. However, FreeBSD maintains an [upstream release](https://www.freshports.org/www/minio) using [pkg](https://github.com/freebsd/pkg):
 
 ```sh
 pkg install minio
@@ -87,13 +182,32 @@ sysrc minio_disks=/home/user/Photos
 service minio start
 ```
 
-## Install from Source
-Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
+# Install from Source
+
+Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)
 
 ```sh
 GO111MODULE=on go get github.com/minio/minio
 ```
 
+The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
+web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
+root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
+
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
+[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
+see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
+
+> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
+require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
+for more complete documentation.
+
+MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
+
+# Deployment Recommendations
+
 ## Allow port access for Firewalls
 
 By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
@@ -149,6 +263,13 @@ iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
 service iptables restart
 ```
 
+## Pre-existing data
+
+When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
+
+The above statement is also valid for all gateway backends.
+
+# Test MinIO Connectivity
+
 ## Test using MinIO Browser
 MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
@@ -157,12 +278,7 @@ MinIO Server comes with an embedded web based object browser. Point your web bro
 ## Test using MinIO Client `mc`
 `mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
 
-## Pre-existing data
-When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.
-
-The above statement is also valid for all gateway backends.
-
-## Upgrading MinIO
+# Upgrading MinIO
 MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):
 
 ```
@@ -171,7 +287,7 @@ mc admin update <minio alias, e.g., myminio>
 
 > NOTE: some releases might not allow rolling upgrades, this is always called out in the release notes and it is generally advised to read release notes before upgrading. In such a situation `mc admin update` is the recommended upgrading mechanism to upgrade all servers at once.
 
-### Important things to remember during MinIO upgrades
+## Important things to remember during MinIO upgrades
 
 - `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located, for example if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
 - `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
@@ -181,7 +297,7 @@ mc admin update <minio alias, e.g., myminio>
 - If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
 - If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
 
-## Explore Further
+# Explore Further
 - [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
 - [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
 - [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
@@ -189,8 +305,8 @@ mc admin update <minio alias, e.g., myminio>
 - [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
 - [The MinIO documentation website](https://docs.min.io)
 
-## Contribute to MinIO Project
+# Contribute to MinIO Project
 Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
 
-## License
+# License
 Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
````
````diff
@@ -8,16 +8,16 @@ MinIO is a very lightweight service that can easily be combined with other applications
 ### Stable
 ```
 docker run -p 9000:9000 \
-  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
+  -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
+  -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
   minio/minio server /data
 ```
 
 ### Edge
 ```
 docker run -p 9000:9000 \
-  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
-  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
+  -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \
+  -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
   minio/minio:edge server /data
 ```
@@ -78,7 +78,7 @@ minio.exe server D:\Photos
 ## FreeBSD
 ### Port
-Install using [pkg](https://github.com/freebsd/pkg),, MinIO does not officially provide FreeBSD binaries; they are maintained by FreeBSD upstream, see [here](https://www.freshports.org/www/minio).
+Install using [pkg](https://github.com/freebsd/pkg), MinIO does not officially provide FreeBSD binaries; they are maintained by FreeBSD upstream, see [here](https://www.freshports.org/www/minio).
 
 ```sh
 pkg install minio
@@ -89,7 +89,7 @@ service minio start
 
 ## Install from Source
 
-Source installation is intended only for developers and advanced users. If you do not yet have a working Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.14](https://golang.org/dl/#stable)
+Source installation is intended only for developers and advanced users. If you do not yet have a working Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.16](https://golang.org/dl/#stable)
 
 ```sh
 GO111MODULE=on go get github.com/minio/minio
@@ -179,7 +179,7 @@ mc admin update <minio alias, e.g., myminio>
 - For federated deployment mode, run `mc admin update` against each cluster separately. Do not update `mc` to any new version until all clusters have been updated successfully.
 - If `kes` is used as MinIO's KMS, simply replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
 - If Vault is used as MinIO's KMS, make sure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
-- If MindIO is used together with etcd, make sure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
+- If MinIO is used together with etcd, make sure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
 
 ## Learn More
 - [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
@@ -190,7 +190,7 @@ mc admin update <minio alias, e.g., myminio>
 - [MinIO Documentation](https://docs.min.io)
 
 ## How to participate in the MinIO project
-Please refer to the [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese programmers are all welcome in the MinIO project.
+Please refer to the [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese programmers are all welcome to join the MinIO project.
 
 ## License
 Use of MinIO is governed by the Apache 2.0 License, which can be viewed at [LICENSE](./LICENSE).
````
**browser/.gitignore** (1 change, vendored)

````diff
@@ -17,4 +17,3 @@ release
 *.syso
 coverage.txt
 node_modules
-production
````
````diff
@@ -17,24 +17,13 @@ nvm install stable
 npm install
 ```
 
-### Install `go-bindata` and `go-bindata-assetfs`
-
-If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
-
-```sh
-go get github.com/go-bindata/go-bindata/go-bindata
-go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
-```
-
 ## Generating Assets
 
-### Generate ui-assets.go
-
 ```sh
 npm run release
 ```
 
-This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
+This generates `production` in the current directory.
 
 ## Run MinIO Browser with live reload
````
````diff
@@ -24,9 +24,6 @@ jest.mock("jwt-decode")
 jwtDecode.mockImplementation(() => ({ sub: "minio" }))
 
 jest.mock("../../web", () => ({
-  GenerateAuth: jest.fn(() => {
-    return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
-  }),
   SetAuth: jest.fn(
     ({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
       if (
````
````diff
@@ -26,6 +26,7 @@ import {
   SHARE_OBJECT_EXPIRY_HOURS,
   SHARE_OBJECT_EXPIRY_MINUTES
 } from "../constants"
+import QRCode from "react-qr-code";
 
 export class ShareObjectModal extends React.Component {
   constructor(props) {
@@ -89,6 +90,7 @@ export class ShareObjectModal extends React.Component {
       <ModalHeader>Share Object</ModalHeader>
       <ModalBody>
         <div className="input-group copy-text">
+          <QRCode value={url} size={128}/>
           <label>Shareable Link</label>
           <input
             type="text"
````
````diff
@@ -138,9 +138,10 @@ describe("Uploads actions", () => {
       objects: { currentPrefix: "pre1/" }
     })
     store.dispatch(uploadsActions.uploadFile(file))
+    const objectPath = encodeURIComponent("pre1/file1")
     expect(open).toHaveBeenCalledWith(
       "PUT",
-      "https://localhost:8080/upload/test1/pre1/file1",
+      "https://localhost:8080/upload/test1/" + objectPath,
       true
     )
     expect(send).toHaveBeenCalledWith(file)
````
````diff
@@ -94,7 +94,7 @@ export const uploadFile = file => {
     _filePath = _filePath.substring(1)
   }
   const filePath = _filePath
-  const objectName = `${currentPrefix}${filePath}`
+  const objectName = encodeURIComponent(`${currentPrefix}${filePath}`)
   const uploadUrl = `${
     window.location.origin
   }${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`
````
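The fix above runs the object name through `encodeURIComponent` before splicing it into the upload URL, so keys containing `#`, `?`, `%`, or spaces survive the round trip. The same principle, as a rough stand-alone illustration in Go (not MinIO's actual server-side handling):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// An object key that would corrupt a URL if concatenated raw:
	// '#' starts a fragment and '?' starts a query string.
	objectName := "pre1/file #1?.txt"

	// PathEscape percent-encodes the whole key (including '/'),
	// much like encodeURIComponent in the browser fix above.
	fmt.Println("/minio/upload/test1/" + url.PathEscape(objectName))
	// Output: /minio/upload/test1/pre1%2Ffile%20%231%3F.txt
}
```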
````diff
@@ -75,6 +75,11 @@
       border-color: darken(@input-border, 5%);
     }
   }
+
+  svg {
+    display: block;
+    margin: 0 auto 5px;
+  }
 }
 
 /*--------------------------
@@ -150,4 +155,4 @@
   100% {
     transform: rotate(360deg);
   }
-}
+}
````
**browser/assets.go** (new file, 11 lines)

```go
package browser

import "embed"

//go:embed production/*
var fs embed.FS

// GetStaticAssets returns assets
func GetStaticAssets() embed.FS {
	return fs
}
```
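`browser/assets.go` switches the UI from generated `go-bindata` sources to Go 1.16's `go:embed`, which is why the built `production/` tree is now committed and removed from `.gitignore`. A minimal sketch of serving the embedded tree over HTTP (the import path and the `/minio/` route are assumptions for illustration, not the server's actual wiring):

```go
package main

import (
	"io/fs"
	"log"
	"net/http"

	"github.com/minio/minio/browser" // assumed import path of the package above
)

func main() {
	// Re-root the embedded filesystem at production/ so that
	// production/index.html is served as /minio/index.html.
	assets, err := fs.Sub(browser.GetStaticAssets(), "production")
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/minio/", http.StripPrefix("/minio/", http.FileServer(http.FS(assets))))
	log.Fatal(http.ListenAndServe(":9000", nil))
}
```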
````diff
@@ -14,19 +14,11 @@
  * limitations under the License.
  */
 
-var moment = require('moment')
 var async = require('async')
 var exec = require('child_process').exec
 var fs = require('fs')
 
-var isProduction = process.env.NODE_ENV == 'production' ? true : false
-var assetsFileName = ''
-var commitId = ''
-var date = moment.utc()
-var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
-var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z'
-var buildType = 'DEVELOPMENT'
-if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD
 
 rmDir = function(dirPath) {
   try { var files = fs.readdirSync(dirPath); }
@@ -53,74 +45,6 @@ async.waterfall([
     console.log('Running', cmd)
     exec(cmd, cb)
   },
-  function(stdout, stderr, cb) {
-    if (isProduction) {
-      fs.renameSync('production/index_bundle.js',
-                    'production/index_bundle-' + releaseTag + '.js')
-    } else {
-      fs.renameSync('dev/index_bundle.js',
-                    'dev/index_bundle-' + releaseTag + '.js')
-    }
-    var cmd = 'git log --format="%H" -n1'
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    if (!stdout) throw new Error('commitId is empty')
-    commitId = stdout.replace('\n', '')
-    if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
-    assetsFileName = 'ui-assets.go';
-    var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
-    if (!isProduction) {
-      cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
-    }
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    var cmd = 'gofmt -s -w -l bindata_assetfs.go'
-    console.log('Running', cmd)
-    exec(cmd, cb)
-  },
-  function(stdout, stderr, cb) {
-    fs.renameSync('bindata_assetfs.go', assetsFileName)
-    fs.appendFileSync(assetsFileName, '\n')
-    fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' +
-                      releaseTag + '"\n')
-    fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n')
-    fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"')
-    fs.appendFileSync(assetsFileName, '\n')
-    var contents;
-    if (isProduction) {
-      contents = fs.readFileSync(assetsFileName, 'utf8')
-        .replace(/_productionIndexHtml/g, '_productionIndexHTML')
-        .replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes')
-        .replace(/productionIndexHtml/g, 'productionIndexHTML')
-        .replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs')
-        .replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes')
-        .replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs')
-        .replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs')
-        .replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes')
-        .replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs');
-    } else {
-      contents = fs.readFileSync(assetsFileName, 'utf8')
-        .replace(/_devIndexHtml/g, '_devIndexHTML')
-        .replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes')
-        .replace(/devIndexHtml/g, 'devIndexHTML')
-        .replace(/_devIndex_bundleJs/g, '_devIndexBundleJs')
-        .replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes')
-        .replace(/devIndex_bundleJs/g, 'devIndexBundleJs')
-        .replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs')
-        .replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes')
-        .replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs');
-    }
-    contents = contents.replace(/MINIO_UI_VERSION/g, version)
-    contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + releaseTag + '.js')
-
-    fs.writeFileSync(assetsFileName, contents, 'utf8')
-    console.log('UI assets file :', assetsFileName)
-    cb()
-  }
 ], function(err) {
   if (err) return console.log(err)
 })
````
**browser/package-lock.json** (32008 changes, generated — diff not shown)

**browser/package.json**

````diff
@@ -6,7 +6,7 @@
     "test": "jest",
     "dev": "NODE_ENV=dev webpack-dev-server --devtool cheap-module-eval-source-map --progress --colors --hot --content-base dev",
     "build": "NODE_ENV=dev node build.js",
-    "release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js",
+    "release": "NODE_ENV=production node build.js",
     "format": "esformatter -i 'app/**/*.js'"
   },
   "jest": {
@@ -84,6 +84,7 @@
     "react-dropzone": "^11.0.1",
     "react-infinite-scroller": "^1.2.4",
     "react-onclickout": "^2.0.8",
+    "react-qr-code": "^1.1.1",
     "react-redux": "^5.1.2",
     "react-router-dom": "^5.2.0",
     "redux": "^4.0.5",
````
New binary files:

| File | Size |
| --- | --- |
| browser/production/chrome.png | 3.6 KiB |
| browser/production/favicon-16x16.png | 15 KiB |
| browser/production/favicon-32x32.png | 16 KiB |
| browser/production/favicon-96x96.png | 17 KiB |
| browser/production/firefox.png | 4.7 KiB |
**browser/production/index.html** (new file, 59 lines)

```html
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>MinIO Browser</title>

  <link rel="icon" type="image/png" sizes="32x32" href="/minio/favicon-32x32.png">
  <link rel="icon" type="image/png" sizes="96x96" href="/minio/favicon-96x96.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/minio/favicon-16x16.png">

  <link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>

<body>
  <div class="page-load">
    <div class="pl-inner">
      <img src="/minio/logo.svg" alt="">
    </div>
  </div>
  <div id="root"></div>

  <!--[if lt IE 11]>
    <div class="ie-warning">
      <div class="iw-inner">
        <i class="iwi-icon fas fa-exclamation-triangle"></i>
        You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser
        <ul>
          <li>
            <a href="http://www.google.com/chrome/">
              <img src="chrome.png" alt="">
              <div>Chrome</div>
            </a>
          </li>
          <li>
            <a href="https://www.mozilla.org/en-US/firefox/new/">
              <img src="firefox.png" alt="">
              <div>Firefox</div>
            </a>
          </li>
          <li>
            <a href="https://www.apple.com/safari/">
              <img src="safari.png" alt="">
              <div>Safari</div>
            </a>
          </li>
        </ul>
        <div class="iwi-skip">Skip &amp; Continue</div>
      </div>
    </div>
  <![endif]-->

  <script>currentUiVersion = 'MINIO_UI_VERSION'</script>
  <script src="/minio/index_bundle.js"></script>
</body>
</html>
```
**browser/production/index_bundle.js** (new file, 67 lines — content not shown)

**browser/production/loader.css** (new file, 98 lines)

```css
.page-load {
  position: fixed;
  width: 100%;
  height: 100%;
  top: 0;
  left: 0;
  background: #002a37;
  z-index: 100;
  transition: opacity 200ms;
  -webkit-transition: opacity 200ms;
}

.pl-0 {
  opacity: 0;
}

.pl-1 {
  display: none;
}

.pl-inner {
  position: absolute;
  width: 100px;
  height: 100px;
  left: 50%;
  margin-left: -50px;
  top: 50%;
  margin-top: -50px;
  text-align: center;
  -webkit-animation: fade-in 500ms;
  animation: fade-in 500ms;
  -webkit-animation-fill-mode: both;
  animation-fill-mode: both;
  animation-delay: 350ms;
  -webkit-animation-delay: 350ms;
  -webkit-backface-visibility: visible;
  backface-visibility: visible;
}

.pl-inner:before {
  content: '';
  position: absolute;
  width: 100%;
  height: 100%;
  left: 0;
  top: 0;
  display: block;
  -webkit-animation: spin 1000ms infinite linear;
  animation: spin 1000ms infinite linear;
  border: 1px solid rgba(255, 255, 255, 0.2);
  border-left-color: #fff;
  border-radius: 50%;
}

.pl-inner > img {
  width: 30px;
  margin-top: 21px;
}

@-webkit-keyframes fade-in {
  0% {
    opacity: 0;
  }
  100% {
    opacity: 1;
  }
}

@keyframes fade-in {
  0% {
    opacity: 0;
  }
  100% {
    opacity: 1;
  }
}

@-webkit-keyframes spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg);
  }
}

@keyframes spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg);
  }
}
```
12
browser/production/logo.svg
Normal file
@@ -0,0 +1,12 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
<svg width="93px" height="187px" viewBox="0 0 93 187" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
    <title>logo</title>
    <desc>Created with Sketch.</desc>
    <defs></defs>
    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
        <g id="logo" transform="translate(0.187500, -0.683594)" fill="#FFFFFF" fill-rule="nonzero">
            <path d="M91.49,46.551 C86.7827023,38.7699609 82.062696,30.9966172 77.33,23.231 C74.87,19.231 72.33,15.231 69.88,11.231 C69.57,10.731 69.18,10.291 68.88,9.831 C64.35,2.931 55.44,-1.679 46.73,2.701 C42.9729806,4.51194908 40.0995718,7.75449451 38.7536428,11.7020516 C37.4077139,15.6496086 37.701799,19.9721186 39.57,23.701 C41.08,26.641 43.57,29.121 45.91,31.581 C53.03,39.141 60.38,46.491 67.45,54.111 C72.4175495,59.4492221 74.4526451,66.8835066 72.8965704,74.0075359 C71.3404956,81.1315653 66.390952,87.0402215 59.65,89.821 C59.4938176,89.83842 59.3361824,89.83842 59.18,89.821 L59.18,54.591 C46.6388051,61.0478363 35.3944735,69.759905 26.01,80.291 C11.32,96.671 2.64,117.141 0.01,132.071 L23.96,119.821 C31.96,115.771 39.86,111.821 48.14,107.581 L48.14,175.921 L59.14,187.131 L59.14,101.831 C59.14,101.831 59.39,101.711 60.22,101.261 C63.5480598,99.6738911 66.7772674,97.8873078 69.89,95.911 C77.7130888,90.4306687 82.7479457,81.8029342 83.6709542,72.295947 C84.5939627,62.7889599 81.3127806,53.3538429 74.69,46.471 C66.49,37.891 58.24,29.351 50.05,20.761 C47.67,18.261 47.72,15.101 50.05,12.881 C52.38,10.661 55.56,10.881 57.96,13.331 L61.38,16.781 C64.1,19.681 66.79,22.611 69.53,25.481 C76.4547149,32.7389629 83.3947303,39.9823123 90.35,47.211 C90.7,47.571 91.12,47.871 91.5,48.211 L91.93,47.951 C91.8351945,47.4695902 91.6876376,47.0000911 91.49,46.551 Z M48.11,94.931 C47.9883217,95.5022568 47.6230065,95.9917791 47.11,96.271 C42.72,98.601 38.29,100.871 33.87,103.141 L17.76,111.401 C24.771203,96.7435071 35.1132853,83.9289138 47.96,73.981 C48.08,74.221 48.16,74.301 48.16,74.381 C48.15,81.231 48.17,88.081 48.11,94.931 Z" id="Shape"></path>
        </g>
    </g>
</svg>
After: 2.2 KiB

BIN    browser/production/safari.png    Normal file
After: 4.9 KiB
@@ -21,7 +21,7 @@ _init() {

    ## Minimum required versions for build dependencies
    GIT_VERSION="1.0"
-   GO_VERSION="1.13"
+   GO_VERSION="1.16"
    OSX_VERSION="10.8"
    KNAME=$(uname -s)
    ARCH=$(uname -m)
@@ -9,7 +9,7 @@ function _init() {
    export CGO_ENABLED=0

    ## List of architectures and OS to test cross compilation.
-   SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
+   SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
}

function _build() {
@@ -1,69 +0,0 @@
#!/bin/bash
#
# MinIO Cloud Storage, (C) 2019 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e
set -E
set -o pipefail

function start_minio_server()
{
    MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
        minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
    server_pid=$!
    sleep 10

    echo "$server_pid"
}

function start_minio_gateway_s3()
{
    MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
        minio --quiet --json gateway s3 http://127.0.0.1:24242 \
        --address 127.0.0.1:24240 > gateway.log 2>&1 &
    gw_pid=$!
    sleep 10

    echo "$gw_pid"
}

function main()
{
    sr_pid="$(start_minio_server)"
    gw_pid="$(start_minio_gateway_s3)"

    SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
        SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
        aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
        healthcheck mc minio-dotnet minio-js \
        minio-py s3cmd s3select security
    rv=$?

    kill "$sr_pid"
    kill "$gw_pid"
    sleep 3

    if [ "$rv" -ne 0 ]; then
        echo "=========== Gateway ==========="
        cat "gateway.log"
        echo "=========== Server ==========="
        cat "server.log"
    fi

    rm -f gateway.log server.log
}

main "$@"
@@ -3,5 +3,5 @@
set -e

for d in $(go list ./... | grep -v browser); do
-    CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
+    CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
done
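Note: the race run now builds with "-tags kqueue" and a doubled timeout. A build tag like this compiles platform-specific files in only when requested; a minimal, self-contained illustration of the mechanism (package and file names are hypothetical, not from this repository):

//go:build kqueue
// +build kqueue

// watcher_kqueue.go: compiled only when building or testing with
// "go test -tags kqueue ./...". A sibling file guarded by
// "//go:build !kqueue" would supply the default implementation,
// so the same package builds either way.
package watcher

// EventBackend reports which event-notification backend this build uses.
func EventBackend() string { return "kqueue" }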
@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
+export GOGC=25

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

@@ -63,8 +64,8 @@ function start_minio_erasure_sets()

function start_minio_pool_erasure_sets()
{
-    export MINIO_ACCESS_KEY=$ACCESS_KEY
-    export MINIO_SECRET_KEY=$SECRET_KEY
+    export MINIO_ROOT_USER=$ACCESS_KEY
+    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
    "${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
    "${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &

@@ -74,8 +75,8 @@ function start_minio_pool_erasure_sets()

function start_minio_pool_erasure_sets_ipv6()
{
-    export MINIO_ACCESS_KEY=$ACCESS_KEY
-    export MINIO_SECRET_KEY=$SECRET_KEY
+    export MINIO_ROOT_USER=$ACCESS_KEY
+    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
    "${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

@@ -85,8 +86,8 @@ function start_minio_pool_erasure_sets_ipv6()

function start_minio_dist_erasure()
{
-    export MINIO_ACCESS_KEY=$ACCESS_KEY
-    export MINIO_SECRET_KEY=$SECRET_KEY
+    export MINIO_ROOT_USER=$ACCESS_KEY
+    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
    for i in $(seq 0 3); do
        "${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &

@@ -28,9 +28,11 @@ WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

+export GOGC=25
+
function start_minio_3_node() {
-    export MINIO_ACCESS_KEY=minio
-    export MINIO_SECRET_KEY=minio123
+    export MINIO_ROOT_USER=minio
+    export MINIO_ROOT_PASSWORD=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6

    start_port=$(shuf -i 10000-65000 -n 1)
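The test scripts above migrate from MINIO_ACCESS_KEY/MINIO_SECRET_KEY to the MINIO_ROOT_USER/MINIO_ROOT_PASSWORD names. A minimal sketch of resolving credentials with a legacy fallback — assuming, as an illustration rather than a statement about MinIO's exact behavior, that the old variables are still honored when the new ones are absent (the helper name is invented):

package main

import (
    "fmt"
    "os"
)

// envOr returns the first set environment variable among keys,
// or def when none is present.
func envOr(def string, keys ...string) string {
    for _, k := range keys {
        if v, ok := os.LookupEnv(k); ok {
            return v
        }
    }
    return def
}

func main() {
    // Prefer the new MINIO_ROOT_* names; fall back to the legacy pair.
    user := envOr("minioadmin", "MINIO_ROOT_USER", "MINIO_ACCESS_KEY")
    pass := envOr("minioadmin", "MINIO_ROOT_PASSWORD", "MINIO_SECRET_KEY")
    fmt.Printf("resolved user=%s (password set: %t)\n", user, pass != "")
}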
@@ -20,7 +20,6 @@ import (
    "encoding/xml"
    "io"
    "net/http"
-   "net/url"

    "github.com/gorilla/mux"
    xhttp "github.com/minio/minio/cmd/http"

@@ -61,7 +60,7 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketACL")

-   defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
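The AuditLog rewrite seen here repeats across every handler in this diff: newContext already records the API name on the request context, so logger.AuditLog now takes ctx and derives the name from it instead of accepting a second, hand-typed copy. That removes copy-paste drift — note the HelpConfigKV handler further down, which audited itself as "HelpHistoryKV" under the old scheme. A standalone sketch of the pattern, with illustrative names rather than MinIO's internal types:

package main

import (
    "context"
    "fmt"
    "net/http"
)

type apiNameKey struct{}

// newContext tags the request context with the API name once.
func newContext(r *http.Request, api string) context.Context {
    return context.WithValue(r.Context(), apiNameKey{}, api)
}

// auditLog reads the API name back out of the context, so the audit
// entry can never disagree with the name the handler registered.
func auditLog(ctx context.Context, r *http.Request) {
    api, _ := ctx.Value(apiNameKey{}).(string)
    fmt.Printf("audit: api=%s path=%s\n", api, r.URL.Path)
}

func putBucketACL(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "PutBucketACL")
    defer auditLog(ctx, r)
    w.WriteHeader(http.StatusOK)
}

func main() {
    http.HandleFunc("/", putBucketACL)
    // http.ListenAndServe(":8080", nil) would serve it; omitted here.
}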
@@ -125,7 +124,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketACL")

-   defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

@@ -176,11 +175,11 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutObjectACL")

-   defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
-   object, err := url.PathUnescape(vars["object"])
+   object, err := unescapePath(vars["object"])
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return

@@ -240,11 +239,11 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetObjectACL")

-   defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
-   object, err := url.PathUnescape(vars["object"])
+   object, err := unescapePath(vars["object"])
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
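Both object ACL handlers also switch from url.PathUnescape to a shared unescapePath helper, which is why the "net/url" import disappears at the top of this file. The helper's body is not part of this diff; a plausible sketch, assuming it simply wraps the standard decoder and normalizes the result (MinIO's real helper may do more):

package main

import (
    "fmt"
    "net/url"
    "path"
)

// unescapePath decodes a raw object name once and cleans it, so every
// handler agrees on one canonical form. Illustrative only.
func unescapePath(p string) (string, error) {
    ep, err := url.PathUnescape(p)
    if err != nil {
        return "", err
    }
    return path.Clean(ep), nil
}

func main() {
    obj, err := unescapePath("photos%2F2021%2Fcat.png")
    if err != nil {
        panic(err)
    }
    fmt.Println(obj) // photos/2021/cat.png
}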
@@ -23,9 +23,7 @@ import (
    "net/http"

    "github.com/gorilla/mux"
-   "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/cmd/logger"
-   "github.com/minio/minio/pkg/env"
    iampolicy "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/madmin"
)

@@ -43,7 +41,7 @@ const (
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketQuotaConfig")

-   defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
    if objectAPI == nil {

@@ -54,12 +52,6 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
    vars := mux.Vars(r)
    bucket := vars["bucket"]

-   // Turn off quota commands if data usage info is unavailable.
-   if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
-       writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
-       return
-   }
-
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return

@@ -89,7 +81,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketQuotaConfig")

-   defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    if objectAPI == nil {

@@ -124,7 +116,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetBucketTarget")

-   defer logger.AuditLog(w, r, "SetBucketTarget", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    update := r.URL.Query().Get("update") == "true"

@@ -164,7 +156,6 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
-
    sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
    if sameTarget && bucket == target.TargetBucket {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)

@@ -181,7 +172,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
    }

    if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
-       writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+       switch err.(type) {
+       case BucketRemoteConnectionErr:
+           writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
+       default:
+           writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+       }
        return
    }
    targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
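SetTarget failures are now classified by concrete error type, so an unreachable remote surfaces as a replication-specific API error rather than a generic one. A reduced sketch of type-switch error mapping (the error type and codes below are invented for illustration):

package main

import (
    "errors"
    "fmt"
)

// RemoteConnectionErr stands in for a typed error such as
// BucketRemoteConnectionErr in the hunk above.
type RemoteConnectionErr struct{ Endpoint string }

func (e RemoteConnectionErr) Error() string {
    return "cannot reach remote target " + e.Endpoint
}

// apiCode maps an error to a client-facing code: a specific code for
// connection problems, a generic fallback for everything else.
func apiCode(err error) string {
    switch err.(type) {
    case RemoteConnectionErr:
        return "ReplicationRemoteConnectionError"
    default:
        return "InternalError"
    }
}

func main() {
    fmt.Println(apiCode(RemoteConnectionErr{Endpoint: "site-b:9000"}))
    fmt.Println(apiCode(errors.New("disk failure")))
}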
@@ -213,7 +209,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListBucketTargets")

-   defer logger.AuditLog(w, r, "ListBucketTargets", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arnType := vars["type"]

@@ -252,7 +248,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveBucketTarget")

-   defer logger.AuditLog(w, r, "RemoveBucketTarget", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arn := vars["arn"]

@@ -62,7 +62,7 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DeleteConfigKV")

-   defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -104,7 +104,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfigKV")

-   defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -130,13 +130,14 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

+   dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
+   if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

-   if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
+   if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -158,15 +159,14 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
        saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
    }

-   // Apply dynamic values.
-   if err := applyDynamicConfig(GlobalContext, cfg); err != nil {
-       writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-       return
-   }
-   globalNotificationSys.SignalService(serviceReloadDynamic)
-
-   // If all values were dynamic, tell the client.
    if dynamic {
+       // Apply dynamic values.
+       if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
+           writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+           return
+       }
+       globalNotificationSys.SignalService(serviceReloadDynamic)
+       // If all values were dynamic, tell the client.
        w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
    }
    writeSuccessResponseHeadersOnly(w)
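ReadConfig now reports whether every updated key belongs to a dynamic subsystem, and the handler hot-applies and signals peers only in that case, answering with madmin.ConfigAppliedHeader so the client knows no restart is needed. A toy reduction of that control flow (the subsystem names are invented):

package main

import "fmt"

// applyConfig mirrors the new flow: when every changed key is dynamic,
// apply in place and report that a restart is unnecessary.
func applyConfig(keys []string) (restartNeeded bool) {
    dynamic := true
    for _, k := range keys {
        // Pretend only the "api" and "heal" subsystems are dynamic.
        if k != "api" && k != "heal" {
            dynamic = false
        }
    }
    if dynamic {
        fmt.Println("hot-applied:", keys) // applyDynamicConfig + SignalService
        return false
    }
    fmt.Println("saved to disk; restart required:", keys)
    return true
}

func main() {
    fmt.Println(applyConfig([]string{"api"}))
    fmt.Println(applyConfig([]string{"region"}))
}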
@@ -176,7 +176,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfigKV")

-   defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -214,7 +214,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ClearConfigHistoryKV")

-   defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -251,7 +251,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RestoreConfigHistoryKV")

-   defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -282,7 +282,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
        return
    }

-   if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
+   if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -299,7 +299,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListConfigHistoryKV")

-   defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -339,7 +339,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HelpConfigKV")

-   defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -367,7 +367,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfig")

-   defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -394,7 +394,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
        return
    }

-   if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
+   if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -424,7 +424,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfig")

-   defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -19,12 +19,15 @@ package cmd
import (
    "context"
    "encoding/json"
+   "errors"
    "io"
    "io/ioutil"
    "net/http"
    "path"
+   "sort"

    "github.com/gorilla/mux"
+   "github.com/minio/minio/cmd/config/dns"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/auth"
    iampolicy "github.com/minio/minio/pkg/iam/policy"

@@ -56,7 +59,7 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveUser")

-   defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
    if objectAPI == nil {

@@ -66,7 +69,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    accessKey := vars["accessKey"]

-   ok, err := globalIAMSys.IsTempUser(accessKey)
+   ok, _, err := globalIAMSys.IsTempUser(accessKey)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return

@@ -94,7 +97,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListUsers")

-   defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
    if objectAPI == nil {

@@ -128,7 +131,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetUserInfo")

-   defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    name := vars["accessKey"]

@@ -155,6 +158,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
    if !implicitPerm {
        if !globalIAMSys.IsAllowed(iampolicy.Args{
            AccountName:     accessKey,
+           Groups:          cred.Groups,
            Action:          iampolicy.GetUserAdminAction,
            ConditionValues: getConditionValues(r, "", accessKey, claims),
            IsOwner:         owner,

@@ -184,7 +188,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "UpdateGroupMembers")

-   defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
    if objectAPI == nil {

@@ -229,7 +233,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetGroup")

-   defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
    if objectAPI == nil {

@@ -258,7 +262,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListGroups")

-   defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
    if objectAPI == nil {

@@ -284,7 +288,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetGroupStatus")

-   defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
    if objectAPI == nil {

@@ -321,7 +325,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetUserStatus")

-   defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
    if objectAPI == nil {

@@ -356,7 +360,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "AddUser")

-   defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    accessKey := path.Clean(vars["accessKey"])

@@ -395,6 +399,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
    }
    if !globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     parentUser,
+       Groups:          cred.Groups,
        Action:          iampolicy.CreateUserAdminAction,
        ConditionValues: getConditionValues(r, "", parentUser, claims),
        IsOwner:         owner,

@@ -405,6 +410,19 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
        }
    }

+   if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
+       AccountName:     accessKey,
+       Groups:          cred.Groups,
+       Action:          iampolicy.CreateUserAdminAction,
+       ConditionValues: getConditionValues(r, "", accessKey, claims),
+       IsOwner:         owner,
+       Claims:          claims,
+       DenyOnly:        true, // check if changing password is explicitly denied.
+   }) {
+       writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+       return
+   }
+
    if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
        // More than maxConfigSize bytes were available
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
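The DenyOnly flag added here inverts the usual policy question for self-service password changes: instead of requiring an explicit allow, the check passes unless the action is explicitly denied for the caller's own access key. A compact sketch of deny-only evaluation (generic types, not MinIO's iampolicy package):

package main

import "fmt"

type effect int

const (
    allow effect = iota
    deny
)

type statement struct {
    effect effect
    action string
}

// isAllowed evaluates statements; with denyOnly set, the absence of an
// explicit deny is sufficient, mirroring the DenyOnly field above.
func isAllowed(stmts []statement, action string, denyOnly bool) bool {
    allowed := false
    for _, s := range stmts {
        if s.action != action {
            continue
        }
        if s.effect == deny {
            return false // an explicit deny always wins
        }
        allowed = true
    }
    if denyOnly {
        return true // nothing explicitly denied it
    }
    return allowed
}

func main() {
    denied := []statement{{effect: deny, action: "admin:CreateUser"}}
    fmt.Println(isAllowed(denied, "admin:CreateUser", true)) // false
    fmt.Println(isAllowed(nil, "admin:CreateUser", true))    // true
    fmt.Println(isAllowed(nil, "admin:CreateUser", false))   // false
}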
@@ -426,7 +444,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
        return
    }

-   if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
+   if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

@@ -444,7 +462,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "AddServiceAccount")

-   defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()

@@ -483,7 +501,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
        parentUser = cred.ParentUser
    }

-   newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
+   newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return

@@ -523,7 +541,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListServiceAccounts")

-   defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()

@@ -578,7 +596,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DeleteServiceAccount")

-   defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()

@@ -637,7 +655,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "AccountInfo")

-   defer logger.AuditLog(w, r, "AccountInfo", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()

@@ -663,6 +681,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
    // https://github.com/golang/go/wiki/SliceTricks#filter-in-place
    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     cred.AccessKey,
+       Groups:          cred.Groups,
        Action:          iampolicy.ListBucketAction,
        BucketName:      bucketName,
        ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),

@@ -675,6 +694,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     cred.AccessKey,
+       Groups:          cred.Groups,
        Action:          iampolicy.PutObjectAction,
        BucketName:      bucketName,
        ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),

@@ -688,12 +708,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
        return rd, wr
    }

-   buckets, err := objectAPI.ListBuckets(ctx)
-   if err != nil {
-       writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-       return
-   }
-
    // Load the latest calculated data usage
    dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
    if err != nil {

@@ -701,12 +715,47 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
        logger.LogIf(ctx, err)
    }

-   accountName := cred.AccessKey
-   if cred.ParentUser != "" {
-       accountName = cred.ParentUser
-   }
+   // If etcd, dns federation configured list buckets from etcd.
+   var buckets []BucketInfo
+   if globalDNSConfig != nil && globalBucketFederation {
+       dnsBuckets, err := globalDNSConfig.List()
+       if err != nil && !IsErrIgnored(err,
+           dns.ErrNoEntriesFound,
+           dns.ErrDomainMissing) {
+           writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+           return
+       }
+       for _, dnsRecords := range dnsBuckets {
+           buckets = append(buckets, BucketInfo{
+               Name:    dnsRecords[0].Key,
+               Created: dnsRecords[0].CreationDate,
+           })
+       }
+       sort.Slice(buckets, func(i, j int) bool {
+           return buckets[i].Name < buckets[j].Name
+       })
+   } else {
+       buckets, err = objectAPI.ListBuckets(ctx)
+       if err != nil {
+           writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+           return
+       }
+   }

-   policies, err := globalIAMSys.PolicyDBGet(accountName, false)
+   accountName := cred.AccessKey
+   var policies []string
+   switch globalIAMSys.usersSysType {
+   case MinIOUsersSysType:
+       policies, err = globalIAMSys.PolicyDBGet(accountName, false)
+   case LDAPUsersSysType:
+       parentUser := accountName
+       if cred.ParentUser != "" {
+           parentUser = cred.ParentUser
+       }
+       policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
+   default:
+       err = errors.New("should not happen!")
+   }
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -751,7 +800,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "InfoCannedPolicyV2")

-   defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
    if objectAPI == nil {

@@ -778,7 +827,7 @@ func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "InfoCannedPolicy")

-   defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
    if objectAPI == nil {

@@ -802,7 +851,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListCannedPoliciesV2")

-   defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
    if objectAPI == nil {

@@ -836,7 +885,7 @@ func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListCannedPolicies")

-   defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
    if objectAPI == nil {

@@ -870,7 +919,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveCannedPolicy")

-   defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
    if objectAPI == nil {

@@ -898,7 +947,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "AddCannedPolicy")

-   defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
    if objectAPI == nil {

@@ -950,7 +999,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetPolicyForUserOrGroup")

-   defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
    if objectAPI == nil {

@@ -963,7 +1012,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
    isGroup := vars["isGroup"] == "true"

    if !isGroup {
-       ok, err := globalIAMSys.IsTempUser(entityName)
+       ok, _, err := globalIAMSys.IsTempUser(entityName)
        if err != nil && err != errNoSuchUser {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return

@@ -24,6 +24,7 @@ import (
    "errors"
    "fmt"
    "io"
+   "math/rand"
    "net/http"
    "net/url"
    "os"

@@ -42,6 +43,7 @@ import (
    "github.com/minio/minio/cmd/logger/message/log"
    "github.com/minio/minio/pkg/auth"
    "github.com/minio/minio/pkg/bandwidth"
+   "github.com/minio/minio/pkg/dsync"
    "github.com/minio/minio/pkg/handlers"
    iampolicy "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/madmin"

@@ -78,7 +80,7 @@ func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ServerUpdate")

-   defer logger.AuditLog(w, r, "ServerUpdate", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerUpdateAdminAction)
    if objectAPI == nil {

@@ -187,7 +189,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "Service")

-   defer logger.AuditLog(w, r, "Service", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    action := vars["action"]

@@ -258,9 +260,11 @@ type ServerHTTPAPIStats struct {
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
+   S3RequestsInQueue int32              `json:"s3RequestsInQueue"`
    CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
    TotalS3Requests   ServerHTTPAPIStats `json:"totalS3Requests"`
    TotalS3Errors     ServerHTTPAPIStats `json:"totalS3Errors"`
+   TotalS3Canceled   ServerHTTPAPIStats `json:"totalS3Canceled"`
}

// ServerInfoData holds storage, connections and other
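ServerHTTPStats picks up a queue-depth gauge and per-API canceled-request counters, both serialized into the existing stats payload. A quick look at the resulting wire shape, using trimmed stand-in types (only the new fields plus one counter map are kept):

package main

import (
    "encoding/json"
    "fmt"
)

type serverHTTPAPIStats struct {
    APIStats map[string]int `json:"apiStats,omitempty"`
}

type serverHTTPStats struct {
    S3RequestsInQueue int32              `json:"s3RequestsInQueue"`
    TotalS3Requests   serverHTTPAPIStats `json:"totalS3Requests"`
    TotalS3Canceled   serverHTTPAPIStats `json:"totalS3Canceled"`
}

func main() {
    s := serverHTTPStats{
        S3RequestsInQueue: 3,
        TotalS3Requests:   serverHTTPAPIStats{APIStats: map[string]int{"getobject": 42}},
        TotalS3Canceled:   serverHTTPAPIStats{APIStats: map[string]int{"getobject": 1}},
    }
    out, _ := json.MarshalIndent(s, "", "  ")
    fmt.Println(string(out))
}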
@@ -284,7 +288,7 @@ type ServerInfo struct {
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "StorageInfo")

-   defer logger.AuditLog(w, r, "StorageInfo", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.StorageInfoAdminAction)
    if objectAPI == nil {

@@ -292,10 +296,10 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
    }

    // ignores any errors here.
-   storageInfo, _ := objectAPI.StorageInfo(ctx, false)
+   storageInfo, _ := objectAPI.StorageInfo(ctx)

    // Collect any disk healing.
-   healing, _ := getAggregatedBackgroundHealState(ctx)
+   healing, _ := getAggregatedBackgroundHealState(ctx, nil)
    healDisks := make(map[string]struct{}, len(healing.HealDisks))
    for _, disk := range healing.HealDisks {
        healDisks[disk] = struct{}{}

@@ -327,7 +331,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DataUsageInfo")

-   defer logger.AuditLog(w, r, "DataUsageInfo", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DataUsageInfoAdminAction)
    if objectAPI == nil {

@@ -403,11 +407,50 @@ type PeerLocks struct {
    Locks map[string][]lockRequesterInfo
}

+// ForceUnlockHandler force unlocks requested resource
+func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
+   ctx := newContext(r, w, "ForceUnlock")
+
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
+
+   objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ForceUnlockAdminAction)
+   if objectAPI == nil {
+       return
+   }
+
+   z, ok := objectAPI.(*erasureServerPools)
+   if !ok {
+       writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
+       return
+   }
+
+   vars := mux.Vars(r)
+
+   var args dsync.LockArgs
+   lockersMap := make(map[string]dsync.NetLocker)
+   for _, path := range strings.Split(vars["paths"], ",") {
+       if path == "" {
+           continue
+       }
+       args.Resources = append(args.Resources, path)
+       lockers, _ := z.serverPools[0].getHashedSet(path).getLockers()
+       for _, locker := range lockers {
+           if locker != nil {
+               lockersMap[locker.String()] = locker
+           }
+       }
+   }
+
+   for _, locker := range lockersMap {
+       locker.ForceUnlock(ctx, args)
+   }
+}
+
// TopLocksHandler Get list of locks in use
func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "TopLocks")

-   defer logger.AuditLog(w, r, "TopLocks", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.TopLocksAdminAction)
    if objectAPI == nil {

@@ -459,7 +502,7 @@ type StartProfilingResult struct {
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "StartProfiling")

-   defer logger.AuditLog(w, r, "StartProfiling", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Validate request signature.
    _, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")

@@ -564,7 +607,7 @@ func (f dummyFileInfo) Sys() interface{} { return f.sys }
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DownloadProfiling")

-   defer logger.AuditLog(w, r, "DownloadProfiling", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Validate request signature.
    _, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")

@@ -665,7 +708,7 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "Heal")

-   defer logger.AuditLog(w, r, "Heal", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
    if objectAPI == nil {

@@ -820,16 +863,14 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
    keepConnLive(w, r, respCh)
}

-func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
-   var bgHealStates []madmin.BgHealState
-
-   localHealState, ok := getLocalBackgroundHealStatus()
-   if !ok {
-       return madmin.BgHealState{}, errServerNotInitialized
-   }
-
+// getAggregatedBackgroundHealState returns the heal state of disks.
+// If no ObjectLayer is provided no set status is returned.
+func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
    // Get local heal status first
-   bgHealStates = append(bgHealStates, localHealState)
+   bgHealStates, ok := getBackgroundHealStatus(ctx, o)
+   if !ok {
+       return bgHealStates, errServerNotInitialized
+   }

    if globalIsDistErasure {
        // Get heal status from other peers

@@ -844,39 +885,16 @@ func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState,
        if errCount == len(nerrs) {
            return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
        }
-       bgHealStates = append(bgHealStates, peersHealStates...)
+       bgHealStates.Merge(peersHealStates...)
    }

-   // Aggregate healing result
-   var aggregatedHealStateResult = madmin.BgHealState{
-       ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
-       LastHealActivity:  bgHealStates[0].LastHealActivity,
-       NextHealRound:     bgHealStates[0].NextHealRound,
-       HealDisks:         bgHealStates[0].HealDisks,
-   }
-
-   bgHealStates = bgHealStates[1:]
-
-   for _, state := range bgHealStates {
-       aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
-       aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
-       if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
-           aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
-           // The node which has the last heal activity means it
-           // is the node that is orchestrating self healing operations,
-           // which also means it is the same node which decides when
-           // the next self healing operation will be done.
-           aggregatedHealStateResult.NextHealRound = state.NextHealRound
-       }
-   }
-
-   return aggregatedHealStateResult, nil
+   return bgHealStates, nil
}

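The hand-rolled aggregation loop deleted above becomes a Merge method on madmin.BgHealState; the removed code documents its semantics: sum scanned items, union heal disks, and adopt the most recent heal activity together with that node's next-round estimate. A condensed sketch of such a merge over a reduced field set:

package main

import (
    "fmt"
    "time"
)

// bgHealState is a reduced stand-in for madmin.BgHealState.
type bgHealState struct {
    ScannedItemsCount int64
    HealDisks         []string
    LastHealActivity  time.Time
    NextHealRound     time.Time
}

// Merge folds other states into h, following the semantics of the loop
// this release removes from getAggregatedBackgroundHealState.
func (h *bgHealState) Merge(others ...bgHealState) {
    for _, o := range others {
        h.ScannedItemsCount += o.ScannedItemsCount
        h.HealDisks = append(h.HealDisks, o.HealDisks...)
        if !o.LastHealActivity.IsZero() && h.LastHealActivity.Before(o.LastHealActivity) {
            // The most recently active node orchestrates self-healing,
            // so its schedule decides the next round.
            h.LastHealActivity = o.LastHealActivity
            h.NextHealRound = o.NextHealRound
        }
    }
}

func main() {
    local := bgHealState{ScannedItemsCount: 10, HealDisks: []string{"d1"}}
    peer := bgHealState{ScannedItemsCount: 5, HealDisks: []string{"d2"}, LastHealActivity: time.Now()}
    local.Merge(peer)
    fmt.Println(local.ScannedItemsCount, local.HealDisks)
}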
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HealBackgroundStatus")

-   defer logger.AuditLog(w, r, "HealBackgroundStatus", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
    if objectAPI == nil {

@@ -889,7 +907,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
        return
    }

-   aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
+   aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return

@@ -1097,7 +1115,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ConsoleLog")

-   defer logger.AuditLog(w, r, "ConsoleLog", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConsoleLogAdminAction)
    if objectAPI == nil {

@@ -1168,7 +1186,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
// KMSCreateKeyHandler - POST /minio/admin/v3/kms/key/create?key-id=<master-key-id>
func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "KMSCreateKey")
-   defer logger.AuditLog(w, r, "KMSCreateKey", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSCreateKeyAdminAction)
    if objectAPI == nil {

@@ -1191,7 +1209,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "KMSKeyStatus")

-   defer logger.AuditLog(w, r, "KMSKeyStatus", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAdminAction)
    if objectAPI == nil {

@@ -1264,7 +1282,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HealthInfo")

-   defer logger.AuditLog(w, r, "HealthInfo", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
    if objectAPI == nil {

@@ -1307,8 +1325,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
    deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
    defer cancel()

+   var err error
    nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
-   if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
+   ctx, err = nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
+   if err != nil { // returns a locked lock
        errResp(err)
        return
    }

@@ -1412,7 +1432,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque

    }()

-   ticker := time.NewTicker(30 * time.Second)
+   ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()

    for {

@@ -1442,7 +1462,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "BandwidthMonitor")

-   defer logger.AuditLog(w, r, "BandwidthMonitor", mustGetClaimsFromToken(r))
+   defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    // Validate request signature.
    _, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.BandwidthMonitorAction, "")

@@ -1451,30 +1471,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
        return
    }

+   rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
    setEventStreamHeaders(w)
-   reportCh := make(chan bandwidth.Report, 1)
+   reportCh := make(chan bandwidth.Report)
    keepAliveTicker := time.NewTicker(500 * time.Millisecond)
    defer keepAliveTicker.Stop()
    bucketsRequestedString := r.URL.Query().Get("buckets")
    bucketsRequested := strings.Split(bucketsRequestedString, ",")
    go func() {
+       defer close(reportCh)
        for {
-           reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
            select {
            case <-ctx.Done():
                return
-           default:
-               time.Sleep(2 * time.Second)
+           case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
+               time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
            }
        }
    }()
    for {
        select {
-       case report := <-reportCh:
-           enc := json.NewEncoder(w)
-           err := enc.Encode(report)
-           if err != nil {
-               writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+       case report, ok := <-reportCh:
+           if !ok {
+               return
+           }
+           if err := json.NewEncoder(w).Encode(report); err != nil {
+               writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
                return
            }
            w.(http.Flusher).Flush()
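The bandwidth streamer above fixes a classic leak: the old producer performed a blind send on reportCh before checking for cancellation, so a departed client could block the goroutine forever. Now the send itself is a select case against ctx.Done(), the channel is unbuffered, and the producer closes it on exit so the consumer can distinguish end-of-stream from a report. A standalone reduction of the pattern:

package main

import (
    "context"
    "fmt"
    "time"
)

// produce streams values until ctx is canceled. Because the send sits
// inside the select, a consumer that stops reading cannot wedge us.
func produce(ctx context.Context, out chan<- int) {
    defer close(out) // consumer's receive reports ok==false and it stops
    for i := 0; ; i++ {
        select {
        case <-ctx.Done():
            return
        case out <- i:
            time.Sleep(50 * time.Millisecond) // pacing between reports
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
    defer cancel()

    reports := make(chan int) // unbuffered, like the reworked reportCh
    go produce(ctx, reports)

    for r := range reports { // exits when the producer closes the channel
        fmt.Println("report:", r)
    }
}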
@@ -1495,7 +1518,7 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
|
||||
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ServerInfo")
|
||||
|
||||
defer logger.AuditLog(w, r, "ServerInfo", mustGetClaimsFromToken(r))
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
// Validate request signature.
|
||||
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ServerInfoAdminAction, "")
|
||||
@@ -1504,19 +1527,19 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
vault := fetchVaultStatus()
|
||||
kmsStat := fetchKMSStatus()
|
||||
|
||||
ldap := madmin.LDAP{}
|
||||
if globalLDAPConfig.Enabled {
|
||||
ldapConn, err := globalLDAPConfig.Connect()
|
||||
if err != nil {
|
||||
ldap.Status = "offline"
|
||||
ldap.Status = string(madmin.ItemOffline)
|
||||
} else if ldapConn == nil {
|
||||
ldap.Status = "Not Configured"
|
||||
} else {
|
||||
// Close ldap connection to avoid leaks.
|
||||
ldapConn.Close()
|
||||
ldap.Status = "online"
|
||||
ldap.Status = string(madmin.ItemOnline)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1525,12 +1548,14 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 	// Get the notification target info
 	notifyTarget := fetchLambdaInfo()
 
-	server := getLocalServerProperty(globalEndpoints, r)
+	local := getLocalServerProperty(globalEndpoints, r)
 	servers := globalNotificationSys.ServerInfo()
-	servers = append(servers, server)
+	servers = append(servers, local)
+
+	assignPoolNumbers(servers)
 
 	var backend interface{}
-	mode := madmin.ObjectLayerInitializing
+	mode := madmin.ItemInitializing
 
 	buckets := madmin.Buckets{}
 	objects := madmin.Objects{}
@@ -1538,18 +1563,23 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 
 	objectAPI := newObjectLayerFn()
 	if objectAPI != nil {
-		mode = madmin.ObjectLayerOnline
+		mode = madmin.ItemOnline
+
 		// Load data usage
 		dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
 		if err == nil {
 			buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
 			objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
 			usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
+		} else {
+			buckets = madmin.Buckets{Error: err.Error()}
+			objects = madmin.Objects{Error: err.Error()}
+			usage = madmin.Usage{Error: err.Error()}
 		}
 
 		// Fetching the backend information
 		backendInfo := objectAPI.BackendInfo()
-		if backendInfo.Type == BackendType(madmin.Erasure) {
+		if backendInfo.Type == madmin.Erasure {
 			// Calculate the number of online/offline disks of all nodes
 			var allDisks []madmin.Disk
 			for _, s := range servers {
@@ -1561,9 +1591,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 				Type:             madmin.ErasureType,
 				OnlineDisks:      onlineDisks.Sum(),
 				OfflineDisks:     offlineDisks.Sum(),
-				StandardSCData:   backendInfo.StandardSCData,
 				StandardSCParity: backendInfo.StandardSCParity,
-				RRSCData:         backendInfo.RRSCData,
 				RRSCParity:       backendInfo.RRSCParity,
 			}
 		} else {
@@ -1575,7 +1603,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 
 	domain := globalDomainNames
 	services := madmin.Services{
-		Vault:  vault,
+		KMS:    kmsStat,
 		LDAP:   ldap,
 		Logger: log,
 		Audit:  audit,
@@ -1583,7 +1611,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 	}
 
 	infoMsg := madmin.InfoMessage{
-		Mode:   mode,
+		Mode:   string(mode),
 		Domain: domain,
 		Region: globalServerRegion,
 		SQSARN: globalNotificationSys.GetARNList(false),
@@ -1608,6 +1636,22 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 	writeSuccessResponseJSON(w, jsonBytes)
 }
 
+func assignPoolNumbers(servers []madmin.ServerProperties) {
+	for i := range servers {
+		for idx, ge := range globalEndpoints {
+			for _, endpoint := range ge.Endpoints {
+				if servers[i].Endpoint == endpoint.Host {
+					servers[i].PoolNumber = idx + 1
+				} else if host, err := xnet.ParseHost(servers[i].Endpoint); err == nil {
+					if host.Name == endpoint.Hostname() {
+						servers[i].PoolNumber = idx + 1
+					}
+				}
+			}
+		}
+	}
+}
+
 func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 
 	lambdaMap := make(map[string][]madmin.TargetIDStatus)
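The new `assignPoolNumbers` matches a server either by its full `host:port` endpoint or by hostname alone. A simplified, self-contained sketch of that matching, using the standard library in place of MinIO's `xnet` helper (all names below are stand-ins, not MinIO API):

```go
package main

import (
	"fmt"
	"net"
)

// assignPools tags a server with pool index+1 when its advertised endpoint
// matches either the full host:port of a pool endpoint or just its hostname.
func assignPools(servers []string, pools [][]string) map[string]int {
	out := make(map[string]int)
	for _, srv := range servers {
		srvHost, _, err := net.SplitHostPort(srv)
		for idx, pool := range pools {
			for _, ep := range pool {
				if srv == ep {
					out[srv] = idx + 1 // exact host:port match
				} else if err == nil {
					if epHost, _, e := net.SplitHostPort(ep); e == nil && srvHost == epHost {
						out[srv] = idx + 1 // hostname-only match
					}
				}
			}
		}
	}
	return out
}

func main() {
	pools := [][]string{
		{"node1:9000", "node2:9000"},
		{"node3:9000", "node4:9000"},
	}
	fmt.Println(assignPools([]string{"node3:9000"}, pools)) // map[node3:9000:2]
}
```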
@@ -1617,9 +1661,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 		active, _ := tgt.IsActive()
 		targetID := tgt.ID()
 		if active {
-			targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
+			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
 		} else {
-			targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
+			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
 		}
 		list := lambdaMap[targetID.Name]
 		list = append(list, targetIDStatus)
@@ -1631,9 +1675,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 		active, _ := tgt.IsActive()
 		targetID := tgt.ID()
 		if active {
-			targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
+			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
 		} else {
-			targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
+			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
 		}
 		list := lambdaMap[targetID.Name]
 		list = append(list, targetIDStatus)
@@ -1651,47 +1695,46 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
 	return notify
 }
 
-// fetchVaultStatus fetches Vault Info
-func fetchVaultStatus() madmin.Vault {
-	vault := madmin.Vault{}
+// fetchKMSStatus fetches KMS-related status information.
+func fetchKMSStatus() madmin.KMS {
+	kmsStat := madmin.KMS{}
 	if GlobalKMS == nil {
-		vault.Status = "disabled"
-		return vault
+		kmsStat.Status = "disabled"
+		return kmsStat
 	}
 	keyID := GlobalKMS.DefaultKeyID()
 	kmsInfo := GlobalKMS.Info()
 
 	if len(kmsInfo.Endpoints) == 0 {
-		vault.Status = "KMS configured using master key"
-		return vault
+		kmsStat.Status = "KMS configured using master key"
+		return kmsStat
 	}
 
 	if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
-		vault.Status = "offline"
+		kmsStat.Status = string(madmin.ItemOffline)
 	} else {
-		vault.Status = "online"
+		kmsStat.Status = string(madmin.ItemOnline)
 
 		kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
 		// 1. Generate a new key using the KMS.
 		key, sealedKey, err := GlobalKMS.GenerateKey(keyID, kmsContext)
 		if err != nil {
-			vault.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
+			kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
 		} else {
-			vault.Encrypt = "Ok"
+			kmsStat.Encrypt = "success"
 		}
 
 		// 2. Verify that we can indeed decrypt the (encrypted) key
 		decryptedKey, err := GlobalKMS.UnsealKey(keyID, sealedKey, kmsContext)
 		switch {
 		case err != nil:
-			vault.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
+			kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
 		case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
-			vault.Decrypt = "Decryption failed: decrypted key does not match generated key"
+			kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
 		default:
-			vault.Decrypt = "Ok"
+			kmsStat.Decrypt = "success"
 		}
 	}
-	return vault
+	return kmsStat
 }

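The renamed `fetchKMSStatus` probes the KMS with a full generate/unseal round trip and a constant-time comparison of the key material. A toy sketch of that health-check shape, with a fake in-memory KMS standing in for `GlobalKMS` (nothing below is MinIO API):

```go
package main

import (
	"crypto/rand"
	"crypto/subtle"
	"fmt"
)

// fakeKMS hands back the key material it generated, standing in for a real
// GenerateKey/UnsealKey pair that would round-trip through an external KMS.
type fakeKMS struct{ keys map[string][]byte }

func (k *fakeKMS) GenerateKey(id string) (key, sealed []byte, err error) {
	key = make([]byte, 32)
	if _, err = rand.Read(key); err != nil {
		return nil, nil, err
	}
	k.keys[id] = key
	return key, []byte(id), nil // "sealed" is just a lookup token here
}

func (k *fakeKMS) UnsealKey(sealed []byte) ([]byte, error) {
	return k.keys[string(sealed)], nil
}

func main() {
	kms := &fakeKMS{keys: map[string][]byte{}}
	key, sealed, err := kms.GenerateKey("default")
	if err != nil {
		fmt.Println("Encryption failed:", err)
		return
	}
	decrypted, err := kms.UnsealKey(sealed)
	switch {
	case err != nil:
		fmt.Println("Decryption failed:", err)
	case subtle.ConstantTimeCompare(key, decrypted) != 1:
		fmt.Println("Decryption failed: decrypted key does not match generated key")
	default:
		fmt.Println("success") // both directions verified
	}
}
```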
// fetchLoggerDetails return log info
@@ -1704,11 +1747,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
 			err := checkConnection(target.Endpoint(), 15*time.Second)
 			if err == nil {
 				mapLog := make(map[string]madmin.Status)
-				mapLog[tgt] = madmin.Status{Status: "Online"}
+				mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
 				loggerInfo = append(loggerInfo, mapLog)
 			} else {
 				mapLog := make(map[string]madmin.Status)
-				mapLog[tgt] = madmin.Status{Status: "offline"}
+				mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
 				loggerInfo = append(loggerInfo, mapLog)
 			}
 		}
@@ -1720,11 +1763,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
 			err := checkConnection(target.Endpoint(), 15*time.Second)
 			if err == nil {
 				mapAudit := make(map[string]madmin.Status)
-				mapAudit[tgt] = madmin.Status{Status: "Online"}
+				mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
 				auditloggerInfo = append(auditloggerInfo, mapAudit)
 			} else {
 				mapAudit := make(map[string]madmin.Status)
-				mapAudit[tgt] = madmin.Status{Status: "Offline"}
+				mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
 				auditloggerInfo = append(auditloggerInfo, mapAudit)
 			}
 		}

@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
 	// Initialize boot time
 	globalBootTime = UTCNow()
 
-	globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
+	globalEndpoints = mustGetPoolEndpoints(erasureDirs...)
 
 	newAllSubsystems()

@@ -97,16 +97,9 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints := mustGetNewEndpoints(erasureDirs...)
-	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
-	if err != nil {
-		removeRoots(erasureDirs)
-		return nil, nil, err
-	}
-
+	endpoints := mustGetPoolEndpoints(erasureDirs...)
 	globalPolicySys = NewPolicySys()
-	objLayer := &erasureServerPools{serverPools: make([]*erasureSets, 1)}
-	objLayer.serverPools[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
+	objLayer, err := newErasureServerPools(ctx, endpoints)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -364,7 +357,7 @@ func TestExtractHealInitParams(t *testing.T) {
 	// Test all combinations!
 	for pIdx, parms := range qParmsArr {
 		for vIdx, vars := range varsArr {
-			_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
+			_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
 			isErrCase := false
 			if pIdx < 4 || vIdx < 1 {
 				isErrCase = true

@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"net/http"
 	"sort"
-	"strings"
 	"sync"
 	"time"

@@ -90,8 +89,9 @@ type allHealState struct {
 	sync.RWMutex
 
-	// map of heal path to heal sequence
-	healSeqMap map[string]*healSequence
+	healSeqMap     map[string]*healSequence // Indexed by endpoint
 	healLocalDisks map[Endpoint]struct{}
+	healStatus     map[string]healingTracker // Indexed by disk ID
 }

// newHealState - initialize global heal state management
@@ -99,6 +99,7 @@ func newHealState(cleanup bool) *allHealState {
 	hstate := &allHealState{
 		healSeqMap:     make(map[string]*healSequence),
 		healLocalDisks: map[Endpoint]struct{}{},
+		healStatus:     make(map[string]healingTracker),
 	}
 	if cleanup {
 		go hstate.periodicHealSeqsClean(GlobalContext)
@@ -113,7 +114,56 @@ func (ahs *allHealState) healDriveCount() int {
 	return len(ahs.healLocalDisks)
 }
 
-func (ahs *allHealState) getHealLocalDisks() Endpoints {
+func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
 	ahs.Lock()
 	defer ahs.Unlock()
 
+	for _, ep := range healLocalDisks {
+		delete(ahs.healLocalDisks, ep)
+	}
+	for id, disk := range ahs.healStatus {
+		for _, ep := range healLocalDisks {
+			if disk.Endpoint == ep.String() {
+				delete(ahs.healStatus, id)
+			}
+		}
+	}
+}
+
+// updateHealStatus will update the heal status.
+func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
+	ahs.Lock()
+	defer ahs.Unlock()
+	ahs.healStatus[tracker.ID] = *tracker
+}
+
+// Sort by zone, set and disk index
+func sortDisks(disks []madmin.Disk) {
+	sort.Slice(disks, func(i, j int) bool {
+		a, b := &disks[i], &disks[j]
+		if a.PoolIndex != b.PoolIndex {
+			return a.PoolIndex < b.PoolIndex
+		}
+		if a.SetIndex != b.SetIndex {
+			return a.SetIndex < b.SetIndex
+		}
+		return a.DiskIndex < b.DiskIndex
+	})
+}
+
+// getLocalHealingDisks returns local healing disks indexed by endpoint.
+func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
+	ahs.RLock()
+	defer ahs.RUnlock()
+	dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
+	for _, v := range ahs.healStatus {
+		dst[v.Endpoint] = v.toHealingDisk()
+	}
+
+	return dst
+}
+
+func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
+	ahs.RLock()
+	defer ahs.RUnlock()
+
@@ -124,15 +174,6 @@ func (ahs *allHealState) getHealLocalDisks() Endpoints {
 	return endpoints
 }
 
-func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
-	ahs.Lock()
-	defer ahs.Unlock()
-
-	for _, ep := range healLocalDisks {
-		delete(ahs.healLocalDisks, ep)
-	}
-}
-
 func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
 	ahs.Lock()
 	defer ahs.Unlock()
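The added `sortDisks` is a standard multi-key `sort.Slice` comparison: compare the highest-order key first and fall through to the next only on ties. A runnable sketch of the same ordering on stand-in types:

```go
package main

import (
	"fmt"
	"sort"
)

// disk mirrors the three index fields the hunk above sorts on.
type disk struct{ Pool, Set, Disk int }

// sortDisks orders by pool, then set, then disk index.
func sortDisks(disks []disk) {
	sort.Slice(disks, func(i, j int) bool {
		a, b := &disks[i], &disks[j]
		if a.Pool != b.Pool {
			return a.Pool < b.Pool
		}
		if a.Set != b.Set {
			return a.Set < b.Set
		}
		return a.Disk < b.Disk
	})
}

func main() {
	d := []disk{{1, 0, 2}, {0, 1, 0}, {0, 0, 3}}
	sortDisks(d)
	fmt.Println(d) // [{0 0 3} {0 1 0} {1 0 2}]
}
```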
@@ -662,6 +703,13 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
 	}
 }
 
+func (h *healSequence) logHeal(healType madmin.HealItemType) {
+	h.mutex.Lock()
+	h.scannedItemsMap[healType]++
+	h.lastHealActivity = UTCNow()
+	h.mutex.Unlock()
+}
+
 func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
 	globalHealConfigMu.Lock()
 	opts := globalHealConfig
@@ -677,10 +725,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	}
 	if source.opts != nil {
 		task.opts = *source.opts
-	} else {
-		if opts.Bitrot {
-			task.opts.ScanMode = madmin.HealDeepScan
-		}
+	}
+	if opts.Bitrot {
+		task.opts.ScanMode = madmin.HealDeepScan
 	}
 
 	// Wait and proceed if there are active requests
@@ -751,16 +798,19 @@ func (h *healSequence) healItemsFromSourceCh() error {
 			if !ok {
 				return nil
 			}
+
 			var itemType madmin.HealItemType
-			switch {
-			case source.bucket == nopHeal:
+			switch source.bucket {
+			case nopHeal:
 				continue
-			case source.bucket == SlashSeparator:
+			case SlashSeparator:
 				itemType = madmin.HealItemMetadata
-			case source.bucket != "" && source.object == "":
-				itemType = madmin.HealItemBucket
 			default:
-				itemType = madmin.HealItemObject
+				if source.object == "" {
+					itemType = madmin.HealItemBucket
+				} else {
+					itemType = madmin.HealItemObject
+				}
 			}
 
 			if err := h.queueHealTask(source, itemType); err != nil {
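The switch rewrite above replaces a tagless `switch { case x == a: }` with `switch x { case a: }` and folds the bucket-without-object test into the `default` branch. A toy version of the same restructuring (the literal values are stand-ins, not MinIO constants):

```go
package main

import "fmt"

// classify mirrors the new shape: switch on the value itself, and decide
// bucket vs. object inside default based on whether an object name is set.
func classify(bucket, object string) string {
	switch bucket {
	case "": // nopHeal stand-in
		return "skip"
	case "/": // SlashSeparator stand-in
		return "metadata"
	default:
		if object == "" {
			return "bucket"
		}
		return "object"
	}
}

func main() {
	fmt.Println(classify("/", ""))       // metadata
	fmt.Println(classify("photos", ""))  // bucket
	fmt.Println(classify("photos", "x")) // object
}
```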
@@ -832,11 +882,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
 			return errHealStopSignalled
 		}
 
-		// Skip metacache entries healing
-		if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
-			return nil
-		}
-
 		err := h.queueHealTask(healSource{
 			bucket: bucket,
 			object: object,

@@ -20,8 +20,6 @@ import (
 	"net/http"
 
 	"github.com/gorilla/mux"
-	"github.com/minio/minio/cmd/config"
-	"github.com/minio/minio/pkg/env"
 	"github.com/minio/minio/pkg/madmin"
 )

@@ -112,10 +110,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 		// -- IAM APIs --
 
 		// Add policy IAM
-		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
+		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
 
 		// Add user IAM
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))
+
 		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
@@ -172,31 +169,31 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	}
 
 	if globalIsDistErasure || globalIsErasure {
 		// Quota operations
-		if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
-			// GetBucketQuotaConfig
-			adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
-				httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
-			// PutBucketQuotaConfig
-			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
-				httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
+		// GetBucketQuotaConfig
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
+			httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
+		// PutBucketQuotaConfig
+		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
+			httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
 
-			// Bucket replication operations
-			// GetBucketTargetHandler
-			adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
-				httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
-			// SetRemoteTargetHandler
-			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
-				httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
-			// RemoveRemoteTargetHandler
-			adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
-				httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
-		}
+		// Bucket replication operations
+		// GetBucketTargetHandler
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
+			httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
+		// SetRemoteTargetHandler
+		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
+			httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
+		// RemoveRemoteTargetHandler
+		adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
+			httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
 	}
 
-	// -- Top APIs --
-	// Top locks
 	if globalIsDistErasure {
+		// Top locks
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
+		// Force unlocks paths
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
+			Queries("paths", "{paths:.*}").HandlerFunc(httpTraceHdrs(adminAPI.ForceUnlockHandler))
 	}
 
 	// HTTP Trace

@@ -17,8 +17,11 @@
 package cmd
 
 import (
+	"context"
 	"net/http"
+	"time"
 
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/madmin"
 )

@@ -39,33 +42,39 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 		}
 		if endpoint.IsLocal {
 			// Only proceed for local endpoints
-			network[nodeName] = "online"
+			network[nodeName] = string(madmin.ItemOnline)
 			localEndpoints = append(localEndpoints, endpoint)
 			continue
 		}
 		_, present := network[nodeName]
 		if !present {
-			if err := IsServerResolvable(endpoint); err == nil {
-				network[nodeName] = "online"
+			if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
+				network[nodeName] = string(madmin.ItemOnline)
 			} else {
-				network[nodeName] = "offline"
+				network[nodeName] = string(madmin.ItemOffline)
+				// log once the error
+				logger.LogOnceIf(context.Background(), err, nodeName)
 			}
 		}
 	}
 
-	localDisks, _ := initStorageDisksWithErrors(localEndpoints)
-	defer closeStorageDisks(localDisks)
-
-	storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
-
-	return madmin.ServerProperties{
-		State:    "ok",
+	props := madmin.ServerProperties{
+		State:    string(madmin.ItemInitializing),
 		Endpoint: addr,
 		Uptime:   UTCNow().Unix() - globalBootTime.Unix(),
 		Version:  Version,
 		CommitID: CommitID,
 		Network:  network,
-		Disks:    storageInfo.Disks,
 	}
+
+	objLayer := newObjectLayerFn()
+	if objLayer != nil && !globalIsGateway {
+		// only need Disks information in server mode.
+		storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
+		props.State = string(madmin.ItemOnline)
+		props.Disks = storageInfo.Disks
+	}
+
+	return props
 }

@@ -87,6 +87,7 @@ const (
 	ErrInvalidMaxUploads
 	ErrInvalidMaxParts
 	ErrInvalidPartNumberMarker
+	ErrInvalidPartNumber
 	ErrInvalidRequestBody
 	ErrInvalidCopySource
 	ErrInvalidMetadataDirective
@@ -120,7 +121,6 @@ const (
 	ErrReplicationSourceNotVersionedError
 	ErrReplicationNeedsVersioningError
 	ErrReplicationBucketNeedsVersioningError
-	ErrBucketReplicationDisabledError
 	ErrObjectRestoreAlreadyInProgress
 	ErrNoSuchKey
 	ErrNoSuchUpload
@@ -263,7 +263,6 @@ const (
 	// Bucket Quota error codes
 	ErrAdminBucketQuotaExceeded
 	ErrAdminNoSuchQuotaConfiguration
-	ErrAdminBucketQuotaDisabled
 
 	ErrHealNotImplemented
 	ErrHealNoSuchProcess
@@ -439,6 +438,11 @@ var errorCodes = errorCodeMap{
 		Description:    "Argument partNumberMarker must be an integer.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidPartNumber: {
+		Code:           "InvalidPartNumber",
+		Description:    "The requested partnumber is not satisfiable",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
 	ErrInvalidPolicyDocument: {
 		Code:           "InvalidPolicyDocument",
 		Description:    "The content of the form does not meet the conditions specified in the policy document.",
@@ -889,11 +893,6 @@ var errorCodes = errorCodeMap{
 		Description:    "Versioning must be 'Enabled' on the bucket to add a replication target",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrBucketReplicationDisabledError: {
-		Code:           "XMinioAdminBucketReplicationDisabled",
-		Description:    "Replication specified but disk usage crawl is disabled on MinIO server",
-		HTTPStatusCode: http.StatusBadRequest,
-	},
 	ErrNoSuchObjectLockConfiguration: {
 		Code:           "NoSuchObjectLockConfiguration",
 		Description:    "The specified object does not have a ObjectLock configuration",
@@ -1215,11 +1214,6 @@ var errorCodes = errorCodeMap{
 		Description:    "The quota configuration does not exist",
 		HTTPStatusCode: http.StatusNotFound,
 	},
-	ErrAdminBucketQuotaDisabled: {
-		Code:           "XMinioAdminBucketQuotaDisabled",
-		Description:    "Quota specified but disk usage crawl is disabled on MinIO server",
-		HTTPStatusCode: http.StatusBadRequest,
-	},
 	ErrInsecureClientRequest: {
 		Code:           "XMinioInsecureClientRequest",
 		Description:    "Cannot respond to plain-text request from TLS-encrypted server",
@@ -2131,6 +2125,12 @@ func toAPIError(ctx context.Context, err error) APIError {
 				HTTPStatusCode: e.Response().StatusCode,
 			}
 			// Add more Gateway SDKs here if any in future.
+		default:
+			apiErr = APIError{
+				Code:           apiErr.Code,
+				Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
+				HTTPStatusCode: apiErr.HTTPStatusCode,
+			}
 		}
 	}

@@ -133,18 +133,20 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 		}
 
 		// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
-		if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
+		if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
 			continue
 		}
 
+		var isSet bool
 		for _, userMetadataPrefix := range userMetadataKeyPrefixes {
-			if !strings.HasPrefix(k, userMetadataPrefix) {
+			if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
 				continue
 			}
 			w.Header()[strings.ToLower(k)] = []string{v}
 			isSet = true
 			break
 		}
 
 		if !isSet {
 			w.Header().Set(k, v)
 		}
@@ -156,16 +158,16 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 		return err
 	}
 
-	if opts.PartNumber > 0 {
-		rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
-	}
-
 	// For providing ranged content
 	start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
 	if err != nil {
 		return err
 	}
 
+	if rs == nil && opts.PartNumber > 0 {
+		rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
+	}
+
 	// Set content length.
 	w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
 	if rs != nil {
@@ -177,21 +179,25 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 	if objInfo.VersionID != "" {
 		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
 	}
 
 	if objInfo.ReplicationStatus.String() != "" {
 		w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
 	}
 
 	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
-		ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
-			Name:         objInfo.Name,
-			UserTags:     objInfo.UserTags,
-			VersionID:    objInfo.VersionID,
-			ModTime:      objInfo.ModTime,
-			IsLatest:     objInfo.IsLatest,
-			DeleteMarker: objInfo.DeleteMarker,
-		})
-		if !expiryTime.IsZero() {
-			w.Header()[xhttp.AmzExpiration] = []string{
-				fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
+		if opts.VersionID == "" {
+			if ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
+				Name:             objInfo.Name,
+				UserTags:         objInfo.UserTags,
+				VersionID:        objInfo.VersionID,
+				ModTime:          objInfo.ModTime,
+				IsLatest:         objInfo.IsLatest,
+				DeleteMarker:     objInfo.DeleteMarker,
+				SuccessorModTime: objInfo.SuccessorModTime,
+			}); !expiryTime.IsZero() {
+				w.Header()[xhttp.AmzExpiration] = []string{
+					fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
+				}
 			}
 		}
 	}
 	if objInfo.TransitionStatus == lifecycle.TransitionComplete {

@@ -36,8 +36,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
 		maxkeys = maxObjectList
 	}
 
-	prefix = values.Get("prefix")
-	marker = values.Get("marker")
+	prefix = trimLeadingSlash(values.Get("prefix"))
+	marker = trimLeadingSlash(values.Get("marker"))
 	delimiter = values.Get("delimiter")
 	encodingType = values.Get("encoding-type")
 	return
@@ -56,8 +56,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
 		maxkeys = maxObjectList
 	}
 
-	prefix = values.Get("prefix")
-	marker = values.Get("key-marker")
+	prefix = trimLeadingSlash(values.Get("prefix"))
+	marker = trimLeadingSlash(values.Get("key-marker"))
 	delimiter = values.Get("delimiter")
 	encodingType = values.Get("encoding-type")
 	versionIDMarker = values.Get("version-id-marker")
@@ -86,8 +86,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
 		maxkeys = maxObjectList
 	}
 
-	prefix = values.Get("prefix")
-	startAfter = values.Get("start-after")
+	prefix = trimLeadingSlash(values.Get("prefix"))
+	startAfter = trimLeadingSlash(values.Get("start-after"))
 	delimiter = values.Get("delimiter")
 	fetchOwner = values.Get("fetch-owner") == "true"
 	encodingType = values.Get("encoding-type")
@@ -117,8 +117,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
 		maxUploads = maxUploadsList
 	}
 
-	prefix = values.Get("prefix")
-	keyMarker = values.Get("key-marker")
+	prefix = trimLeadingSlash(values.Get("prefix"))
+	keyMarker = trimLeadingSlash(values.Get("key-marker"))
 	uploadIDMarker = values.Get("upload-id-marker")
 	delimiter = values.Get("delimiter")
 	encodingType = values.Get("encoding-type")

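All four argument parsers now pass list prefixes and markers through `trimLeadingSlash`, whose definition is not part of this diff. A plausible shape for the helper, assuming it only strips a leading separator so that `/photos/a.jpg` and `photos/a.jpg` list the same keys:

```go
package main

import (
	"fmt"
	"strings"
)

// trimLeadingSlash is a sketch of the helper the hunks above call; MinIO's
// real implementation may differ (e.g., collapsing repeated slashes).
func trimLeadingSlash(s string) string {
	return strings.TrimPrefix(s, "/")
}

func main() {
	fmt.Println(trimLeadingSlash("/photos/2021/january/")) // photos/2021/january/
	fmt.Println(trimLeadingSlash("photos/2021/january/"))  // unchanged
}
```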
@@ -48,6 +48,12 @@ type LocationResponse struct {
 	Location string `xml:",chardata"`
 }
 
+// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
+type PolicyStatus struct {
+	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
+	IsPublic string
+}
+
 // ListVersionsResponse - format for list bucket versions response.
 type ListVersionsResponse struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
@@ -410,9 +416,11 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 	listbuckets := make([]Bucket, 0, len(buckets))
 	var data = ListBucketsResponse{}
-	var owner = Owner{}
+	var owner = Owner{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: "minio",
+	}
 
-	owner.ID = globalMinioDefaultOwnerID
 	for _, bucket := range buckets {
 		var listbucket = Bucket{}
 		listbucket.Name = bucket.Name
@@ -429,10 +437,12 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
 func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
 	versions := make([]ObjectVersion, 0, len(resp.Objects))
-	var owner = Owner{}
+	var owner = Owner{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: "minio",
+	}
 	var data = ListVersionsResponse{}
 
-	owner.ID = globalMinioDefaultOwnerID
 	for _, object := range resp.Objects {
 		var content = ObjectVersion{}
 		if object.Name == "" {
@@ -485,10 +495,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 // generates an ListObjectsV1 response for the said bucket with other enumerated options.
 func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
 	contents := make([]Object, 0, len(resp.Objects))
-	var owner = Owner{}
+	var owner = Owner{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: "minio",
+	}
 	var data = ListObjectsResponse{}
 
-	owner.ID = globalMinioDefaultOwnerID
 	for _, object := range resp.Objects {
 		var content = Object{}
 		if object.Name == "" {
@@ -532,12 +544,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 // generates an ListObjectsV2 response for the said bucket with other enumerated options.
 func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
 	contents := make([]Object, 0, len(objects))
-	var owner = Owner{}
-	var data = ListObjectsV2Response{}
-
-	if fetchOwner {
-		owner.ID = globalMinioDefaultOwnerID
+	var owner = Owner{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: "minio",
 	}
+	var data = ListObjectsV2Response{}
 
 	for _, object := range objects {
 		var content = Object{}
@@ -565,7 +576,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 				continue
 			}
 			// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
-			if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
+			if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
 				continue
 			}
 			content.UserMetadata[k] = v
@@ -639,8 +650,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 	listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
 	listPartsResponse.UploadID = partsInfo.UploadID
 	listPartsResponse.StorageClass = globalMinioDefaultStorageClass
-	listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
-	listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
+
+	// Dumb values not meaningful
+	listPartsResponse.Initiator = Initiator{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: globalMinioDefaultOwnerID,
+	}
+	listPartsResponse.Owner = Owner{
+		ID:          globalMinioDefaultOwnerID,
+		DisplayName: globalMinioDefaultOwnerID,
+	}
 
 	listPartsResponse.MaxParts = partsInfo.MaxParts
 	listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker

@@ -27,23 +27,35 @@ import (
 )
 
 func newHTTPServerFn() *xhttp.Server {
-	globalObjLayerMutex.Lock()
-	defer globalObjLayerMutex.Unlock()
+	globalObjLayerMutex.RLock()
+	defer globalObjLayerMutex.RUnlock()
 	return globalHTTPServer
 }
 
-func newObjectLayerFn() ObjectLayer {
+func setHTTPServer(h *xhttp.Server) {
 	globalObjLayerMutex.Lock()
-	defer globalObjLayerMutex.Unlock()
+	globalHTTPServer = h
+	globalObjLayerMutex.Unlock()
+}
+
+func newObjectLayerFn() ObjectLayer {
+	globalObjLayerMutex.RLock()
+	defer globalObjLayerMutex.RUnlock()
 	return globalObjectAPI
 }
 
 func newCachedObjectLayerFn() CacheObjectLayer {
-	globalObjLayerMutex.Lock()
-	defer globalObjLayerMutex.Unlock()
+	globalObjLayerMutex.RLock()
+	defer globalObjLayerMutex.RUnlock()
 	return globalCacheObjectAPI
 }
 
+func setCacheObjectLayer(c CacheObjectLayer) {
+	globalObjLayerMutex.Lock()
+	globalCacheObjectAPI = c
+	globalObjLayerMutex.Unlock()
+}
+
 func setObjectLayer(o ObjectLayer) {
 	globalObjLayerMutex.Lock()
 	globalObjectAPI = o
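The getters above now take the read side of the `RWMutex`, so concurrent lookups of the object layer no longer serialize; only the rare setters take the exclusive lock. The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	value string
)

// get takes the shared lock: any number of readers proceed in parallel.
func get() string {
	mu.RLock()
	defer mu.RUnlock()
	return value
}

// set takes the exclusive lock: it waits for readers to drain, then writes.
func set(v string) {
	mu.Lock()
	value = v
	mu.Unlock()
}

func main() {
	set("object-layer")
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // concurrent readers never block each other
		wg.Add(1)
		go func() { defer wg.Done(); fmt.Println(get()) }()
	}
	wg.Wait()
}
```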
@@ -108,224 +120,229 @@ func registerAPIRouter(router *mux.Router) {
|
||||
// Object operations
|
||||
// HeadObject
|
||||
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
|
||||
collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
|
||||
// CopyObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").
|
||||
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
|
||||
HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
|
||||
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
|
||||
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// PutObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// ListObjectParts
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// CompleteMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// NewMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
|
||||
collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
|
||||
// AbortMultipartUpload
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
|
||||
// PutObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
|
||||
// GetObjectTagging
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// PutObjectTagging
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// DeleteObjectTagging
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// SelectObjectContent
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
|
||||
collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
|
||||
// GetObjectRetention
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
|
||||
collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
|
||||
// GetObjectLegalHold
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
// GetObject
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
|
||||
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
|
||||
// CopyObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
|
||||
HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
|
||||
collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
|
||||
// PutObjectRetention
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
|
||||
collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
|
||||
// PutObjectLegalHold
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
|
||||
// PutObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
|
||||
collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))
|
||||
// DeleteObject
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
|
||||
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
|
||||
|
||||
// PostRestoreObject
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
|
||||
|
||||
/// Bucket operations
|
||||
// GetBucketLocation
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
|
||||
collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
|
||||
// GetBucketPolicy
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
|
||||
// GetBucketLifecycle
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketEncryption
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// GetBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// GetBucketReplicationConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketreplicationconfiguration", httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
|
||||
// GetBucketVersioning
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
|
||||
collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// GetBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
|
||||
collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
|
||||
// ListenNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
|
||||
|
||||
// Dummy Bucket Calls
|
||||
// GetBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
|
||||
// PutBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
|
||||
collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
|
||||
// GetBucketCors - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
|
||||
collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
|
||||
// GetBucketWebsiteHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
|
||||
collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
|
||||
// GetBucketAccelerateHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
|
||||
collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
|
||||
// GetBucketRequestPaymentHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
|
||||
collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
|
||||
// GetBucketLoggingHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
|
||||
collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
|
||||
// GetBucketLifecycleHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketTaggingHandler
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
|
||||
//DeleteBucketWebsiteHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
|
||||
collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
|
||||
// DeleteBucketTaggingHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
|
||||
|
||||
// ListMultipartUploads
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
|
||||
collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
|
||||
// ListObjectsV2M
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
|
||||
collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
|
||||
// ListObjectsV2
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
|
||||
collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
|
||||
// ListObjectVersions
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
|
||||
// ListObjectsV1 (Legacy)
|
||||
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
|
||||
// GetBucketPolicyStatus
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
|
||||
collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
|
||||
// PutBucketLifecycle
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// PutBucketReplicationConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketreplicationconfiguration", httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
// GetObjectRetention
|
||||
|
||||
// PutBucketEncryption
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
|
||||
// PutBucketPolicy
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
|
||||
|
||||
// PutBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// PutBucketTaggingHandler
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
// PutBucketVersioning
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// PutBucketNotification
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
// PutBucket
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
|
||||
collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
|
||||
// HeadBucket
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(
|
||||
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
|
||||
collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
|
||||
// PostPolicy
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
|
||||
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
// DeleteMultipleObjects
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketReplication
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketreplicationconfiguration", httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// DeleteBucketEncryption
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
// PostRestoreObject
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("restoreobject", httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
|
||||
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
|
||||
// ListObjectsV1 (Legacy)
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
|
||||
// ListenNotification
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")
|
||||
collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
|
||||
|
||||
// ListBuckets
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
|
||||
// than failing with UnknownAPIRequest we simply handle it for now.
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
collectAPIStats("listbuckets", maxClients(httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
|
||||
|
||||
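The recurring change in these route registrations swaps the wrapper order: collectAPIStats becomes the outermost middleware and maxClients moves inside it, so requests rejected by the concurrency limiter still show up in API statistics and traces. A minimal, self-contained sketch of that composition — names like collectStats and limitClients are illustrative, not MinIO's actual helpers:

```go
package main

import (
	"fmt"
	"net/http"
)

// collectStats counts every request, including ones the inner handler rejects.
func collectStats(name string, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("api=%s hit\n", name) // stats hook (placeholder)
		h(w, r)
	}
}

// limitClients rejects requests beyond maxConc concurrent ones.
func limitClients(h http.HandlerFunc, maxConc int) http.HandlerFunc {
	sem := make(chan struct{}, maxConc)
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			h(w, r)
		default:
			http.Error(w, "busy", http.StatusServiceUnavailable)
		}
	}
}

func main() {
	final := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) }
	// New order: stats outermost, throttle inside, so throttled requests are still counted.
	http.HandleFunc("/bucket", collectStats("putbucket", limitClients(final, 100)))
	http.ListenAndServe(":8080", nil)
}
```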
@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/etag"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
@@ -161,6 +162,7 @@ func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolic
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
@@ -185,12 +187,12 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
@@ -237,7 +239,7 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(r.Context(), err, logger.Application)
logger.LogIf(GlobalContext, err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -258,7 +260,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r, token)
claims, err := getClaimsFromToken(token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
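With the *http.Request parameter dropped, claims extraction depends only on the token string itself, which simplifies call sites and tests. A hedged, runnable sketch of that shape — the JWT verification is stubbed out, and claimsFromToken is an illustrative name, not MinIO's actual helper:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// claimsFromToken takes only the token, mirroring the refactor above.
func claimsFromToken(token string) (map[string]interface{}, error) {
	claims := map[string]interface{}{}
	if token == "" {
		return claims, nil // anonymous request: empty claims, no error
	}
	if strings.Count(token, ".") != 2 {
		return nil, errors.New("malformed JWT")
	}
	// ... signature verification and claim extraction would happen here ...
	return claims, nil
}

func main() {
	c, err := claimsFromToken("")
	fmt.Println(c, err) // map[] <nil>
}
```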
@@ -271,7 +273,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
return s3Err
}

@@ -281,14 +283,13 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
var cred auth.Credentials
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrSignatureVersionNotSupported
return cred, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
@@ -298,18 +299,18 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}

var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}

// LocationConstraint is valid only for CreateBucketAction.
@@ -319,7 +320,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return accessKey, owner, ErrMalformedXML
return cred, owner, ErrMalformedXML
}

// Populate payload to extract location constraint.
@@ -328,7 +329,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return accessKey, owner, s3Error
return cred, owner, s3Error
}

// Populate payload again to handle it in HTTP handler.
@@ -349,7 +350,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
@@ -364,15 +365,16 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -381,7 +383,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
@@ -389,6 +391,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -397,11 +400,11 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}

return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}
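The rename from checkRequestAuthTypeToAccessKey to checkRequestAuthTypeCredential changes the return from a bare access-key string to the whole auth.Credentials, so callers get Groups (and, in the real code, claims) for policy evaluation without a second lookup. A toy illustration of the signature change, with types simplified:

```go
package main

import "fmt"

// Credentials stands in for auth.Credentials; only the relevant fields are shown.
type Credentials struct {
	AccessKey string
	Groups    []string
}

// checkAuth returns the whole credential instead of just the access key.
func checkAuth() (cred Credentials, owner bool, err error) {
	return Credentials{AccessKey: "minio", Groups: []string{"admins"}}, true, nil
}

func main() {
	cred, owner, _ := checkAuth()
	fmt.Println(cred.AccessKey, cred.Groups, owner)
}
```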
// Verify if request has valid AWS Signature Version '2'.
@@ -430,19 +433,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return errCode
}

var (
err error
contentMD5, contentSHA256 []byte
)

// Extract 'Content-Md5' if present.
contentMD5, err = checkValidMD5(r.Header)
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}

// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
var contentSHA256 []byte
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
@@ -459,8 +457,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
@@ -468,16 +465,6 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return ErrNone
}
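etag.FromContentMD5 replaces the checkValidMD5-plus-hex dance: a Content-MD5 header carries the base64 form of the raw MD5 digest, while an S3 ETag is the hex form of the same digest, so the two are directly interconvertible. A small standard-library demonstration of that equivalence:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:]) // header form
	etag := hex.EncodeToString(sum[:])                      // ETag form
	fmt.Println(contentMD5, etag)

	// Converting a received Content-MD5 header back to an ETag string:
	raw, _ := base64.StdEncoding.DecodeString(contentMD5)
	fmt.Println(hex.EncodeToString(raw) == etag) // true
}
```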
// authHandler - handles all the incoming authorization headers and validates them if possible.
type authHandler struct {
handler http.Handler
}

// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
return authHandler{h}
}

// List of all support S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
@@ -495,26 +482,30 @@ func isSupportedS3AuthType(aType authType) bool {
return ok
}

// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate Authorization header if its valid for JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
h.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate Authorization header if its valid for JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
return
}
h.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
h.ServeHTTP(w, r)
return
}
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
})
}
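The authHandler struct and its ServeHTTP method collapse into a closure returned from setAuthHandler, removing one type while keeping behavior the same. The pattern in isolation, a minimal sketch with the validation elided:

```go
package main

import "net/http"

// setAuth replaces a one-field middleware struct with a closure: the wrapped
// handler is captured instead of stored in a struct field.
func setAuth(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ... authorization-header validation would go here ...
		h.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {})
	http.ListenAndServe(":8080", setAuth(mux))
}
```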
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
@@ -560,6 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -569,6 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -592,6 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
@@ -602,6 +596,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -657,6 +652,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
@@ -670,6 +666,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
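Each of the hunks above adds a single `Groups: cred.Groups,` field to the policy arguments, so policies attached to a user's groups are consulted alongside the user's own policies. A self-contained toy version of that evaluation order, with plain maps standing in for the real policy engine:

```go
package main

import "fmt"

type Args struct {
	AccountName string
	Groups      []string
	Action      string
}

// isAllowed grants if either the user or any of the user's groups permits the action.
func isAllowed(userPerms, groupPerms map[string]map[string]bool, a Args) bool {
	if userPerms[a.AccountName][a.Action] {
		return true
	}
	for _, g := range a.Groups {
		if groupPerms[g][a.Action] {
			return true
		}
	}
	return false
}

func main() {
	groupPerms := map[string]map[string]bool{"writers": {"s3:PutObjectRetention": true}}
	fmt.Println(isAllowed(nil, groupPerms, Args{
		AccountName: "alice",
		Groups:      []string{"writers"}, // without passing Groups, this request is denied
		Action:      "s3:PutObjectRetention",
	}))
}
```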
@@ -61,23 +61,29 @@ func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
}

// At max 10 attempts to wait with 100 millisecond interval before proceeding
waitCount := 10
waitTick := 100 * time.Millisecond

// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
maxIOFn := func() int {
return maxIO + globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers()
return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalHTTPTrace.NumSubscribers())
}

tmpMaxWait := maxWait
if httpServer := newHTTPServerFn(); httpServer != nil {
// Any requests in progress, delay the heal.
for httpServer.GetRequestCount() >= maxIOFn() {
time.Sleep(waitTick)
waitCount--
if waitCount == 0 {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
} else {
time.Sleep(waitTick)
}
tmpMaxWait = tmpMaxWait - waitTick
}
if tmpMaxWait <= 0 {
if intDataUpdateTracker.debug {
logger.Info("waitForLowHTTPReq: waited %d times, resuming", waitCount)
logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
}
break
}
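This hunk drops the fixed 10 × 100 ms retry budget for a wall-clock budget derived from maxWait, sleeping at most the remaining time on the final tick. A runnable sketch of the same logic; waitUntilIdle is an illustrative name, not the actual function:

```go
package main

import (
	"fmt"
	"time"
)

// waitUntilIdle sleeps in ticks until the server goes idle or the
// caller-supplied wall-clock budget runs out.
func waitUntilIdle(busy func() bool, maxWait time.Duration) {
	const tick = 100 * time.Millisecond
	remaining := maxWait
	for busy() {
		if remaining <= 0 {
			return // budget exhausted, proceed anyway
		}
		sleep := tick
		if remaining < tick {
			sleep = remaining // never sleep past the deadline
		}
		time.Sleep(sleep)
		remaining -= sleep
	}
}

func main() {
	deadline := time.Now().Add(250 * time.Millisecond)
	waitUntilIdle(func() bool { return time.Now().Before(deadline) }, 200*time.Millisecond)
	fmt.Println("resumed after at most 200ms")
}
```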
@@ -91,20 +97,22 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
select {
case task, ok := <-h.tasks:
if !ok {
break
return
}

var res madmin.HealResultItem
var err error
switch {
case task.bucket == nopHeal:
switch task.bucket {
case nopHeal:
continue
case task.bucket == SlashSeparator:
case SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
case task.bucket != "" && task.object == "":
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
case task.bucket != "" && task.object != "":
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
default:
if task.object == "" {
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
} else {
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
}
task.responseCh <- healResult{result: res, err: err}
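The tagless `switch { case task.bucket == ... }` becomes a value switch on task.bucket, with the bucket-versus-object distinction folded into the default branch. The same control-flow cleanup in isolation:

```go
package main

import "fmt"

type task struct{ bucket, object string }

// dispatch mirrors the rewritten switch: value cases on the bucket, with the
// remaining cases handled by an if/else inside default.
func dispatch(t task) string {
	switch t.bucket {
	case "nop":
		return "skip"
	case "/":
		return "heal disk format"
	default:
		if t.object == "" {
			return "heal bucket " + t.bucket
		}
		return "heal object " + t.bucket + "/" + t.object
	}
}

func main() {
	fmt.Println(dispatch(task{bucket: "photos"}))
	fmt.Println(dispatch(task{bucket: "photos", object: "a.jpg"}))
}
```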
@@ -17,15 +17,23 @@
package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"

"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/madmin"
)

const (
@@ -34,10 +42,200 @@
)

//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
ID string

// future add more tracking capabilities
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`

ID string
PoolIndex int
SetIndex int
DiskIndex int
Path string
Endpoint string
Started time.Time
LastUpdate time.Time
ObjectsHealed uint64
ObjectsFailed uint64
BytesDone uint64
BytesFailed uint64

// Last object scanned.
Bucket string `json:"-"`
Object string `json:"-"`

// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeObjectsHealed uint64 `json:"-"`
ResumeObjectsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`

// Filled on startup/restarts.
QueuedBuckets []string

// Filled during heal.
HealedBuckets []string
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
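The new healingTracker persists healing progress on the disk being healed, so a restart can resume instead of starting over. A minimal runnable sketch of the save/load round trip — note this is an assumption-laden stand-in: the real code uses msgp-generated binary encoding and MinIO's StorageAPI, not JSON and plain files as here:

```go
package main

import (
	"encoding/json"
	"os"
	"time"
)

// tracker is a cut-down illustration of healingTracker.
type tracker struct {
	ID            string
	Started       time.Time
	ObjectsHealed uint64
	HealedBuckets []string
}

func (t *tracker) save(path string) error {
	b, err := json.Marshal(t) // real code: t.MarshalMsg(nil)
	if err != nil {
		return err
	}
	return os.WriteFile(path, b, 0o644)
}

func load(path string) (*tracker, error) {
	b, err := os.ReadFile(path) // real code: disk.ReadAll(...)
	if err != nil {
		return nil, err
	}
	var t tracker
	if err := json.Unmarshal(b, &t); err != nil {
		return nil, err
	}
	return &t, nil
}

func main() {
	t := &tracker{ID: "disk-1", Started: time.Now().UTC()}
	_ = t.save("/tmp/.healing.bin")
	_, _ = load("/tmp/.healing.bin")
}
```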
// loadHealingTracker will load the healing tracker from the supplied disk.
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil disk given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
if err != nil {
return nil, err
}
var h healingTracker
_, err = h.UnmarshalMsg(b)
if err != nil {
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
return &h, nil
}

// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
}
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return &h
}

// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
}
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
return h.save(ctx)
}

// save will unconditionally save the tracker and will be created if not existing.
func (h *healingTracker) save(ctx context.Context) error {
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
if ep, ok := api.(*erasureServerPools); ok {
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
}
}
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
if err != nil {
return err
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
htrackerBytes)
}

// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
false)
}

func (h *healingTracker) isHealed(bucket string) bool {
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}

// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.ObjectsHealed = h.ResumeObjectsHealed
h.ObjectsFailed = h.ResumeObjectsFailed
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
}

// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.ResumeObjectsHealed = h.ObjectsHealed
h.ResumeObjectsFailed = h.ObjectsFailed
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
// Delete...
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
}
}
}
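bucketDone removes the finished bucket from QueuedBuckets with Go's slice-splice idiom, `append(s[:i], s[i+1:]...)`. A tiny runnable illustration of that idiom:

```go
package main

import "fmt"

// remove deletes the first occurrence of v, shifting the tail left in place.
func remove(s []string, v string) []string {
	for i, x := range s {
		if x == v {
			return append(s[:i], s[i+1:]...)
		}
	}
	return s
}

func main() {
	fmt.Println(remove([]string{"a", "b", "c"}, "b")) // [a c]
}
```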
// setQueuedBuckets will add buckets, but exclude any that is already in h.HealedBuckets.
// Order is preserved.
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
s := set.CreateStringSet(h.HealedBuckets...)
h.QueuedBuckets = make([]string, 0, len(buckets))
for _, b := range buckets {
if !s.Contains(b.Name) {
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
}
}
}

func (h *healingTracker) printTo(writer io.Writer) {
b, err := json.MarshalIndent(h, "", " ")
if err != nil {
writer.Write([]byte(err.Error()))
}
writer.Write(b)
}

// toHealingDisk converts the information to madmin.HealingDisk
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
return madmin.HealingDisk{
ID: h.ID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
ObjectsHealed: h.ObjectsHealed,
ObjectsFailed: h.ObjectsFailed,
BytesDone: h.BytesDone,
BytesFailed: h.BytesFailed,
Bucket: h.Bucket,
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
}
}
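setQueuedBuckets is an order-preserving set difference: all buckets minus HealedBuckets. The same operation with a plain map, for reference:

```go
package main

import "fmt"

// queue returns all bucket names not yet healed, keeping input order.
func queue(all, healed []string) []string {
	done := make(map[string]bool, len(healed))
	for _, b := range healed {
		done[b] = true
	}
	out := make([]string, 0, len(all))
	for _, b := range all {
		if !done[b] {
			out = append(out, b)
		}
	}
	return out
}

func main() {
	fmt.Println(queue([]string{"a", "b", "c"}, []string{"b"})) // [a c]
}
```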
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
@@ -89,7 +287,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
@@ -113,7 +311,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
wait:

for {
select {
case <-ctx.Done():
@@ -122,9 +320,9 @@ wait:
// Reset to next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)

var erasureSetInZoneDisksToHeal []map[int][]StorageAPI
var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDisks()
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
@@ -135,14 +333,14 @@ wait:
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
len(healDisks)))

erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
for i := range z.serverPools {
erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
}
}

if serverDebugLog {
console.Debugf("disk check timer fired, attempting to heal %d drives\n", len(healDisks))
console.Debugf(color.Green("healDisk:")+" disk check timer fired, attempting to heal %d drives\n", len(healDisks))
}

// heal only if new disks found.
@@ -153,75 +351,96 @@ wait:
continue
}

zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
if zoneIdx < 0 {
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
continue
}

// Calculate the set index where the current endpoint belongs
z.serverPools[zoneIdx].erasureDisksMu.RLock()
z.serverPools[poolIdx].erasureDisksMu.RLock()
// Protect reading reference format.
setIndex, _, err := findDiskIndex(z.serverPools[zoneIdx].format, format)
z.serverPools[zoneIdx].erasureDisksMu.RUnlock()
setIndex, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
}

buckets, _ := z.ListBuckets(ctx)

buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})
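The sort.Slice comparator above heals minioMetaBucket entries first and otherwise heals the newest buckets first. A compact runnable version of that two-key ordering, with the bucket type simplified:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
	"time"
)

type bucket struct {
	Name    string
	Created time.Time
}

func main() {
	bs := []bucket{
		{"photos", time.Now().Add(-time.Hour)},
		{".minio.sys/config", time.Time{}},
		{"logs", time.Now()},
	}
	sort.Slice(bs, func(i, j int) bool {
		a := strings.HasPrefix(bs[i].Name, ".minio.sys")
		b := strings.HasPrefix(bs[j].Name, ".minio.sys")
		if a != b {
			return a // system buckets first
		}
		return bs[i].Created.After(bs[j].Created) // then newest first
	})
	fmt.Println(bs[0].Name) // .minio.sys/config
}
```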
for i, setMap := range erasureSetInZoneDisksToHeal {
// TODO(klauspost): This will block until all heals are done,
// in the future this should be able to start healing other sets at once.
var wg sync.WaitGroup
for i, setMap := range erasureSetInPoolDisksToHeal {
i := i
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
if len(disks) == 0 {
continue
}
wg.Add(1)
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))

// So someone changed the drives underneath, healing tracker missing.
if !disk.Healing() {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s zone", disk, humanize.Ordinal(i+1))
diskID, err := disk.GetDiskID()
// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.LogIf(ctx, err)
// reading format.json failed or not found, proceed to look
// for new disks to be healed again, we cannot proceed further.
goto wait
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
tracker = newHealingTracker(disk)
}

if err := saveHealingTracker(disk, diskID); err != nil {
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
logger.LogIf(ctx, err)
// Unable to write healing tracker, permission denied or some
// other unexpected error occurred. Proceed to look for new
// disks to be healed again, we cannot proceed further.
goto wait
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}

lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))

if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
continue
}

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}(setIndex, disks)
}
}
wg.Wait()
}
}
}
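The heal loop now fans out one goroutine per erasure set, coordinated by a sync.WaitGroup, instead of healing disks strictly in sequence (this is what addresses the TODO about blocking). The concurrency skeleton in isolation — note the explicit loop-variable parameters, which matter under pre-Go-1.22 capture rules:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	sets := map[int][]string{0: {"disk-a"}, 1: {"disk-b", "disk-c"}}
	var wg sync.WaitGroup
	for setIndex, disks := range sets {
		if len(disks) == 0 {
			continue
		}
		wg.Add(1)
		go func(setIndex int, disks []string) {
			defer wg.Done()
			for _, d := range disks {
				fmt.Printf("healing %s in set %d\n", d, setIndex)
			}
		}(setIndex, disks) // pass loop vars explicitly to avoid capture bugs
	}
	wg.Wait()
}
```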
@@ -30,6 +30,146 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
@@ -42,10 +182,10 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 20
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -54,16 +194,283 @@ func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ID")
return
}
// write "PoolIndex"
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "SetIndex"
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "DiskIndex"
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "Path"
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "Endpoint"
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ObjectsHealed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
// write "ObjectsFailed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "BytesDone"
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "BytesFailed"
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ResumeObjectsHealed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
// write "ResumeObjectsFailed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesDone)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
// write "ResumeBytesFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "HealedBuckets"
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// map header, size 20
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "SetIndex"
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "DiskIndex"
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "Path"
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ObjectsHealed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsHealed)
// string "ObjectsFailed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsFailed)
// string "BytesDone"
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "BytesFailed"
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "ResumeObjectsHealed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsHealed)
// string "ResumeObjectsFailed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsFailed)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "HealedBuckets"
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
return
}

@@ -91,6 +498,146 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -104,7 +651,14 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 14 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
return
}
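The header-byte changes in this generated code are MessagePack framing: 0x81 is a fixmap holding one key/value pair, while 0xde 0x00 0x14 is a map16 header whose big-endian count is 0x14 = 20, matching the struct's growth from 1 serialized field to 20. A quick check of that decoding:

```go
package main

import "fmt"

func main() {
	old := []byte{0x81}              // fixmap: low 4 bits carry the entry count
	grown := []byte{0xde, 0x0, 0x14} // map16: next 2 bytes carry the count
	fmt.Println(old[0]&0x0f, int(grown[1])<<8|int(grown[2])) // 1 20
}
```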
@@ -19,7 +19,6 @@
package cmd

import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
@@ -55,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -114,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
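Both benchmarks switch from bytes.NewBuffer to bytes.NewReader for the request payload. A bytes.Reader is the better fit for read-only data: it never grows or mutates the backing slice and supports Seek, so the same payload can be rewound and reused. For example:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	data := []byte("payload")
	r := bytes.NewReader(data)
	io.Copy(io.Discard, r)         // consume once
	r.Seek(0, io.SeekStart)        // rewind — not possible with bytes.Buffer
	n, _ := io.Copy(io.Discard, r) // consume again
	fmt.Println(n)                 // 7
}
```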
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
runPutObjectBenchmarkParallel(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// Benchmark utility functions for ObjectLayer.GetObject().
|
||||
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
|
||||
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
textData := generateBytesData(objSize)
|
||||
|
||||
// generate etag for the generated data.
|
||||
// etag of the data to written is required as input for PutObject.
|
||||
// PutObject is the functions which writes the data onto the FS/Erasure backend.
|
||||
|
||||
// get text data generated for number of bytes equal to object size.
|
||||
md5hex := getMD5Hash(textData)
|
||||
sha256hex := ""
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if objInfo.ETag != md5hex {
|
||||
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
|
||||
}
|
||||
}
|
||||
|
||||
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
|
||||
b.ReportAllocs()
|
||||
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
var buffer = new(bytes.Buffer)
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
// Benchmark ends here. Stop timer.
|
||||
b.StopTimer()
|
||||
|
||||
}

// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}

// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)

// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}

// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject().
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)

// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}

// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
@@ -301,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Benchmark ends here. Stop timer.
b.StopTimer()
}

// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}

// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/Erasure backend.

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}

// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()

}

@@ -37,7 +37,7 @@ func (err *errHashMismatch) Error() string {

// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
iow io.WriteCloser
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
@@ -71,9 +71,10 @@ func (b *streamingBitrotWriter) Close() error {
}

// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
r, w := io.Pipe()
h := algo.New()

bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
@@ -81,8 +82,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
close(bw.canClose)
}()
return bw
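
Aside: the sizing arithmetic above interleaves one checksum per shard ahead of the shard data, so the on-disk stream is ceilFrac(length, shardSize)*h.Size() + length bytes. A back-of-the-envelope sketch with assumed values (64 KiB shards, 32-byte HighwayHash sums; the helper mirrors the ceilFrac used in the hunk):

// Computes the bitrot stream size for a 1 MiB part.
package main

import "fmt"

func ceilFrac(numerator, denominator int64) int64 {
	if denominator == 0 {
		return 0
	}
	return (numerator + denominator - 1) / denominator
}

func main() {
	length := int64(1 << 20)     // 1 MiB of shard data
	shardSize := int64(64 << 10) // 64 KiB shards
	sumSize := int64(32)         // HighwayHash256 output size

	shards := ceilFrac(length, shardSize) // 16 shards
	total := shards*sumSize + length      // checksums + data
	fmt.Printf("shards=%d total=%d overhead=%d\n", shards, total, total-length)
}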
@@ -91,7 +91,8 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
data []byte
rc io.Reader
volume string
filePath string
tillOffset int64
@@ -105,7 +106,10 @@ func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
if closer, ok := b.rc.(io.Closer); ok {
return closer.Close()
}
return nil
}

func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
@@ -119,11 +123,16 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if len(b.data) == 0 {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
}
if err != nil {
return 0, err
}
}
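
Aside: the streamOffset expression maps a logical data offset to its physical position in the interleaved stream — offset/shardSize complete shards precede it, each carrying one h.Size() checksum. A worked example with the same assumed sizes as above:

// Maps logical offsets to physical stream offsets.
package main

import "fmt"

func main() {
	shardSize := int64(64 << 10) // 64 KiB shards
	sumSize := int64(32)         // bytes of checksum per shard

	for _, offset := range []int64{0, 64 << 10, 128 << 10} {
		// Same formula as the diff: preceding shards each add one sum.
		streamOffset := (offset/shardSize)*sumSize + offset
		fmt.Printf("logical=%-7d physical=%d\n", offset, streamOffset)
	}
}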

if offset != b.currOffset {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
@@ -140,20 +149,20 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
return 0, errFileCorrupt
}
b.currOffset += int64(len(buf))
return len(buf), nil
}

// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
data,
nil,
volume,
filePath,

@@ -17,13 +17,13 @@
package cmd

import (
"crypto/sha256"
"errors"
"hash"
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)

@@ -96,16 +96,16 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return
}

func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
}
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}

func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
return newStreamingBitrotReader(disk, data, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}

@@ -20,7 +20,6 @@ import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -28,7 +27,7 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)

@@ -42,39 +41,39 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {

disk.MakeVol(context.Background(), volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)

_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
writer.(io.Closer).Close()

reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
t.Fatal(err)
}
}
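
Aside: the log.Fatal → t.Fatal substitutions matter because log.Fatal calls os.Exit(1), which skips deferred cleanup (the os.RemoveAll above) and bypasses `go test` reporting, while t.Fatal fails only the current test and lets defers run. A minimal sketch of the pattern:

// Sketch: t.Fatal preserves deferred cleanup; log.Fatal would not.
package cmd

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestTempDirCleanup(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "bitrot-")
	if err != nil {
		t.Fatal(err) // stops this test only; defers still execute
	}
	defer os.RemoveAll(tmpDir) // would be skipped by log.Fatal/os.Exit

	f, err := ioutil.TempFile(tmpDir, "shard-")
	if err != nil {
		t.Fatal(err)
	}
	f.Close()
}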


@@ -37,7 +37,7 @@ const (
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")

defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -102,7 +102,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")

defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -145,7 +145,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")

defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {

@@ -17,23 +17,26 @@
package cmd

import (
"bytes"
"crypto/subtle"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"

"github.com/google/uuid"
"github.com/gorilla/mux"

"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
@@ -42,7 +45,6 @@ import (
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
@@ -74,7 +76,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {

// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound && err != dns.ErrNotImplemented {
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err)
return
}
@@ -82,6 +84,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsSet := set.NewStringSet()
bucketsToBeUpdated := set.NewStringSet()
bucketsInConflict := set.NewStringSet()

// This means the domain was updated; we should update
// all bucket entries with the new domain name.
domainMissing := err == dns.ErrDomainMissing
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
@@ -91,11 +97,16 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() && !domainMissing {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
//
// Additionally also check if domain is updated/missing with more
// entries, if that is the case we should update the
// new domain entries as well.
continue
}

// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
@@ -104,6 +115,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
bucketsToBeUpdated.Add(bucket.Name)
continue
}

// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
@@ -114,8 +126,11 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}

// Add/update buckets that are not registered with the DNS
g := errgroup.WithNErrs(len(buckets))
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
@@ -123,16 +138,16 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

for _, err := range g.Wait() {
if err != nil {
logger.LogIf(GlobalContext, err)
}
if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
}
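
Aside: the rewritten fan-out caps in-flight DNS updates at 50 and cancels the remainder on the first error, via MinIO's internal errgroup package. An analogous sketch using golang.org/x/sync/errgroup — a different but similar API, used here only for illustration, not the package in the hunk above:

// Bounded fan-out with cancel-on-error, x/sync flavor.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	buckets := []string{"alpha", "beta", "gamma"}

	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(50) // at most 50 in-flight updates, like WithConcurrency(50)

	for _, b := range buckets {
		b := b // capture loop variable (pre-Go 1.22 idiom, as in the hunk)
		g.Go(func() error {
			select {
			case <-ctx.Done(): // a failure elsewhere cancels the rest
				return ctx.Err()
			default:
			}
			fmt.Println("update DNS entry for", b)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("stopped early:", err)
	}
}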

for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}

var wg sync.WaitGroup
// Remove buckets that are in DNS for this server, but aren't local
for bucket, records := range dnsBuckets {
if bucketsSet.Contains(bucket) {
@@ -144,13 +159,18 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
continue
}

// If we reach here, the bucket no longer exists locally,
// but is registered in DNS to this server
if err = globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
wg.Add(1)
go func(bucket string) {
defer wg.Done()
// If we reach here, the bucket no longer exists locally,
// but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
}
wg.Wait()
}

// GetBucketLocationHandler - GET Bucket location.
@@ -159,7 +179,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")

defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -207,7 +227,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")

defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -262,7 +282,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")

defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -272,7 +292,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

listBuckets := objectAPI.ListBuckets

accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -282,7 +302,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var bucketsInfo []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -292,6 +314,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}

sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})

} else {
// Invoke the list buckets.
var err error
@@ -311,16 +338,17 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
@@ -349,7 +377,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")

defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -385,6 +413,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Trim a leading `/` from object names in the delete list, if present.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
}

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -401,15 +434,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}

var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
}
var (
hasLockEnabled, hasLifecycleConfig bool
goi ObjectInfo
gerr error
hasLockEnabled, hasLifecycleConfig, replicateSync bool
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
@@ -457,16 +496,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
object.PurgeTransitioned = goi.TransitionStatus
}
if replicateDeletes {
delMarker, replicate := checkReplicateDelete(ctx, bucket, ObjectToDelete{
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
replicateSync = repsync
if replicate {
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
return
}
continue
}
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
if delMarker {
object.DeleteMarkerVersionID = object.VersionID
}
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
}
@@ -510,14 +554,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[ObjectToDelete{
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
DeleteMarkerVersionID: dObjects[i].DeleteMarkerVersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
PurgeTransitioned: dObjects[i].PurgeTransitioned,
}]
}
dindex := objectsToDelete[objToDel]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
@@ -549,39 +597,36 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}

if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
globalReplicationState.queueReplicaDeleteTask(DeletedObjectVersionInfo{
dv := DeletedObjectVersionInfo{
DeletedObject: dobj,
Bucket: bucket,
})
}
scheduleReplicationDelete(ctx, dv, objectAPI, replicateSync)
}
}

if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
action := lifecycle.DeleteAction
if dobj.VersionID != "" {
action = lifecycle.DeleteVersionAction
}
deleteTransitionedObject(ctx, newObjectLayerFn(), bucket, dobj.ObjectName, lifecycle.ObjectOpts{
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}, action, true)
}, false, true)
}

}
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
eventName := event.ObjectRemovedDelete

objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}

if dobj.DeleteMarker {
objInfo.DeleteMarker = dobj.DeleteMarker
if objInfo.DeleteMarker {
objInfo.VersionID = dobj.DeleteMarkerVersionID
eventName = event.ObjectRemovedDeleteMarkerCreated
}
@@ -604,7 +649,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")

defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -735,7 +780,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")

defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -761,13 +806,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}

resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {

// Make sure that the URL does not contain object name.
if bucket != path.Clean(resource[1:]) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
@@ -810,13 +857,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
defer fileBody.Close()

formValues.Set("Bucket", bucket)

if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
object := trimLeadingSlash(formValues.Get("Key"))

successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
@@ -830,12 +876,51 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}

// Verify policy signature.
errCode := doesPolicySignatureMatch(formValues)
cred, errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

// Once the signature is validated, check if the user has
// explicit permission to perform this upload.
{
token := formValues.Get(xhttp.AmzSecurityToken)
if token != "" && cred.AccessKey == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL, guessIsBrowserReq(r))
return
}

if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}

if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL, guessIsBrowserReq(r))
return
}

// Extract claims if any.
claims, err := getClaimsFromToken(token)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
BucketName: bucket,
ObjectName: object,
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
Claims: claims,
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
return
}
}
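
Aside: the added token check uses subtle.ConstantTimeCompare so the comparison cost does not depend on how many leading bytes match, which avoids leaking secret contents through timing. A standalone sketch:

// Constant-time token comparison, as in the handler above.
package main

import (
	"crypto/subtle"
	"fmt"
)

func tokensMatch(got, want string) bool {
	// ConstantTimeCompare returns 0 immediately for unequal lengths,
	// which is acceptable when the token length itself is not secret.
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(tokensMatch("session-token", "session-token")) // true
	fmt.Println(tokensMatch("session-tokeX", "session-token")) // false
}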

policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
@@ -844,10 +929,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Handle policy if it is set.
if len(policyBytes) > 0 {

postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
return
}

@@ -875,20 +961,20 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
pReader := NewPutObjReader(rawReader)
var objectEncryptionKey crypto.ObjectKey

// Check if bucket encryption is enabled
@@ -928,12 +1014,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}

@@ -990,6 +1080,64 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
}

// GetBucketPolicyStatusHandler - Retrieves the policy status
// for a MinIO bucket, indicating whether the bucket is public.
func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicyStatus")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyStatusAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Check if anonymous (non-owner) has access to list objects.
readable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})

// Check if anonymous (non-owner) has access to upload objects.
writable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.PutObjectAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})

encodedSuccessResponse := encodeResponse(PolicyStatus{
IsPublic: func() string {
// Silly to have special 'boolean' values, yes,
// but complying with the implementation at
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
if readable && writable {
return "TRUE"
}
return "FALSE"
}(),
})

writeSuccessResponseXML(w, encodedSuccessResponse)
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
@@ -999,7 +1147,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")

defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1029,7 +1177,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")

defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1117,7 +1265,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketObjectLockConfig")

defer logger.AuditLog(w, r, "PutBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1173,7 +1321,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketObjectLockConfig")

defer logger.AuditLog(w, r, "GetBucketObjectLockConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1211,7 +1359,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketTagging")

defer logger.AuditLog(w, r, "PutBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1255,7 +1403,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")

defer logger.AuditLog(w, r, "GetBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1293,7 +1441,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketTagging")

defer logger.AuditLog(w, r, "DeleteBucketTagging", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1323,7 +1471,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketReplicationConfig")
defer logger.AuditLog(w, r, "PutBucketReplicationConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1336,11 +1484,6 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Turn off replication if disk crawl is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketReplicationDisabledError), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -1392,7 +1535,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationConfig")

defer logger.AuditLog(w, r, "GetBucketReplicationConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -1433,7 +1576,7 @@ func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWr
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
defer logger.AuditLog(w, r, "DeleteBucketReplicationConfig", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]


@@ -35,7 +35,7 @@ func TestRemoveBucketHandler(t *testing.T) {

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
@@ -669,7 +669,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

@@ -38,7 +38,7 @@ const (
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketLifecycle")

defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -103,7 +103,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLifecycle")

defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -145,7 +145,7 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketLifecycle")

defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objAPI := api.ObjectAPI()
if objAPI == nil {

@@ -65,6 +65,46 @@ func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}

type expiryTask struct {
objInfo ObjectInfo
versionExpiry bool
}

type expiryState struct {
expiryCh chan expiryTask
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
select {
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
default:
}
}

var (
globalExpiryState *expiryState
)

func newExpiryState() *expiryState {
es := &expiryState{
expiryCh: make(chan expiryTask, 10000),
}
go func() {
<-GlobalContext.Done()
close(es.expiryCh)
}()
return es
}

func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState()
go func() {
for t := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
}
}()
}
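
Aside: queueExpiryTask is a non-blocking send — the select with an empty default drops the task when the 10,000-entry buffer is full rather than stalling the caller. A minimal sketch of the drop-on-full pattern (names here are illustrative, not MinIO's):

// Enqueue never blocks; overflow is simply dropped.
package main

import "fmt"

type task struct{ id int }

func tryEnqueue(ch chan task, t task) bool {
	select {
	case ch <- t:
		return true
	default: // buffer full: drop instead of stalling the hot path
		return false
	}
}

func main() {
	ch := make(chan task, 2)
	for i := 1; i <= 4; i++ {
		fmt.Printf("task %d queued: %v\n", i, tryEnqueue(ch, task{id: i}))
	}
}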

type transitionState struct {
// add future metrics here
transitionCh chan ObjectInfo
@@ -205,16 +245,11 @@ func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, ar
}

// set PutObjectOptions for PUT operation to transition data to target cluster
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)

tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: objInfo.StorageClass,
@@ -224,29 +259,47 @@ func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {

if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}

lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}

return
return putOpts, nil
}
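
Aside: the lkMap lookups above tolerate mixed-case metadata keys, since user-defined HTTP metadata arrives with inconsistent casing. A hypothetical sketch of such a caseInsensitiveMap helper (the real implementation may differ):

// Case-insensitive lookup over user-defined metadata.
package main

import (
	"fmt"
	"strings"
)

type caseInsensitiveMap map[string]string

// Lookup finds a key regardless of case, mirroring how keys like
// Content-Language vs content-language are read interchangeably.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
	if v, ok := m[key]; ok {
		return v, true
	}
	for k, v := range m {
		if strings.EqualFold(k, key) {
			return v, true
		}
	}
	return "", false
}

func main() {
	meta := caseInsensitiveMap{"content-language": "en"}
	if v, ok := meta.Lookup("Content-Language"); ok {
		fmt.Println("found:", v)
	}
}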
|
||||
|
||||
// handle deletes of transitioned objects or object versions when one of the following is true:
// 1. temporarily restored copies of objects (restored with the PostRestoreObject API) expired.
// 2. life cycle expiry date is met on the object.
// 3. Object is removed through DELETE api call
-func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, action lifecycle.Action, isDeleteTierOnly bool) error {
+func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, restoredObject, isDeleteTierOnly bool) error {
 	if lcOpts.TransitionStatus == "" && !isDeleteTierOnly {
 		return nil
 	}
@@ -266,46 +319,41 @@ func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket
 	var opts ObjectOptions
 	opts.Versioned = globalBucketVersioningSys.Enabled(bucket)
 	opts.VersionID = lcOpts.VersionID
-	switch action {
-	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
+	if restoredObject {
 		// delete locally restored copy of object or object version
 		// from the source, while leaving metadata behind. The data on
 		// transitioned tier lies untouched and still accessible
 		opts.TransitionStatus = lcOpts.TransitionStatus
 		_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
 		return err
-	case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
-		// When an object is past expiry, delete the data from transitioned tier and
-		// metadata from source
-		if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
-			logger.LogIf(ctx, err)
-		}
-
-		if isDeleteTierOnly {
-			return nil
-		}
-		_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
-		if err != nil {
-			return err
-		}
-		eventName := event.ObjectRemovedDelete
-		if lcOpts.DeleteMarker {
-			eventName = event.ObjectRemovedDeleteMarkerCreated
-		}
-		objInfo := ObjectInfo{
-			Name:         object,
-			VersionID:    lcOpts.VersionID,
-			DeleteMarker: lcOpts.DeleteMarker,
-		}
-		// Notify object deleted event.
-		sendEvent(eventArgs{
-			EventName:  eventName,
-			BucketName: bucket,
-			Object:     objInfo,
-			Host:       "Internal: [ILM-EXPIRY]",
-		})
-	}
+	}
+
+	// When an object is past expiry, delete the data from transitioned tier and
+	// metadata from source
+	if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
+		logger.LogIf(ctx, err)
+	}
+
+	if isDeleteTierOnly {
+		return nil
+	}
+
+	objInfo, err := objectAPI.DeleteObject(ctx, bucket, object, opts)
+	if err != nil {
+		return err
+	}
+	eventName := event.ObjectRemovedDelete
+	if lcOpts.DeleteMarker {
+		eventName = event.ObjectRemovedDeleteMarkerCreated
+	}
+	// Notify object deleted event.
+	sendEvent(eventArgs{
+		EventName:  eventName,
+		BucketName: bucket,
+		Object:     objInfo,
+		Host:       "Internal: [ILM-EXPIRY]",
+	})
 
 	// should never reach here
 	return nil
 }
@@ -340,13 +388,19 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
 		return err
 	}
 	oi := gr.ObjInfo
 
 	if oi.TransitionStatus == lifecycle.TransitionComplete {
 		gr.Close()
 		return nil
 	}
 
-	putOpts := putTransitionOpts(oi)
-	if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, "", "", putOpts); err != nil {
+	putOpts, err := putTransitionOpts(oi)
+	if err != nil {
+		gr.Close()
+		return err
+	}
+	if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
 		gr.Close()
 		return err
 	}
 	gr.Close()
@@ -357,20 +411,19 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
 	opts.TransitionStatus = lifecycle.TransitionComplete
 	eventName := event.ObjectTransitionComplete
 
-	_, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
+	objInfo, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
 	if err != nil {
 		eventName = event.ObjectTransitionFailed
 	}
 
 	// Notify object deleted event.
 	sendEvent(eventArgs{
 		EventName:  eventName,
-		BucketName: oi.Bucket,
-		Object: ObjectInfo{
-			Name:      oi.Name,
-			VersionID: opts.VersionID,
-		},
-		Host: "Internal: [ILM-Transition]",
+		BucketName: objInfo.Bucket,
+		Object:     objInfo,
+		Host:       "Internal: [ILM-Transition]",
 	})
 
 	return err
 }

@@ -420,7 +473,7 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
 		}
 	}
 
-	reader, _, _, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
+	reader, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
 	if err != nil {
 		return nil, err
 	}

@@ -639,11 +692,11 @@ func restoreTransitionedObject(ctx context.Context, bucket, object string, objAP
 		return err
 	}
 	defer gr.Close()
-	hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}
-	pReader := NewPutObjReader(hashReader, nil, nil)
+	pReader := NewPutObjReader(hashReader)
 	opts := putRestoreOpts(bucket, object, rreq, objInfo)
 	opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
 	if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {
@@ -26,32 +26,25 @@ import (
 	"github.com/minio/minio/cmd/logger"
 
 	"github.com/minio/minio/pkg/bucket/policy"
 	"github.com/minio/minio/pkg/handlers"
 	"github.com/minio/minio/pkg/sync/errgroup"
 )
 
 func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
-	inParallel := func(objects []ObjectInfo) {
-		g := errgroup.WithNErrs(len(objects))
-		for index := range objects {
-			index := index
-			g.Go(func() error {
-				objects[index].ETag = objects[index].GetActualETag(nil)
-				objects[index].Size, _ = objects[index].GetActualSize()
-				return nil
-			}, index)
-		}
-		g.Wait()
-	}
-	const maxConcurrent = 500
-	for {
-		if len(objects) < maxConcurrent {
-			inParallel(objects)
-			return
-		}
-		inParallel(objects[:maxConcurrent])
-		objects = objects[maxConcurrent:]
-	}
+	g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
+	_, cancel := g.WithCancelOnError(ctx)
+	defer cancel()
+	for index := range objects {
+		index := index
+		g.Go(func() error {
+			size, err := objects[index].GetActualSize()
+			if err == nil {
+				objects[index].Size = size
+			}
+			objects[index].ETag = objects[index].GetActualETag(nil)
+			return nil
+		}, index)
+	}
+	g.WaitErr()
 }
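The rewrite above drops the hand-rolled batching in blocks of 500 in favor of a single errgroup with a concurrency cap. The underlying pattern — schedule every task up front, but let at most N run at once — can be sketched with a plain counting semaphore (illustrative only; MinIO's internal errgroup package presumably wires up something equivalent behind WithConcurrency):

```go
package main

import (
	"fmt"
	"sync"
)

// forEachBounded runs fn(i) for i in [0,n) with at most `limit`
// goroutines in flight at any moment.
func forEachBounded(n, limit int, fn func(i int)) {
	sem := make(chan struct{}, limit) // counting semaphore
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot (blocks when limit reached)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fn(i)
		}(i)
	}
	wg.Wait()
}

func main() {
	forEachBounded(10, 3, func(i int) { fmt.Println("item", i) })
}
```

Unlike fixed batches, a semaphore keeps all workers busy even when individual items take very different amounts of time, which is exactly the behavior listing wants when some ETags need decryption and others do not.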
// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -82,7 +75,7 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
 func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ListObjectVersions")
 
-	defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -143,7 +136,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
 func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ListObjectsV2M")
 
-	defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -210,7 +203,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
 func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ListObjectsV2")
 
-	defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -301,10 +294,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
 	return proxyRequest(ctx, w, r, ep)
 }
 
-func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
-	return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
-}
-
 // ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
 // --------------------------
 // This implementation of the GET operation returns some or all (up to 10000)
@@ -314,7 +303,7 @@ func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *htt
 func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ListObjectsV1")
 
-	defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -343,15 +332,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 		return
 	}
 
-	// Forward the request using Source IP or bucket
-	forwardStr := handlers.GetSourceIPFromHeaders(r)
-	if forwardStr == "" {
-		forwardStr = bucket
-	}
-	if proxyRequestByStringHash(ctx, w, r, forwardStr) {
-		return
-	}
-
 	listObjects := objectAPI.ListObjects
 
 	// Inititate a list objects operation based on the input params.

@@ -477,6 +477,15 @@ func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, ob
 	}
 }
 
+// Reset the state of the BucketMetadataSys.
+func (sys *BucketMetadataSys) Reset() {
+	sys.Lock()
+	for k := range sys.metadataMap {
+		delete(sys.metadataMap, k)
+	}
+	sys.Unlock()
+}
+
 // NewBucketMetadataSys - creates new policy system.
 func NewBucketMetadataSys() *BucketMetadataSys {
 	return &BucketMetadataSys{

@@ -39,7 +39,7 @@ const (
 func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "GetBucketNotification")
 
-	defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucketName := vars["bucket"]
@@ -111,7 +111,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "PutBucketNotification")
 
-	defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	objectAPI := api.ObjectAPI()
 	if objectAPI == nil {

@@ -40,7 +40,7 @@ const (
 func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "PutBucketPolicy")
 
-	defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	objAPI := api.ObjectAPI()
 	if objAPI == nil {
@@ -106,7 +106,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "DeleteBucketPolicy")
 
-	defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	objAPI := api.ObjectAPI()
 	if objAPI == nil {
@@ -141,7 +141,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "GetBucketPolicy")
 
-	defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	objAPI := api.ObjectAPI()
 	if objAPI == nil {

@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 		}
 	}
 
+	authType := getRequestAuthType(r)
+	var signatureVersion string
+	switch authType {
+	case authTypeSignedV2, authTypePresignedV2:
+		signatureVersion = signV2Algorithm
+	case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
+		signatureVersion = signV4Algorithm
+	}
+
+	var authtype string
+	switch authType {
+	case authTypePresignedV2, authTypePresigned:
+		authtype = "REST-QUERY-STRING"
+	case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
+		authtype = "REST-HEADER"
+	case authTypePostPolicy:
+		authtype = "POST"
+	}
+
 	args := map[string][]string{
-		"CurrentTime":     {currTime.Format(time.RFC3339)},
-		"EpochTime":       {strconv.FormatInt(currTime.Unix(), 10)},
-		"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
-		"SourceIp":        {handlers.GetSourceIP(r)},
-		"UserAgent":       {r.UserAgent()},
-		"Referer":         {r.Referer()},
-		"principaltype":   {principalType},
-		"userid":          {username},
-		"username":        {username},
-		"versionid":       {vid},
+		"CurrentTime":      {currTime.Format(time.RFC3339)},
+		"EpochTime":        {strconv.FormatInt(currTime.Unix(), 10)},
+		"SecureTransport":  {strconv.FormatBool(r.TLS != nil)},
+		"SourceIp":         {handlers.GetSourceIP(r)},
+		"UserAgent":        {r.UserAgent()},
+		"Referer":          {r.Referer()},
+		"principaltype":    {principalType},
+		"userid":           {username},
+		"username":         {username},
+		"versionid":        {vid},
+		"signatureversion": {signatureVersion},
+		"authType":         {authtype},
 	}
 
 	if lc != "" {
@@ -18,11 +18,13 @@ package cmd
 
 import (
 	"context"
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
 	"net/http"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	minio "github.com/minio/minio-go/v7"
@@ -31,13 +33,16 @@ import (
 	"github.com/minio/minio/cmd/crypto"
 	"github.com/minio/minio/pkg/bucket/versioning"
 	"github.com/minio/minio/pkg/madmin"
 	sha256 "github.com/minio/sha256-simd"
 )
 
+const (
+	defaultHealthCheckDuration = 100 * time.Second
+)
+
 // BucketTargetSys represents bucket targets subsystem
 type BucketTargetSys struct {
 	sync.RWMutex
-	arnRemotesMap map[string]*miniogo.Core
+	arnRemotesMap map[string]*TargetClient
 	targetsMap    map[string][]madmin.BucketTarget
 }
 
@@ -68,7 +73,6 @@ func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType str
 
 // ListBucketTargets - gets list of bucket targets for this bucket.
 func (sys *BucketTargetSys) ListBucketTargets(ctx context.Context, bucket string) (*madmin.BucketTargets, error) {
-
 	sys.RLock()
 	defer sys.RUnlock()
 
@@ -96,7 +100,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 		if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
 			return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
 		}
-		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
 	}
 	if tgt.Type == madmin.ReplicationService {
 		if !globalIsErasure {
@@ -107,7 +111,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 		}
 		vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
 		if err != nil {
-			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+			return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
 		}
 		if vcfg.Status != string(versioning.Enabled) {
 			return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -120,7 +124,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 		if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
 			return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
 		}
-		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
+		return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
 	}
 	if vcfg.Status != string(versioning.Enabled) {
 		return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -132,7 +136,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 
 	tgts := sys.targetsMap[bucket]
 	newtgts := make([]madmin.BucketTarget, len(tgts))
-	labels := make(map[string]struct{})
+	labels := make(map[string]struct{}, len(tgts))
 	found := false
 	for idx, t := range tgts {
 		labels[t.Label] = struct{}{}
@@ -198,9 +202,12 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
 	// delete ARN type from list of matching targets
 	sys.Lock()
 	defer sys.Unlock()
-	targets := make([]madmin.BucketTarget, 0)
 	found := false
-	tgts := sys.targetsMap[bucket]
+	tgts, ok := sys.targetsMap[bucket]
+	if !ok {
+		return BucketRemoteTargetNotFound{Bucket: bucket}
+	}
+	targets := make([]madmin.BucketTarget, 0, len(tgts))
 	for _, tgt := range tgts {
 		if tgt.Arn != arnStr {
 			targets = append(targets, tgt)
@@ -217,7 +224,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
 }
 
 // GetRemoteTargetClient returns minio-go client for replication target instance
-func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *miniogo.Core {
+func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn string) *TargetClient {
 	sys.RLock()
 	defer sys.RUnlock()
 	return sys.arnRemotesMap[arn]
@@ -264,7 +271,7 @@ func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, a
 // NewBucketTargetSys - creates new replication system.
 func NewBucketTargetSys() *BucketTargetSys {
 	return &BucketTargetSys{
-		arnRemotesMap: make(map[string]*miniogo.Core),
+		arnRemotesMap: make(map[string]*TargetClient),
 		targetsMap:    make(map[string][]madmin.BucketTarget),
 	}
 }
@@ -345,20 +352,34 @@ var getRemoteTargetInstanceTransport http.RoundTripper
 var getRemoteTargetInstanceTransportOnce sync.Once
 
 // Returns a minio-go Client configured to access remote host described in replication target config.
-func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*miniogo.Core, error) {
+func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
 	config := tcfg.Credentials
 	creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")
 
 	getRemoteTargetInstanceTransportOnce.Do(func() {
 		getRemoteTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute)
 	})
 
-	core, err := miniogo.NewCore(tcfg.URL().Host, &miniogo.Options{
+	api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
 		Creds:     creds,
 		Secure:    tcfg.Secure,
 		Region:    tcfg.Region,
 		Transport: getRemoteTargetInstanceTransport,
 	})
-	return core, err
+	if err != nil {
+		return nil, err
+	}
+	hcDuration := defaultHealthCheckDuration
+	if tcfg.HealthCheckDuration >= 1 { // require minimum health check duration of 1 sec.
+		hcDuration = tcfg.HealthCheckDuration
+	}
+	tc := &TargetClient{
+		Client:              api,
+		healthCheckDuration: hcDuration,
+		bucket:              tcfg.TargetBucket,
+		replicateSync:       tcfg.ReplicationSync,
+	}
+	go tc.healthCheck()
+	return tc, nil
 }
 
 // getRemoteARN gets existing ARN for an endpoint or generates a new one.
@@ -422,3 +443,29 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
 	}
 	return &t, nil
 }
+
+// TargetClient is the struct for remote target client.
+type TargetClient struct {
+	*miniogo.Client
+	up                  int32
+	healthCheckDuration time.Duration
+	bucket              string // remote bucket target
+	replicateSync       bool
+}
+
+func (tc *TargetClient) isOffline() bool {
+	return atomic.LoadInt32(&tc.up) == 0
+}
+
+func (tc *TargetClient) healthCheck() {
+	for {
+		_, err := tc.BucketExists(GlobalContext, tc.bucket)
+		if err != nil {
+			atomic.StoreInt32(&tc.up, 0)
+			time.Sleep(tc.healthCheckDuration)
+			continue
+		}
+		atomic.StoreInt32(&tc.up, 1)
+		time.Sleep(tc.healthCheckDuration)
+	}
+}
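The TargetClient added above follows a common Go idiom: a background prober flips an atomic up/down flag, and readers consult the flag without taking a lock or issuing a network call. A self-contained sketch of the same pattern — the type and field names here are placeholders, with check standing in for BucketExists and interval for healthCheckDuration:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// probeTarget keeps an atomic liveness flag updated by a background probe.
type probeTarget struct {
	up       int32
	interval time.Duration
	check    func() error // placeholder for a real reachability probe
}

func (t *probeTarget) isOffline() bool { return atomic.LoadInt32(&t.up) == 0 }

// healthCheck runs forever, probing once per interval.
func (t *probeTarget) healthCheck() {
	for {
		if err := t.check(); err != nil {
			atomic.StoreInt32(&t.up, 0)
		} else {
			atomic.StoreInt32(&t.up, 1)
		}
		time.Sleep(t.interval)
	}
}

func main() {
	t := &probeTarget{interval: 100 * time.Millisecond, check: func() error { return nil }}
	go t.healthCheck()
	time.Sleep(250 * time.Millisecond)
	fmt.Println("offline:", t.isOffline())
}
```

The payoff for replication is that a caller can cheaply skip or requeue work while the remote is down instead of paying a full network timeout on every object.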
@@ -40,7 +40,7 @@ const (
 func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "PutBucketVersioning")
 
-	defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
@@ -98,7 +98,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
 func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "GetBucketVersioning")
 
-	defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -51,6 +51,11 @@ func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, erro
 	return globalBucketMetadataSys.GetVersioningConfig(bucket)
 }
 
+// Reset BucketVersioningSys to initial state.
+func (sys *BucketVersioningSys) Reset() {
+	// There is currently no internal state.
+}
+
 // NewBucketVersioningSys - creates new versioning system.
 func NewBucketVersioningSys() *BucketVersioningSys {
 	return &BucketVersioningSys{}
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"crypto/x509"
 	"encoding/gob"
 	"errors"
@@ -27,6 +28,7 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strings"
 	"time"
 
@@ -41,21 +43,36 @@ import (
 	"github.com/minio/minio/pkg/certs"
 	"github.com/minio/minio/pkg/console"
 	"github.com/minio/minio/pkg/env"
+	"github.com/minio/minio/pkg/handlers"
 )
 
+// serverDebugLog will enable debug printing
+var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
+
 func init() {
-	rand.Seed(time.Now().UTC().UnixNano())
-
 	logger.Init(GOPATH, GOROOT)
 	logger.RegisterError(config.FmtError)
 
-	globalDNSCache = xhttp.NewDNSCache(10*time.Second, 3*time.Second)
+	rand.Seed(time.Now().UTC().UnixNano())
+
+	// Inject into config package.
+	config.Logger.Info = logger.Info
+	config.Logger.LogIf = logger.LogIf
+
+	globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
 
 	initGlobalContext()
 
+	globalReplicationState = newReplicationState()
+	globalForwarder = handlers.NewForwarder(&handlers.Forwarder{
+		PassHost:     true,
+		RoundTripper: newGatewayHTTPTransport(1 * time.Hour),
+		Logger: func(err error) {
+			if err != nil && !errors.Is(err, context.Canceled) {
+				logger.LogIf(GlobalContext, err)
+			}
+		},
+	})
+
+	globalTransitionState = newTransitionState()
+
 	console.SetColor("Debug", color.New())
@@ -241,6 +258,14 @@ func handleCommonEnvVars() {
 		}
 		globalDomainNames = append(globalDomainNames, domainName)
 	}
+	sort.Strings(globalDomainNames)
+	lcpSuf := lcpSuffix(globalDomainNames)
+	for _, domainName := range globalDomainNames {
+		if domainName == lcpSuf && len(globalDomainNames) > 1 {
+			logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames),
+				"Invalid MINIO_DOMAIN value in environment variable")
+		}
+	}
 	}
 
 	publicIPs := env.Get(config.EnvPublicIPs, "")
@@ -286,6 +311,16 @@ func handleCommonEnvVars() {
 		globalConfigEncrypted = true
 	}
 
+	if env.IsSet(config.EnvRootUser) || env.IsSet(config.EnvRootPassword) {
+		cred, err := auth.CreateCredentials(env.Get(config.EnvRootUser, ""), env.Get(config.EnvRootPassword, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate credentials inherited from the shell environment")
+		}
+		globalActiveCred = cred
+		globalConfigEncrypted = true
+	}
+
 	if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
 		oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
 		if err != nil {
@@ -296,6 +331,17 @@ func handleCommonEnvVars() {
 		os.Unsetenv(config.EnvAccessKeyOld)
 		os.Unsetenv(config.EnvSecretKeyOld)
 	}
+
+	if env.IsSet(config.EnvRootUserOld) && env.IsSet(config.EnvRootPasswordOld) {
+		oldCred, err := auth.CreateCredentials(env.Get(config.EnvRootUserOld, ""), env.Get(config.EnvRootPasswordOld, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate the old credentials inherited from the shell environment")
+		}
+		globalOldCred = oldCred
+		os.Unsetenv(config.EnvRootUserOld)
+		os.Unsetenv(config.EnvRootPasswordOld)
+	}
 }
 
 func logStartupMessage(msg string) {
@@ -20,6 +20,8 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"io/ioutil"
+	"net/http"
 
 	"github.com/minio/minio/pkg/hash"
 )
@@ -27,9 +29,9 @@ import (
 var errConfigNotFound = errors.New("config file not found")
 
 func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
-	var buffer bytes.Buffer
-	// Read entire content by setting size to -1
-	if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
+	r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
+	if err != nil {
 		// Treat object not found as config not found.
 		if isErrObjectNotFound(err) {
 			return nil, errConfigNotFound
@@ -37,13 +39,16 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
 
 		return nil, err
 	}
+	defer r.Close()
 
-	// Return config not found on empty content.
-	if buffer.Len() == 0 {
+	buf, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	if len(buf) == 0 {
 		return nil, errConfigNotFound
 	}
 
-	return buffer.Bytes(), nil
+	return buf, nil
 }
 
 type objectDeleter interface {
@@ -59,12 +64,12 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
 }
 
 func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
-	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
 	if err != nil {
 		return err
 	}
 
-	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
+	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
 	return err
 }
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"context"
+	"crypto/tls"
 	"fmt"
 	"strings"
 	"sync"
@@ -26,7 +27,6 @@ import (
 	"github.com/minio/minio/cmd/config/api"
 	"github.com/minio/minio/cmd/config/cache"
 	"github.com/minio/minio/cmd/config/compress"
-	"github.com/minio/minio/cmd/config/crawler"
 	"github.com/minio/minio/cmd/config/dns"
 	"github.com/minio/minio/cmd/config/etcd"
 	"github.com/minio/minio/cmd/config/heal"
@@ -34,6 +34,7 @@ import (
 	"github.com/minio/minio/cmd/config/identity/openid"
 	"github.com/minio/minio/cmd/config/notify"
 	"github.com/minio/minio/cmd/config/policy/opa"
+	"github.com/minio/minio/cmd/config/scanner"
 	"github.com/minio/minio/cmd/config/storageclass"
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
@@ -59,7 +60,7 @@ func initHelp() {
 		config.LoggerWebhookSubSys: logger.DefaultKVS,
 		config.AuditWebhookSubSys:  logger.DefaultAuditKVS,
 		config.HealSubSys:          heal.DefaultKVS,
-		config.CrawlerSubSys:       crawler.DefaultKVS,
+		config.ScannerSubSys:       scanner.DefaultKVS,
 	}
 	for k, v := range notify.DefaultNotificationKVS {
 		kvs[k] = v
@@ -116,8 +117,8 @@ func initHelp() {
 			Description: "manage object healing frequency and bitrot verification checks",
 		},
 		config.HelpKV{
-			Key:         config.CrawlerSubSys,
-			Description: "manage crawling for usage calculation, lifecycle, healing and more",
+			Key:         config.ScannerSubSys,
+			Description: "manage namespace scanning for usage calculation, lifecycle, healing and more",
 		},
 		config.HelpKV{
 			Key: config.LoggerWebhookSubSys,
@@ -199,7 +200,7 @@ func initHelp() {
 		config.CacheSubSys:          cache.Help,
 		config.CompressionSubSys:    compress.Help,
 		config.HealSubSys:           heal.Help,
-		config.CrawlerSubSys:        crawler.Help,
+		config.ScannerSubSys:        scanner.Help,
 		config.IdentityOpenIDSubSys: openid.Help,
 		config.IdentityLDAPSubSys:   xldap.Help,
 		config.PolicyOPASubSys:      opa.Help,
@@ -228,7 +229,7 @@ var (
 	globalServerConfigMu sync.RWMutex
 )
 
-func validateConfig(s config.Config, setDriveCount int) error {
+func validateConfig(s config.Config, setDriveCounts []int) error {
 	// We must have a global lock for this so nobody else modifies env while we do.
 	defer env.LockSetEnv()()
 
@@ -251,8 +252,10 @@ func validateConfig(s config.Config, setDriveCount int) error {
 	}
 
 	if globalIsErasure {
-		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
-			return err
+		for _, setDriveCount := range setDriveCounts {
+			if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
+				return err
+			}
 		}
 	}
 
@@ -271,11 +274,11 @@ func validateConfig(s config.Config, setDriveCount int) error {
 		}
 	}
 
-	if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
+	if _, err = heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
 		return err
 	}
 
-	if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
+	if _, err = scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
 		return err
 	}
 
@@ -293,7 +296,10 @@ func validateConfig(s config.Config, setDriveCount int) error {
 		}
 	}
 	{
-		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
+		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
+			&tls.Config{
+				RootCAs: globalRootCAs,
+			}, defaultDialTimeout)())
 		if err != nil {
 			return err
 		}
@@ -342,7 +348,7 @@ func validateConfig(s config.Config, setDriveCount int) error {
 	return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
 }
 
-func lookupConfigs(s config.Config, setDriveCount int) {
+func lookupConfigs(s config.Config, setDriveCounts []int) {
 	ctx := GlobalContext
 
 	var err error
@@ -429,7 +435,7 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
 	}
 
-	globalAPIConfig.init(apiConfig, setDriveCount)
+	globalAPIConfig.init(apiConfig, setDriveCounts)
 
 	// Initialize remote instance transport once.
 	getRemoteInstanceTransportOnce.Do(func() {
@@ -437,9 +443,17 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 	})
 
 	if globalIsErasure {
-		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
-		if err != nil {
-			logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
+		for i, setDriveCount := range setDriveCounts {
+			sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
+			if err != nil {
+				logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
+				break
+			}
+			// if we validated all setDriveCounts and it was successful
+			// proceed to store the correct storage class globally.
+			if i == len(setDriveCounts)-1 {
+				globalStorageClass = sc
+			}
 		}
 	}
 
@@ -461,7 +475,10 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 		}
 	}
 
-	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
+	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
+		&tls.Config{
+			RootCAs: globalRootCAs,
+		}, defaultDialTimeout)())
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
 	}
@@ -470,11 +487,13 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS with current KMS config: %w", err))
 	}
-	globalAutoEncryption = kmsCfg.AutoEncryption // Enable auto-encryption if enabled
 
-	if globalAutoEncryption && !globalIsGateway {
-		logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
+	// Enable auto-encryption if enabled
+	globalAutoEncryption = kmsCfg.AutoEncryption
+	if kmsCfg.Vault.Enabled {
+		const deprecationWarning = `Native Hashicorp Vault support is deprecated and will be removed on 2021-10-01. Please migrate to KES + Hashicorp Vault: https://github.com/minio/kes/wiki/Hashicorp-Vault-Keystore
+Note that native Hashicorp Vault and KES + Hashicorp Vault are not compatible.
+If you need help to migrate smoothly visit: https://min.io/pricing`
+		logger.LogIf(ctx, fmt.Errorf(deprecationWarning))
 	}
 
 	globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
@@ -534,7 +553,7 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 			http.WithAuthToken(l.AuthToken),
 			http.WithUserAgent(loggerUserAgent),
 			http.WithLogKind(string(logger.All)),
-			http.WithTransport(NewGatewayHTTPTransport()),
+			http.WithTransport(NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)),
 		),
 	); err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
@@ -553,12 +572,16 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 	}
 
 	// Apply dynamic config values
-	logger.LogIf(ctx, applyDynamicConfig(ctx, s))
+	logger.LogIf(ctx, applyDynamicConfig(ctx, newObjectLayerFn(), s))
 }
 
 // applyDynamicConfig will apply dynamic config values.
 // Dynamic systems should be in config.SubSystemsDynamic as well.
-func applyDynamicConfig(ctx context.Context, s config.Config) error {
+func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {
+	if objAPI == nil {
+		return nil
+	}
+
 	// Read all dynamic configs.
 	// API
 	apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
@@ -571,28 +594,27 @@ func applyDynamicConfig(ctx context.Context, s config.Config) error {
 	if err != nil {
 		return fmt.Errorf("Unable to setup Compression: %w", err)
 	}
-	objAPI := newObjectLayerFn()
-	if objAPI != nil {
-		if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
-			return fmt.Errorf("Backend does not support compression")
-		}
+
+	// Validate if the object layer supports compression.
+	if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
+		return fmt.Errorf("Backend does not support compression")
 	}
 
 	// Heal
 	healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
 	if err != nil {
-		logger.LogIf(ctx, fmt.Errorf("Unable to apply heal config: %w", err))
+		return fmt.Errorf("Unable to apply heal config: %w", err)
 	}
 
-	// Crawler
-	crawlerCfg, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
+	// Scanner
+	scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
 	if err != nil {
-		return fmt.Errorf("Unable to apply crawler config: %w", err)
+		return fmt.Errorf("Unable to apply scanner config: %w", err)
 	}
 
 	// Apply configurations.
 	// We should not fail after this.
-	globalAPIConfig.init(apiConfig, globalAPIConfig.setDriveCount)
+	globalAPIConfig.init(apiConfig, objAPI.SetDriveCounts())
 
 	globalCompressConfigMu.Lock()
 	globalCompressConfig = cmpCfg
@@ -602,7 +624,7 @@ func applyDynamicConfig(ctx context.Context, s config.Config) error {
 	globalHealConfig = healCfg
 	globalHealConfigMu.Unlock()
 
-	logger.LogIf(ctx, crawlerSleeper.Update(crawlerCfg.Delay, crawlerCfg.MaxWait))
+	logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
 
 	// Update all dynamic config values in memory.
 	globalServerConfigMu.Lock()
@@ -723,7 +745,7 @@ func loadConfig(objAPI ObjectLayer) error {
 	}
 
 	// Override any values from ENVs.
-	lookupConfigs(srvCfg, objAPI.SetDriveCount())
+	lookupConfigs(srvCfg, objAPI.SetDriveCounts())
 
 	// hold the mutex lock before a new config is assigned.
 	globalServerConfigMu.Lock()

@@ -207,7 +207,7 @@ func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client)
 	}
 
 	if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
-		logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
+		logger.Info("Rotation complete, please make sure to unset MINIO_ROOT_USER_OLD and MINIO_ROOT_PASSWORD_OLD envs")
 	}
 
 	return saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationComplete)
@@ -294,7 +294,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
 	}
 
 	if encrypted && globalActiveCred.IsValid() && activeCredOld.IsValid() {
-		logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
+		logger.Info("Rotation complete, please make sure to unset MINIO_ROOT_USER_OLD and MINIO_ROOT_PASSWORD_OLD envs")
	}

	return saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
@@ -29,14 +29,14 @@ import (
 
 // API sub-system constants
 const (
-	apiRequestsMax             = "requests_max"
-	apiRequestsDeadline        = "requests_deadline"
-	apiClusterDeadline         = "cluster_deadline"
-	apiCorsAllowOrigin         = "cors_allow_origin"
-	apiRemoteTransportDeadline = "remote_transport_deadline"
-	apiListQuorum              = "list_quorum"
-	apiExtendListCacheLife     = "extend_list_cache_life"
-
+	apiRequestsMax             = "requests_max"
+	apiRequestsDeadline        = "requests_deadline"
+	apiClusterDeadline         = "cluster_deadline"
+	apiCorsAllowOrigin         = "cors_allow_origin"
+	apiRemoteTransportDeadline = "remote_transport_deadline"
+	apiListQuorum              = "list_quorum"
+	apiExtendListCacheLife     = "extend_list_cache_life"
+	apiReplicationWorkers      = "replication_workers"
 	EnvAPIRequestsMax          = "MINIO_API_REQUESTS_MAX"
 	EnvAPIRequestsDeadline     = "MINIO_API_REQUESTS_DEADLINE"
 	EnvAPIClusterDeadline      = "MINIO_API_CLUSTER_DEADLINE"
@@ -45,6 +45,7 @@ const (
 	EnvAPIListQuorum          = "MINIO_API_LIST_QUORUM"
 	EnvAPIExtendListCacheLife = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
 	EnvAPISecureCiphers       = "MINIO_API_SECURE_CIPHERS"
+	EnvAPIReplicationWorkers  = "MINIO_API_REPLICATION_WORKERS"
 )
 
 // Deprecated key and ENVs
@@ -78,12 +79,16 @@ var (
 		},
 		config.KV{
 			Key:   apiListQuorum,
-			Value: "optimal",
+			Value: "strict",
 		},
 		config.KV{
 			Key:   apiExtendListCacheLife,
 			Value: "0s",
 		},
+		config.KV{
+			Key:   apiReplicationWorkers,
+			Value: "100",
+		},
 	}
 )
 
@@ -96,6 +101,7 @@ type Config struct {
 	RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
 	ListQuorum              string        `json:"list_strict_quorum"`
 	ExtendListLife          time.Duration `json:"extend_list_cache_life"`
+	ReplicationWorkers      int           `json:"replication_workers"`
 }
 
 // UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -113,8 +119,6 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
 // acceptable quorum expected for list operations
 func (sCfg Config) GetListQuorum() int {
 	switch sCfg.ListQuorum {
-	case "optimal":
-		return 3
 	case "reduced":
 		return 2
 	case "disk":
@@ -123,7 +127,7 @@ func (sCfg Config) GetListQuorum() int {
 	case "strict":
 		return -1
 	}
-	// Defaults to 3 drives per set.
+	// Defaults to 3 drives per set, defaults to "optimal" value
 	return 3
 }
@@ -175,6 +179,15 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
 		return cfg, err
 	}
 
+	replicationWorkers, err := strconv.Atoi(env.Get(EnvAPIReplicationWorkers, kvs.Get(apiReplicationWorkers)))
+	if err != nil {
+		return cfg, err
+	}
+
+	if replicationWorkers <= 0 {
+		return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Minimum number of replication workers should be 1")
+	}
+
 	return Config{
 		RequestsMax:      requestsMax,
 		RequestsDeadline: requestsDeadline,
@@ -183,5 +196,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
 		RemoteTransportDeadline: remoteTransportDeadline,
 		ListQuorum:              listQuorum,
 		ExtendListLife:          listLife,
+		ReplicationWorkers:      replicationWorkers,
 	}, nil
 }
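The new replication_workers knob (MINIO_API_REPLICATION_WORKERS, default 100, rejected if below 1) sizes a pool of goroutines that drain queued replication work. How such a count typically maps onto a worker pool can be sketched as below — the channel layout and task type are illustrative placeholders, not MinIO's actual replication pipeline; only the count semantics come from the config above:

```go
package main

import (
	"fmt"
	"sync"
)

type replicaTask struct{ object string }

// startWorkers launches n goroutines that consume replication tasks.
func startWorkers(n int, tasks <-chan replicaTask) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for t := range tasks {
				// a real worker would copy t.object to the remote target here
				fmt.Println("replicating", t.object)
			}
		}()
	}
	return &wg
}

func main() {
	tasks := make(chan replicaTask, 8)
	wg := startWorkers(4, tasks) // e.g. MINIO_API_REPLICATION_WORKERS=4
	tasks <- replicaTask{"bucket/obj1"}
	tasks <- replicaTask{"bucket/obj2"}
	close(tasks)
	wg.Wait()
}
```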
@@ -45,5 +45,11 @@
 			Optional: true,
 			Type:     "duration",
 		},
+		config.HelpKV{
+			Key:         apiReplicationWorkers,
+			Description: `set the number of replication workers, defaults to 100`,
+			Optional:    true,
+			Type:        "number",
+		},
 	}
 )

cmd/config/cache/help.go
@@ -40,19 +40,13 @@ var (
 		},
 		config.HelpKV{
 			Key:         Exclude,
-			Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
+			Description: `exclude cache for following patterns e.g. "bucket/*.tmp,*.exe"`,
 			Optional:    true,
 			Type:        "csv",
 		},
-		config.HelpKV{
-			Key:         config.Comment,
-			Description: config.DefaultComment,
-			Optional:    true,
-			Type:        "sentence",
-		},
 		config.HelpKV{
 			Key:         After,
-			Description: `minimum accesses before caching an object`,
+			Description: `minimum number of access before caching an object`,
 			Optional:    true,
 			Type:        "number",
 		},
@@ -80,5 +74,11 @@ var (
 			Optional: true,
 			Type:     "string",
 		},
+		config.HelpKV{
+			Key:         config.Comment,
+			Description: config.DefaultComment,
+			Optional:    true,
+			Type:        "sentence",
+		},
 	}
 )
@@ -23,6 +23,7 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/pem"
+	"errors"
 	"io/ioutil"
 
 	"github.com/minio/minio/pkg/env"
@@ -113,3 +114,12 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
 	}
 	return cert, nil
 }
+
+// EnsureCertAndKey checks if both client certificate and key paths are provided
+func EnsureCertAndKey(ClientCert, ClientKey string) error {
+	if (ClientCert != "" && ClientKey == "") ||
+		(ClientCert == "" && ClientKey != "") {
+		return errors.New("cert and key must be specified as a pair")
+	}
+	return nil
+}

@@ -26,23 +26,26 @@ import (
 
 // Config represents the compression settings.
 type Config struct {
-	Enabled    bool     `json:"enabled"`
-	Extensions []string `json:"extensions"`
-	MimeTypes  []string `json:"mime-types"`
+	Enabled        bool     `json:"enabled"`
+	AllowEncrypted bool     `json:"allow_encryption"`
+	Extensions     []string `json:"extensions"`
+	MimeTypes      []string `json:"mime-types"`
 }
 
 // Compression environment variables
 const (
-	Extensions = "extensions"
-	MimeTypes  = "mime_types"
+	Extensions     = "extensions"
+	AllowEncrypted = "allow_encryption"
+	MimeTypes      = "mime_types"
 
-	EnvCompressState      = "MINIO_COMPRESS_ENABLE"
-	EnvCompressExtensions = "MINIO_COMPRESS_EXTENSIONS"
-	EnvCompressMimeTypes  = "MINIO_COMPRESS_MIME_TYPES"
+	EnvCompressState           = "MINIO_COMPRESS_ENABLE"
+	EnvCompressAllowEncryption = "MINIO_COMPRESS_ALLOW_ENCRYPTION"
+	EnvCompressExtensions      = "MINIO_COMPRESS_EXTENSIONS"
+	EnvCompressMimeTypes       = "MINIO_COMPRESS_MIME_TYPES"
 
 	// Include-list for compression.
 	DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
-	DefaultMimeTypes  = "text/*,application/json,application/xml"
+	DefaultMimeTypes  = "text/*,application/json,application/xml,binary/octet-stream"
 )
 
 // DefaultKVS - default KV config for compression settings
@@ -52,6 +55,10 @@ var (
 			Key:   config.Enable,
 			Value: config.EnableOff,
 		},
+		config.KV{
+			Key:   AllowEncrypted,
+			Value: config.EnableOff,
+		},
 		config.KV{
 			Key:   Extensions,
 			Value: DefaultExtensions,
@@ -101,6 +108,12 @@ func LookupConfig(kvs config.KVS) (Config, error) {
 		return cfg, nil
 	}
 
+	allowEnc := env.Get(EnvCompressAllowEncryption, kvs.Get(AllowEncrypted))
+	cfg.AllowEncrypted, err = config.ParseBool(allowEnc)
+	if err != nil {
+		return cfg, err
+	}
+
 	compressExtensions := env.Get(EnvCompressExtensions, kvs.Get(Extensions))
 	compressMimeTypes := env.Get(EnvCompressMimeTypes, kvs.Get(MimeTypes))
 	compressMimeTypesLegacy := env.Get(EnvCompressMimeTypesLegacy, kvs.Get(MimeTypes))
@@ -77,6 +77,7 @@ const (
 	LoggerWebhookSubSys = "logger_webhook"
 	AuditWebhookSubSys  = "audit_webhook"
 	HealSubSys          = "heal"
+	ScannerSubSys       = "scanner"
 	CrawlerSubSys       = "crawler"
 
 	// Add new constants here if you add new fields to config.
@@ -114,7 +115,7 @@ var SubSystems = set.CreateStringSet(
 	PolicyOPASubSys,
 	IdentityLDAPSubSys,
 	IdentityOpenIDSubSys,
-	CrawlerSubSys,
+	ScannerSubSys,
 	HealSubSys,
 	NotifyAMQPSubSys,
 	NotifyESSubSys,
@@ -132,7 +133,7 @@ var SubSystems = set.CreateStringSet(
 var SubSystemsDynamic = set.CreateStringSet(
 	APISubSys,
 	CompressionSubSys,
-	CrawlerSubSys,
+	ScannerSubSys,
 	HealSubSys,
 )
@@ -151,7 +152,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
 	IdentityLDAPSubSys,
 	IdentityOpenIDSubSys,
 	HealSubSys,
-	CrawlerSubSys,
+	ScannerSubSys,
 }...)
 
 // Constant separators
@@ -462,6 +463,13 @@ func LookupWorm() (bool, error) {
 	return ParseBool(env.Get(EnvWorm, EnableOff))
 }
 
+// Carries all the renamed sub-systems from their
+// previously known names
+var renamedSubsys = map[string]string{
+	CrawlerSubSys: ScannerSubSys,
+	// Add future sub-system renames
+}
+
 // Merge - merges a new config with all the
 // missing values for default configs,
 // returns a config.
@@ -477,9 +485,21 @@ func (c Config) Merge() Config {
 			}
 		}
 		if _, ok := cp[subSys]; !ok {
-			// A config subsystem was removed or server was downgraded.
-			Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
-			continue
+			rnSubSys, ok := renamedSubsys[subSys]
+			if !ok {
+				// A config subsystem was removed or server was downgraded.
+				Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
+				continue
+			}
+			// Copy over settings from previous sub-system
+			// to newly renamed sub-system
+			for _, kv := range cp[rnSubSys][Default] {
+				_, ok := c[subSys][tgt].Lookup(kv.Key)
+				if !ok {
+					ckvs.Set(kv.Key, kv.Value)
+				}
+			}
+			subSys = rnSubSys
 		}
 		cp[subSys][tgt] = ckvs
 	}
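The Merge change gives old configs a migration path: a `crawler` section written by an earlier server release is folded into the new `scanner` section instead of being logged and dropped. A toy illustration of the same idea, with plain nested maps standing in for MinIO's config.Config type (names and structure here are simplified assumptions):

```go
package main

import "fmt"

// renames maps a retired section name to its successor.
var renames = map[string]string{"crawler": "scanner"}

// merge keeps known sections, folds renamed ones into their new name
// (without clobbering values already set there), and drops the rest.
func merge(current, incoming map[string]map[string]string) {
	for name, kvs := range incoming {
		if _, known := current[name]; known {
			for k, v := range kvs {
				current[name][k] = v
			}
			continue
		}
		newName, renamed := renames[name]
		if !renamed {
			fmt.Printf("config: ignoring unknown subsystem config %q\n", name)
			continue
		}
		if current[newName] == nil {
			current[newName] = map[string]string{}
		}
		for k, v := range kvs {
			if _, set := current[newName][k]; !set {
				current[newName][k] = v
			}
		}
	}
}

func main() {
	current := map[string]map[string]string{"scanner": {"delay": "10s"}}
	incoming := map[string]map[string]string{"crawler": {"max_wait": "15s"}, "gone": {"x": "1"}}
	merge(current, incoming)
	fmt.Println(current["scanner"]) // map[delay:10s max_wait:15s]
}
```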
@@ -23,17 +23,23 @@ const (
 
 // Top level common ENVs
 const (
-	EnvAccessKey    = "MINIO_ACCESS_KEY"
-	EnvSecretKey    = "MINIO_SECRET_KEY"
-	EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
-	EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
-	EnvBrowser      = "MINIO_BROWSER"
-	EnvDomain       = "MINIO_DOMAIN"
-	EnvRegionName   = "MINIO_REGION_NAME"
-	EnvPublicIPs    = "MINIO_PUBLIC_IPS"
-	EnvFSOSync      = "MINIO_FS_OSYNC"
-	EnvArgs         = "MINIO_ARGS"
-	EnvDNSWebhook   = "MINIO_DNS_WEBHOOK_ENDPOINT"
+	EnvAccessKey       = "MINIO_ACCESS_KEY"
+	EnvSecretKey       = "MINIO_SECRET_KEY"
+	EnvRootUser        = "MINIO_ROOT_USER"
+	EnvRootPassword    = "MINIO_ROOT_PASSWORD"
+	EnvAccessKeyOld    = "MINIO_ACCESS_KEY_OLD"
+	EnvSecretKeyOld    = "MINIO_SECRET_KEY_OLD"
+	EnvRootUserOld     = "MINIO_ROOT_USER_OLD"
+	EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
+	EnvBrowser         = "MINIO_BROWSER"
+	EnvDomain          = "MINIO_DOMAIN"
+	EnvRegionName      = "MINIO_REGION_NAME"
+	EnvPublicIPs       = "MINIO_PUBLIC_IPS"
+	EnvFSOSync         = "MINIO_FS_OSYNC"
+	EnvArgs            = "MINIO_ARGS"
+	EnvDNSWebhook      = "MINIO_DNS_WEBHOOK_ENDPOINT"
 
 	EnvLogPosixTimes         = "MINIO_LOG_POSIX_TIMES"
 	EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"
 
 	EnvUpdate = "MINIO_UPDATE"
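Both credential spellings stay live through the transition, so startup code can prefer the new MINIO_ROOT_USER / MINIO_ROOT_PASSWORD names and fall back to the legacy access/secret-key pair. A hedged sketch of that resolution order — the fallback policy shown is an assumption inferred from the paired constants above, not a quote of MinIO's startup logic:

```go
package main

import (
	"fmt"
	"os"
)

// getFirstSet returns the value of the first environment variable
// in names that is actually set (even if set to empty).
func getFirstSet(names ...string) string {
	for _, n := range names {
		if v, ok := os.LookupEnv(n); ok {
			return v
		}
	}
	return ""
}

func main() {
	// Assumed precedence: new root-user names win over legacy key names.
	user := getFirstSet("MINIO_ROOT_USER", "MINIO_ACCESS_KEY")
	pass := getFirstSet("MINIO_ROOT_PASSWORD", "MINIO_SECRET_KEY")
	fmt.Println("credentials set:", user != "" && pass != "")
}
```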
@@ -34,6 +34,9 @@ import (
 // ErrNoEntriesFound - Indicates no entries were found for the given key (directory)
 var ErrNoEntriesFound = errors.New("No entries found for this key")
 
+// ErrDomainMissing - Indicates domain is missing
+var ErrDomainMissing = errors.New("domain is missing")
+
 const etcdPathSeparator = "/"
 
 // create a new coredns service record for the bucket.
@@ -57,9 +60,9 @@ func (c *CoreDNS) List() (map[string][]SrvRecord, error) {
 	var srvRecords = map[string][]SrvRecord{}
 	for _, domainName := range c.domainNames {
 		key := msg.Path(fmt.Sprintf("%s.", domainName), c.prefixPath)
-		records, err := c.list(key)
+		records, err := c.list(key+etcdPathSeparator, true)
 		if err != nil {
-			return nil, err
+			return srvRecords, err
 		}
 		for _, record := range records {
 			if record.Key == "" {
@@ -76,7 +79,7 @@ func (c *CoreDNS) Get(bucket string) ([]SrvRecord, error) {
 	var srvRecords []SrvRecord
 	for _, domainName := range c.domainNames {
 		key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath)
-		records, err := c.list(key)
+		records, err := c.list(key, false)
 		if err != nil {
 			return nil, err
 		}
@@ -109,19 +112,25 @@ func msgUnPath(s string) string {
 
 // Retrieves list of entries under the key passed.
 // Note that this method fetches entries upto only two levels deep.
-func (c *CoreDNS) list(key string) ([]SrvRecord, error) {
+func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
 	r, err := c.etcdClient.Get(ctx, key, clientv3.WithPrefix())
 	defer cancel()
 	if err != nil {
 		return nil, err
 	}
 
 	if r.Count == 0 {
+		key = strings.TrimSuffix(key, etcdPathSeparator)
+		r, err = c.etcdClient.Get(ctx, key)
+		if err != nil {
+			return nil, err
+		}
+		// only if we are looking at `domain` as true
+		// we should return error here.
+		if domain && r.Count == 0 {
+			return nil, ErrDomainMissing
+		}
 	}
 
 	var srvRecords []SrvRecord
@@ -166,11 +175,11 @@ func (c *CoreDNS) Put(bucket string) error {
 		key = key + etcdPathSeparator + ip
 		ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
 		_, err = c.etcdClient.Put(ctx, key, string(bucketMsg))
-		defer cancel()
+		cancel()
 		if err != nil {
 			ctx, cancel = context.WithTimeout(context.Background(), defaultContextTimeout)
 			c.etcdClient.Delete(ctx, key)
-			defer cancel()
+			cancel()
 			return err
 		}
 	}
@@ -182,18 +191,12 @@ func (c *CoreDNS) Put(bucket string) error {
 func (c *CoreDNS) Delete(bucket string) error {
 	for _, domainName := range c.domainNames {
 		key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath)
-		srvRecords, err := c.list(key)
+		ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
+		_, err := c.etcdClient.Delete(ctx, key+etcdPathSeparator, clientv3.WithPrefix())
+		cancel()
 		if err != nil {
 			return err
 		}
-		for _, record := range srvRecords {
-			dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout)
-			if _, err = c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil {
-				dcancel()
-				return err
-			}
-			dcancel()
-		}
 	}
 	return nil
 }
@@ -203,12 +206,12 @@ func (c *CoreDNS) DeleteRecord(record SrvRecord) error {
 	for _, domainName := range c.domainNames {
 		key := msg.Path(fmt.Sprintf("%s.%s.", record.Key, domainName), c.prefixPath)
 
-		dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout)
-		if _, err := c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil {
-			dcancel()
+		ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
+		_, err := c.etcdClient.Delete(ctx, key+etcdPathSeparator+record.Host)
+		cancel()
+		if err != nil {
 			return err
 		}
-		dcancel()
 	}
 	return nil
 }
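The trailing path separator added to the List call matters: etcd's WithPrefix matches raw byte prefixes, so querying a domain path without a trailing "/" would also match sibling keys that merely share the spelling (for example a "domain-old" tree next to "domain"). A small standalone sketch of the two query shapes used above — the endpoint address and key paths are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Prefix listing: the trailing "/" keeps "/skydns/io/domain-old/..." out.
	r, err := cli.Get(ctx, "/skydns/io/domain/", clientv3.WithPrefix())
	if err != nil {
		panic(err)
	}
	fmt.Println("records under domain:", r.Count)

	// Exact lookup: no option, matches the single key only.
	r, err = cli.Get(ctx, "/skydns/io/domain")
	if err != nil {
		panic(err)
	}
	fmt.Println("exact key present:", r.Count == 1)
}
```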
@@ -30,6 +30,12 @@ var (
 		"Can only accept `on` and `off` values. To enable O_SYNC for fs backend, set this value to `on`",
 	)
 
+	ErrOverlappingDomainValue = newErrFn(
+		"Overlapping domain values",
+		"Please check the passed value",
+		"MINIO_DOMAIN only accepts non-overlapping domain values",
+	)
+
 	ErrInvalidDomainValue = newErrFn(
 		"Invalid domain value",
 		"Please check the passed value",
@@ -116,19 +122,19 @@ var (
 	ErrInvalidRotatingCredentialsBackendEncrypted = newErrFn(
 		"Invalid rotating credentials",
 		"Please set correct rotating credentials in the environment for decryption",
-		`Detected encrypted config backend, correct old access and secret keys should be specified via environment variables MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD to be able to re-encrypt the MinIO config, user IAM and policies with new credentials`,
+		`Detected encrypted config backend, correct old access and secret keys should be specified via environment variables MINIO_ROOT_USER_OLD and MINIO_ROOT_PASSWORD_OLD to be able to re-encrypt the MinIO config, user IAM and policies with new credentials`,
 	)
 
 	ErrInvalidCredentialsBackendEncrypted = newErrFn(
 		"Invalid credentials",
 		"Please set correct credentials in the environment for decryption",
-		`Detected encrypted config backend, correct access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY to be able to decrypt the MinIO config, user IAM and policies`,
+		`Detected encrypted config backend, correct access and secret keys should be specified via environment variables MINIO_ROOT_USER and MINIO_ROOT_PASSWORD to be able to decrypt the MinIO config, user IAM and policies`,
 	)
 
 	ErrMissingCredentialsBackendEncrypted = newErrFn(
 		"Credentials missing",
 		"Please set your credentials in the environment",
-		`Detected encrypted config backend, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY to be able to decrypt the MinIO config, user IAM and policies`,
+		`Detected encrypted config backend, access and secret keys should be specified via environment variables MINIO_ROOT_USER and MINIO_ROOT_PASSWORD to be able to decrypt the MinIO config, user IAM and policies`,
 	)
 
 	ErrInvalidCredentials = newErrFn(
@@ -140,13 +146,13 @@ var (
 	ErrEnvCredentialsMissingGateway = newErrFn(
 		"Credentials missing",
 		"Please set your credentials in the environment",
-		`In Gateway mode, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY respectively`,
+		`In Gateway mode, access and secret keys should be specified via environment variables MINIO_ROOT_USER and MINIO_ROOT_PASSWORD respectively`,
 	)
 
 	ErrEnvCredentialsMissingDistributed = newErrFn(
 		"Credentials missing",
 		"Please set your credentials in the environment",
-		`In distributed server mode, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY respectively`,
+		`In distributed server mode, access and secret keys should be specified via environment variables MINIO_ROOT_USER and MINIO_ROOT_PASSWORD respectively`,
	)
 
 	ErrInvalidErasureEndpoints = newErrFn(
@@ -275,4 +281,10 @@ Example 1:
 		"",
 		"Refer to https://docs.min.io/docs/minio-kms-quickstart-guide.html for setting up SSE",
 	)
+
+	ErrInvalidReplicationWorkersValue = newErrFn(
+		"Invalid value for replication workers",
+		"",
+		"MINIO_API_REPLICATION_WORKERS: should be > 0",
+	)
 )
@@ -27,6 +27,7 @@ import (
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.etcd.io/etcd/clientv3/namespace"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -144,6 +145,13 @@ func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
|
||||
cfg.Enabled = true
|
||||
cfg.DialTimeout = defaultDialTimeout
|
||||
cfg.DialKeepAliveTime = defaultDialKeepAlive
|
||||
// Disable etcd client SDK logging, etcd client
|
||||
// incorrectly starts logging in unexpected data
|
||||
// format.
|
||||
cfg.LogConfig = &zap.Config{
|
||||
Level: zap.NewAtomicLevelAt(zap.FatalLevel),
|
||||
Encoding: "console",
|
||||
}
|
||||
cfg.Endpoints = etcdEndpoints
|
||||
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kvs.Get(CoreDNSPath))
|
||||
// Default path prefix for all keys on etcd, other than CoreDNSPath.
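The `zap.Config` added in this hunk is what actually silences the etcd client SDK: the atomic level gate drops everything below Fatal. A standalone sketch of the same gate, under the assumption that output paths are added so the config builds on its own (they are not part of the diff):

```go
package main

import "go.uber.org/zap"

func main() {
	cfg := zap.Config{
		// Same gate as the etcd config above: entries below Fatal are dropped.
		Level:            zap.NewAtomicLevelAt(zap.FatalLevel),
		Encoding:         "console",
		OutputPaths:      []string{"stderr"},
		ErrorOutputPaths: []string{"stderr"},
	}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("suppressed")  // filtered out by the Fatal-level gate
	logger.Error("suppressed") // likewise
}
```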

@@ -66,7 +66,7 @@ var (
	Help = config.HelpKVS{
		config.HelpKV{
			Key:         Bitrot,
			Description: `perform bitrot scan on disks when checking objects during crawl`,
			Description: `perform bitrot scan on disks when checking objects during scanner`,
			Optional:    true,
			Type:        "on|off",
		},

@@ -21,17 +21,20 @@ import (
	"crypto/x509"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"time"

	ldap "github.com/go-ldap/ldap/v3"
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/pkg/env"
	ldap "gopkg.in/ldap.v3"
)

const (
	defaultLDAPExpiry = time.Hour * 1

	dnDelimiter = ";"
)

// Config contains AD/LDAP server connectivity information.
@@ -45,50 +48,65 @@ type Config struct {
	STSExpiryDuration string `json:"stsExpiryDuration"`

	// Format string for usernames
	UsernameFormat        string   `json:"usernameFormat"`
	UsernameFormats       []string `json:"-"`
	UsernameSearchFilter  string   `json:"-"`
	UsernameSearchBaseDNS []string `json:"-"`
	UsernameFormat  string   `json:"usernameFormat"`
	UsernameFormats []string `json:"-"`

	GroupSearchBaseDN  string   `json:"groupSearchBaseDN"`
	GroupSearchBaseDNS []string `json:"-"`
	GroupSearchFilter  string   `json:"groupSearchFilter"`
	GroupNameAttribute string   `json:"groupNameAttribute"`
	// User DN search parameters
	UserDNSearchBaseDN string `json:"userDNSearchBaseDN"`
	UserDNSearchFilter string `json:"userDNSearchFilter"`

	// Group search parameters
	GroupSearchBaseDistName  string   `json:"groupSearchBaseDN"`
	GroupSearchBaseDistNames []string `json:"-"`
	GroupSearchFilter        string   `json:"groupSearchFilter"`

	// Lookup bind LDAP service account
	LookupBindDN       string `json:"lookupBindDN"`
	LookupBindPassword string `json:"lookupBindPassword"`

	stsExpiryDuration time.Duration // contains converted value
	tlsSkipVerify     bool          // allows skipping TLS verification
	serverInsecure    bool          // allows plain text connection to LDAP Server
	serverStartTLS    bool          // allows plain text connection to LDAP Server
	serverInsecure    bool          // allows plain text connection to LDAP server
	serverStartTLS    bool          // allows using StartTLS connection to LDAP server
	isUsingLookupBind bool
	rootCAs           *x509.CertPool
}

// LDAP keys and envs.
const (
	ServerAddr           = "server_addr"
	STSExpiry            = "sts_expiry"
	UsernameFormat       = "username_format"
	UsernameSearchFilter = "username_search_filter"
	UsernameSearchBaseDN = "username_search_base_dn"
	GroupSearchFilter    = "group_search_filter"
	GroupNameAttribute   = "group_name_attribute"
	GroupSearchBaseDN    = "group_search_base_dn"
	TLSSkipVerify        = "tls_skip_verify"
	ServerInsecure       = "server_insecure"
	ServerStartTLS       = "server_starttls"
	ServerAddr         = "server_addr"
	STSExpiry          = "sts_expiry"
	LookupBindDN       = "lookup_bind_dn"
	LookupBindPassword = "lookup_bind_password"
	UserDNSearchBaseDN = "user_dn_search_base_dn"
	UserDNSearchFilter = "user_dn_search_filter"
	UsernameFormat     = "username_format"
	GroupSearchFilter  = "group_search_filter"
	GroupSearchBaseDN  = "group_search_base_dn"
	TLSSkipVerify      = "tls_skip_verify"
	ServerInsecure     = "server_insecure"
	ServerStartTLS     = "server_starttls"

	EnvServerAddr           = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
	EnvSTSExpiry            = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
	EnvTLSSkipVerify        = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
	EnvServerInsecure       = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
	EnvServerStartTLS       = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
	EnvUsernameFormat       = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
	EnvUsernameSearchFilter = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_FILTER"
	EnvUsernameSearchBaseDN = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_BASE_DN"
	EnvGroupSearchFilter    = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
	EnvGroupNameAttribute   = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
	EnvGroupSearchBaseDN    = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
	EnvServerAddr         = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
	EnvSTSExpiry          = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
	EnvTLSSkipVerify      = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
	EnvServerInsecure     = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
	EnvServerStartTLS     = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
	EnvUsernameFormat     = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
	EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
	EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
	EnvGroupSearchFilter  = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
	EnvGroupSearchBaseDN  = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
	EnvLookupBindDN       = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
	EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
)

var removedKeys = []string{
	"username_search_filter",
	"username_search_base_dn",
	"group_name_attribute",
}

// DefaultKVS - default config for LDAP config
var (
	DefaultKVS = config.KVS{
@@ -101,21 +119,17 @@ var (
			Value: "",
		},
		config.KV{
			Key:   UsernameSearchFilter,
			Key:   UserDNSearchBaseDN,
			Value: "",
		},
		config.KV{
			Key:   UsernameSearchBaseDN,
			Key:   UserDNSearchFilter,
			Value: "",
		},
		config.KV{
			Key:   GroupSearchFilter,
			Value: "",
		},
		config.KV{
			Key:   GroupNameAttribute,
			Value: "",
		},
		config.KV{
			Key:   GroupSearchBaseDN,
			Value: "",
@@ -136,117 +150,191 @@ var (
			Key:   ServerStartTLS,
			Value: config.EnableOff,
		},
		config.KV{
			Key:   LookupBindDN,
			Value: "",
		},
		config.KV{
			Key:   LookupBindPassword,
			Value: "",
		},
	}
)

const (
	dnDelimiter = ";"
)

func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
	var groups []string
	sres, err := conn.Search(sreq)
	if err != nil {
		// Check if there is no matching result and return empty slice.
		// Ref: https://ldap.com/ldap-result-code-reference/
		if ldap.IsErrorWithCode(err, 32) {
			return nil, nil
		}
		return nil, err
	}
	for _, entry := range sres.Entries {
		// We only queried one attribute,
		// so we only look up the first one.
		groups = append(groups, entry.Attributes[0].Values...)
		groups = append(groups, entry.DN)
	}
	return groups, nil
}

func (l *Config) bind(conn *ldap.Conn, username, password string) ([]string, error) {
	var bindDNS = make([]string, len(l.UsernameFormats))
func (l *Config) lookupBind(conn *ldap.Conn) error {
	var err error
	if l.LookupBindPassword == "" {
		err = conn.UnauthenticatedBind(l.LookupBindDN)
	} else {
		err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
	}
	if ldap.IsErrorWithCode(err, 49) {
		return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %v", err)
	}
	return err
}

// usernameFormatsBind - Iterates over all given username formats and expects
// that only one will succeed if the credentials are valid. The succeeding
// bindDN is returned or an error.
//
// In the rare case that multiple username formats succeed, implying that two
// (or more) distinct users in the LDAP directory have the same username and
// password, we return an error as we cannot identify the account intended by
// the user.
func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string) (string, error) {
	var bindDistNames []string
	var errs = make([]error, len(l.UsernameFormats))
	var successCount = 0
	for i, usernameFormat := range l.UsernameFormats {
		bindDN := fmt.Sprintf(usernameFormat, username)
		// Bind with user credentials to validate the password
		if err := conn.Bind(bindDN, password); err != nil {
			return nil, err
		errs[i] = conn.Bind(bindDN, password)
		if errs[i] == nil {
			bindDistNames = append(bindDistNames, bindDN)
			successCount++
		} else if !ldap.IsErrorWithCode(errs[i], 49) {
			return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %v", errs[i])
		}
		bindDNS[i] = bindDN
	}
	return bindDNS, nil
	if successCount == 0 {
		var errStrings []string
		for _, err := range errs {
			if err != nil {
				errStrings = append(errStrings, err.Error())
			}
		}
		outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
		return "", errors.New(outErr)
	}
	if successCount > 1 {
		successDistNames := strings.Join(bindDistNames, ", ")
		errMsg := fmt.Sprintf("Multiple username formats succeeded - ambiguous user login (succeeded for: %s)", successDistNames)
		return "", errors.New(errMsg)
	}
	return bindDistNames[0], nil
}
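The bare result codes above — 32 in `getGroups` and 49 in the bind paths — are the standard LDAP `noSuchObject` and `invalidCredentials` codes from the reference linked in the comment. go-ldap also exports them as named constants, so an equivalent, arguably clearer check is possible; a small hypothetical wrapper (not part of the diff) for illustration:

```go
package ldap

import ldap "github.com/go-ldap/ldap/v3"

// isInvalidCredentials is a hypothetical helper: result code 49 is
// ldap.LDAPResultInvalidCredentials, and the code 32 checked in
// getGroups is ldap.LDAPResultNoSuchObject.
func isInvalidCredentials(err error) bool {
	return ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials)
}
```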

var standardAttributes = []string{
	"givenName",
	"sn",
	"cn",
	"memberOf",
	"email",
// lookupUserDN searches for the DN of the user given their username. conn is
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
	filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
	searchRequest := ldap.NewSearchRequest(
		l.UserDNSearchBaseDN,
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		filter,
		[]string{}, // only need DN, so no pass no attributes here
		nil,
	)

	searchResult, err := conn.Search(searchRequest)
	if err != nil {
		return "", err
	}
	if len(searchResult.Entries) == 0 {
		return "", fmt.Errorf("User DN for %s not found", username)
	}
	if len(searchResult.Entries) != 1 {
		return "", fmt.Errorf("Multiple DNs for %s found - please fix the search filter", username)
	}
	return searchResult.Entries[0].DN, nil
}

// Bind - binds to ldap, searches LDAP and returns list of groups.
func (l *Config) Bind(username, password string) ([]string, error) {
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {
	conn, err := l.Connect()
	if err != nil {
		return nil, err
		return "", nil, err
	}
	defer conn.Close()

	bindDNS, err := l.bind(conn, username, password)
	if err != nil {
		return nil, err
	var bindDN string
	if l.isUsingLookupBind {
		// Bind to the lookup user account
		if err = l.lookupBind(conn); err != nil {
			return "", nil, err
		}

		// Lookup user DN
		bindDN, err = l.lookupUserDN(conn, username)
		if err != nil {
			errRet := fmt.Errorf("Unable to find user DN: %s", err)
			return "", nil, errRet
		}

		// Authenticate the user credentials.
		err = conn.Bind(bindDN, password)
		if err != nil {
			errRet := fmt.Errorf("LDAP auth failed for DN %s: %v", bindDN, err)
			return "", nil, errRet
		}

		// Bind to the lookup user account again to perform group search.
		if err = l.lookupBind(conn); err != nil {
			return "", nil, err
		}
	} else {
		// Verify login credentials by checking the username formats.
		bindDN, err = l.usernameFormatsBind(conn, username, password)
		if err != nil {
			return "", nil, err
		}

		// Bind to the successful bindDN again.
		err = conn.Bind(bindDN, password)
		if err != nil {
			errRet := fmt.Errorf("LDAP conn failed though auth for DN %s succeeded: %v", bindDN, err)
			return "", nil, errRet
		}
	}

	// User groups lookup.
	var groups []string
	if l.UsernameSearchFilter != "" {
		for _, userSearchBase := range l.UsernameSearchBaseDNS {
			filter := strings.Replace(l.UsernameSearchFilter, "%s",
				ldap.EscapeFilter(username), -1)

	if l.GroupSearchFilter != "" {
		for _, groupSearchBase := range l.GroupSearchBaseDistNames {
			filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
			filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
			searchRequest := ldap.NewSearchRequest(
				userSearchBase,
				groupSearchBase,
				ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
				filter,
				standardAttributes,
				nil,
				nil,
			)

			groups, err = getGroups(conn, searchRequest)
			var newGroups []string
			newGroups, err = getGroups(conn, searchRequest)
			if err != nil {
				return nil, err
				errRet := fmt.Errorf("Error finding groups of %s: %v", bindDN, err)
				return "", nil, errRet
			}

			groups = append(groups, newGroups...)
		}
	}

	if l.GroupSearchFilter != "" {
		for _, groupSearchBase := range l.GroupSearchBaseDNS {
			var filters []string
			if l.GroupNameAttribute == "" {
				filters = []string{strings.Replace(l.GroupSearchFilter, "%s",
					ldap.EscapeFilter(username), -1)}
			} else {
				// With group name attribute specified, make sure to
				// include search queries for CN distinguished name
				for _, bindDN := range bindDNS {
					filters = append(filters, strings.Replace(l.GroupSearchFilter, "%s",
						ldap.EscapeFilter(bindDN), -1))
				}
			}
			for _, filter := range filters {
				searchRequest := ldap.NewSearchRequest(
					groupSearchBase,
					ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
					filter,
					standardAttributes,
					nil,
				)

				var newGroups []string
				newGroups, err = getGroups(conn, searchRequest)
				if err != nil {
					return nil, err
				}

				groups = append(groups, newGroups...)
			}
		}
	}

	return groups, nil
	return bindDN, groups, nil
}
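With the new signature, callers receive the resolved distinguished name alongside the group list. A hedged usage sketch — the `authenticate` wrapper and the import path are assumptions for illustration, and `cfg` would come from `Lookup`:

```go
package example

import (
	"fmt"
	"log"

	"github.com/minio/minio/cmd/config/identity/ldap"
)

// authenticate shows how a caller consumes the three-value Bind:
// the DN identifies the account, the slice lists its group DNs.
func authenticate(cfg ldap.Config, user, pass string) error {
	bindDN, groups, err := cfg.Bind(user, pass)
	if err != nil {
		return fmt.Errorf("LDAP login failed: %w", err)
	}
	log.Printf("authenticated %s, member of %v", bindDN, groups)
	return nil
}
```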

// Connect connect to ldap server.
@@ -287,6 +375,38 @@ func (l Config) GetExpiryDuration() time.Duration {
	return l.stsExpiryDuration
}

func (l Config) testConnection() error {
	conn, err := l.Connect()
	if err != nil {
		return fmt.Errorf("Error creating connection to LDAP server: %v", err)
	}
	defer conn.Close()
	if l.isUsingLookupBind {
		if err = l.lookupBind(conn); err != nil {
			return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %v", err)
		}
		return nil
	}

	// Generate some random user credentials for username formats mode test.
	username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
	charset := []byte("abcdefghijklmnopqrstuvwxyz" +
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
	rand.Shuffle(len(charset), func(i, j int) {
		charset[i], charset[j] = charset[j], charset[i]
	})
	password := string(charset[:20])
	_, err = l.usernameFormatsBind(conn, username, password)
	if err == nil {
		// We don't expect to successfully guess a credential in this
		// way.
		return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
	} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
		return nil
	}
	return fmt.Errorf("LDAP connection test error: %v", err)
}

// Enabled returns if jwks is enabled.
func Enabled(kvs config.KVS) bool {
	return kvs.Get(ServerAddr) != ""
@@ -295,6 +415,12 @@ func Enabled(kvs config.KVS) bool {
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
	l = Config{}

	// Purge all removed keys first
	for _, k := range removedKeys {
		kvs.Delete(k)
	}

	if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
		return l, err
	}
@@ -316,6 +442,8 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
		l.STSExpiryDuration = v
		l.stsExpiryDuration = expDur
	}

	// LDAP connection configuration
	if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
		l.serverInsecure, err = config.ParseBool(v)
		if err != nil {
@@ -334,43 +462,62 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
			return l, err
		}
	}

	// Lookup bind user configuration
	lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
	lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
	if lookupBindDN != "" {
		l.LookupBindDN = lookupBindDN
		l.LookupBindPassword = lookupBindPassword
		l.isUsingLookupBind = true

		// User DN search configuration
		userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
		userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
		if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
			return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
		}
		l.UserDNSearchBaseDN = userDNSearchBaseDN
		l.UserDNSearchFilter = userDNSearchFilter
	}

	// Username format configuration.
	if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
		if !strings.Contains(v, "%s") {
			return l, errors.New("LDAP username format doesn't have '%s' substitution")
		}
		l.UsernameFormats = strings.Split(v, dnDelimiter)
	} else {
		return l, fmt.Errorf("'%s' cannot be empty and must have a value", UsernameFormat)
	}

	if v := env.Get(EnvUsernameSearchFilter, kvs.Get(UsernameSearchFilter)); v != "" {
		if !strings.Contains(v, "%s") {
			return l, errors.New("LDAP username search filter doesn't have '%s' substitution")
		}
		l.UsernameSearchFilter = v
	// Either lookup bind mode or username format is supported, but not
	// both.
	if l.isUsingLookupBind && len(l.UsernameFormats) > 0 {
		return l, errors.New("Lookup Bind mode and Username Format mode are not supported at the same time")
	}

	if v := env.Get(EnvUsernameSearchBaseDN, kvs.Get(UsernameSearchBaseDN)); v != "" {
		l.UsernameSearchBaseDNS = strings.Split(v, dnDelimiter)
	// At least one of bind mode or username format must be used.
	if !l.isUsingLookupBind && len(l.UsernameFormats) == 0 {
		return l, errors.New("Either Lookup Bind mode or Username Format mode is required.")
	}

	// Test connection to LDAP server.
	if err := l.testConnection(); err != nil {
		return l, fmt.Errorf("Connection test for LDAP server failed: %v", err)
	}

	// Group search params configuration
	grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
	grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
	grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))

	// Either all group params must be set or none must be set.
	var allSet bool
	if grpSearchFilter != "" {
		if grpSearchNameAttr == "" || grpSearchBaseDN == "" {
			return l, errors.New("All group related parameters must be set")
		}
		allSet = true
	if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
		return l, errors.New("All group related parameters must be set")
	}

	if allSet {
	if grpSearchFilter != "" {
		l.GroupSearchFilter = grpSearchFilter
		l.GroupNameAttribute = grpSearchNameAttr
		l.GroupSearchBaseDNS = strings.Split(grpSearchBaseDN, dnDelimiter)
		l.GroupSearchBaseDistName = grpSearchBaseDN
		l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
	}

	l.rootCAs = rootCAs

@@ -27,43 +27,53 @@ var (
			Type: "address",
		},
		config.HelpKV{
			Key:         UsernameFormat,
			Description: `";" separated list of username bind DNs e.g. "uid=%s,cn=accounts,dc=myldapserver,dc=com"`,
			Type:        "list",
			Key:         STSExpiry,
			Description: `temporary credentials validity duration in s,m,h,d. Default is "1h"`,
			Optional:    true,
			Type:        "duration",
		},
		config.HelpKV{
			Key:         UsernameSearchFilter,
			Description: `user search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"`,
			Key:         LookupBindDN,
			Description: `DN for LDAP read-only service account used to perform DN and group lookups`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         LookupBindPassword,
			Description: `Password for LDAP read-only service account used to perform DN and group lookups`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         UserDNSearchBaseDN,
			Description: `Base LDAP DN to search for user DN`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         UserDNSearchFilter,
			Description: `Search filter to lookup user DN`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         UsernameFormat,
			Description: `";" separated list of username bind DNs e.g. "uid=%s,cn=accounts,dc=myldapserver,dc=com"`,
			Optional:    true,
			Type:        "list",
		},
		config.HelpKV{
			Key:         GroupSearchFilter,
			Description: `search filter for groups e.g. "(&(objectclass=groupOfNames)(memberUid=%s))"`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         GroupSearchBaseDN,
			Description: `";" separated list of group search base DNs e.g. "dc=myldapserver,dc=com"`,
			Optional:    true,
			Type:        "list",
		},
		config.HelpKV{
			Key:         UsernameSearchBaseDN,
			Description: `";" separated list of username search DNs`,
			Type:        "list",
			Optional:    true,
		},
		config.HelpKV{
			Key:         GroupNameAttribute,
			Description: `search attribute for group name e.g. "cn"`,
			Optional:    true,
			Type:        "string",
		},
		config.HelpKV{
			Key:         STSExpiry,
			Description: `temporary credentials validity duration in s,m,h,d. Default is "1h"`,
			Optional:    true,
			Type:        "duration",
		},
		config.HelpKV{
			Key:         TLSSkipVerify,
			Description: `trust server TLS without verification, defaults to "off" (verify)`,
@@ -76,6 +86,12 @@ var (
			Optional: true,
			Type:     "on|off",
		},
		config.HelpKV{
			Key:         ServerStartTLS,
			Description: `use StartTLS connection to AD/LDAP server, defaults to "off"`,
			Optional:    true,
			Type:        "on|off",
		},
		config.HelpKV{
			Key:         config.Comment,
			Description: config.DefaultComment,

@@ -41,13 +41,9 @@ func SetIdentityLDAP(s config.Config, ldapArgs Config) {
			Key:   GroupSearchFilter,
			Value: ldapArgs.GroupSearchFilter,
		},
		config.KV{
			Key:   GroupNameAttribute,
			Value: ldapArgs.GroupNameAttribute,
		},
		config.KV{
			Key:   GroupSearchBaseDN,
			Value: ldapArgs.GroupSearchBaseDN,
			Value: ldapArgs.GroupSearchBaseDistName,
		},
	}
}

@@ -1,18 +1,18 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !fips

package openid

@@ -22,7 +22,7 @@ import (
	"github.com/dgrijalva/jwt-go"

	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
	_ "golang.org/x/crypto/sha3"
	_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
)

// Specific instances for EC256 and company

@@ -1,18 +1,18 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !fips

package openid

@@ -22,7 +22,7 @@ import (
	"github.com/dgrijalva/jwt-go"

	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
	_ "golang.org/x/crypto/sha3"
	_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
)

// Specific instances for RS256 and company

@@ -14,7 +14,7 @@
 * limitations under the License.
 */

package crawler
package scanner

import (
	"strconv"
@@ -29,8 +29,10 @@ const (
	Delay   = "delay"
	MaxWait = "max_wait"

	EnvDelay   = "MINIO_CRAWLER_DELAY"
	EnvMaxWait = "MINIO_CRAWLER_MAX_WAIT"
	EnvDelay         = "MINIO_SCANNER_DELAY"
	EnvDelayLegacy   = "MINIO_CRAWLER_DELAY"
	EnvMaxWait       = "MINIO_SCANNER_MAX_WAIT"
	EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"
)

// Config represents the heal settings.
@@ -58,7 +60,7 @@ var (
	Help = config.HelpKVS{
		config.HelpKV{
			Key:         Delay,
			Description: `crawler delay multiplier, defaults to '10.0'`,
			Description: `scanner delay multiplier, defaults to '10.0'`,
			Optional:    true,
			Type:        "float",
		},
@@ -73,14 +75,22 @@ var (

// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
	if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
	if err = config.CheckValidKeys(config.ScannerSubSys, kvs, DefaultKVS); err != nil {
		return cfg, err
	}
	cfg.Delay, err = strconv.ParseFloat(env.Get(EnvDelay, kvs.Get(Delay)), 64)
	delay := env.Get(EnvDelayLegacy, "")
	if delay == "" {
		delay = env.Get(EnvDelay, kvs.Get(Delay))
	}
	cfg.Delay, err = strconv.ParseFloat(delay, 64)
	if err != nil {
		return cfg, err
	}
	cfg.MaxWait, err = time.ParseDuration(env.Get(EnvMaxWait, kvs.Get(MaxWait)))
	maxWait := env.Get(EnvMaxWaitLegacy, "")
	if maxWait == "" {
		maxWait = env.Get(EnvMaxWait, kvs.Get(MaxWait))
	}
	cfg.MaxWait, err = time.ParseDuration(maxWait)
	if err != nil {
		return cfg, err
	}
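The delay and max-wait lookups above share one precedence rule: the legacy `MINIO_CRAWLER_*` variable wins if set, then the new `MINIO_SCANNER_*` variable, then the stored config value. A hypothetical helper capturing that order (the function name is an illustration, not part of the diff; `env.Get` is the same lookup-with-default used throughout this config code):

```go
package scanner

import "github.com/minio/minio/pkg/env"

// lookupWithLegacy returns the legacy environment value when present,
// otherwise the new environment variable, falling back to the config KV.
func lookupWithLegacy(legacyEnv, newEnv, kvValue string) string {
	if v := env.Get(legacyEnv, ""); v != "" {
		return v
	}
	return env.Get(newEnv, kvValue)
}
```

Under that assumption, `delay := lookupWithLegacy(EnvDelayLegacy, EnvDelay, kvs.Get(Delay))` reproduces the two-step lookup in `LookupConfig` above.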
@@ -64,7 +64,7 @@ const (
	defaultRRSParity = minParityDisks

	// Default DMA value
	defaultDMA = DMAWrite
	defaultDMA = DMAReadWrite
)

// DefaultKVS - default storage class config
@@ -88,14 +88,13 @@ var (
// StorageClass - holds storage class information
type StorageClass struct {
	Parity int
	DMA    string
}

// Config storage class configuration
type Config struct {
	Standard StorageClass `json:"standard"`
	RRS      StorageClass `json:"rrs"`
	DMA      StorageClass `json:"dma"`
	DMA      string       `json:"dma"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -112,7 +111,7 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
// IsValid - returns true if input string is a valid
// storage class kind supported.
func IsValid(sc string) bool {
	return sc == RRS || sc == STANDARD || sc == DMA
	return sc == RRS || sc == STANDARD
}

// UnmarshalText unmarshals storage class from its textual form into
@@ -122,14 +121,6 @@ func (sc *StorageClass) UnmarshalText(b []byte) error {
	if scStr == "" {
		return nil
	}
	if scStr == DMAWrite {
		sc.DMA = DMAWrite
		return nil
	}
	if scStr == DMAReadWrite {
		sc.DMA = DMAReadWrite
		return nil
	}
	s, err := parseStorageClass(scStr)
	if err != nil {
		return err
@@ -143,14 +134,14 @@ func (sc *StorageClass) MarshalText() ([]byte, error) {
	if sc.Parity != 0 {
		return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil
	}
	return []byte(sc.DMA), nil
	return []byte{}, nil
}

func (sc *StorageClass) String() string {
	if sc.Parity != 0 {
		return fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)
	}
	return sc.DMA
	return ""
}

// Parses given storageClassEnv and returns a storageClass structure.
@@ -182,22 +173,34 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
	}, nil
}

// Validates the parity disks.
func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
	if ssParity == 0 && rrsParity == 0 {
		return nil
	}

// ValidateParity validate standard storage class parity.
func ValidateParity(ssParity, setDriveCount int) error {
	// SS parity disks should be greater than or equal to minParityDisks.
	// Parity below minParityDisks is not supported.
	if ssParity < minParityDisks {
	if ssParity > 0 && ssParity < minParityDisks {
		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d",
			ssParity, minParityDisks)
	}

	if ssParity > setDriveCount/2 {
		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
	}

	return nil
}
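To make the exported `ValidateParity` bounds concrete: with a 16-drive erasure set, parity 0 means "unset" and is accepted, anything in [2, 8] passes, and values outside that range fail (`minParityDisks` is 2 per the RRS default above). A hedged, test-style sketch of those bounds — illustrative only, not part of the diff:

```go
package storageclass

import "testing"

// TestValidateParityBounds is an illustrative table test pinning down
// the accepted parity range for setDriveCount = 16.
func TestValidateParityBounds(t *testing.T) {
	cases := []struct {
		parity  int
		wantErr bool
	}{
		{0, false}, // unset: caller picks the default later
		{1, true},  // below minParityDisks (2)
		{2, false}, // lower bound
		{8, false}, // upper bound: setDriveCount / 2
		{9, true},  // above setDriveCount / 2
	}
	for _, c := range cases {
		err := ValidateParity(c.parity, 16)
		if (err != nil) != c.wantErr {
			t.Errorf("ValidateParity(%d, 16) = %v, wantErr %v", c.parity, err, c.wantErr)
		}
	}
}
```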

// Validates the parity disks.
func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
	// SS parity disks should be greater than or equal to minParityDisks.
	// Parity below minParityDisks is not supported.
	if ssParity > 0 && ssParity < minParityDisks {
		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d",
			ssParity, minParityDisks)
	}

	// RRS parity disks should be greater than or equal to minParityDisks.
	// Parity below minParityDisks is not supported.
	if rrsParity < minParityDisks {
	if rrsParity > 0 && rrsParity < minParityDisks {
		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
	}

@@ -210,7 +213,7 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
	}

	if ssParity > 0 && rrsParity > 0 {
		if ssParity < rrsParity {
		if ssParity > 0 && ssParity < rrsParity {
			return fmt.Errorf("Standard storage class parity disks %d should be greater than or equal to Reduced redundancy storage class parity disks %d", ssParity, rrsParity)
		}
	}
@@ -218,14 +221,16 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
}

// GetParityForSC - Returns the data and parity drive count based on storage class
// If storage class is set using the env vars MINIO_STORAGE_CLASS_RRS and MINIO_STORAGE_CLASS_STANDARD
// or config.json fields
// -- corresponding values are returned
// If storage class is not set during startup, default values are returned
// -- Default for Reduced Redundancy Storage class is, parity = 2 and data = N-Parity
// -- Default for Standard Storage class is, parity = N/2, data = N/2
// If storage class is empty
// -- standard storage class is assumed and corresponding data and parity is returned
// If storage class is set using the env vars MINIO_STORAGE_CLASS_RRS and
// MINIO_STORAGE_CLASS_STANDARD or server config fields corresponding values are
// returned.
//
// -- if input storage class is empty then standard is assumed
// -- if input is RRS but RRS is not configured default '2' parity
// for RRS is assumed
// -- if input is STANDARD but STANDARD is not configured '0' parity
// is returned, the caller is expected to choose the right parity
// at that point.
func (sCfg Config) GetParityForSC(sc string) (parity int) {
	switch strings.TrimSpace(sc) {
	case RRS:
@@ -241,7 +246,7 @@ func (sCfg Config) GetParityForSC(sc string) (parity int) {

// GetDMA - returns DMA configuration.
func (sCfg Config) GetDMA() string {
	return sCfg.DMA.DMA
	return sCfg.DMA
}

// Enabled returns if etcd is enabled.
@@ -254,8 +259,6 @@ func Enabled(kvs config.KVS) bool {
// LookupConfig - lookup storage class config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
	cfg = Config{}
	cfg.Standard.Parity = setDriveCount / 2
	cfg.RRS.Parity = defaultRRSParity

	if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
		return Config{}, err
@@ -271,9 +274,6 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
			return Config{}, err
		}
	}
	if cfg.Standard.Parity == 0 {
		cfg.Standard.Parity = setDriveCount / 2
	}

	if rrsc != "" {
		cfg.RRS, err = parseStorageClass(rrsc)
@@ -291,7 +291,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
	if dma != DMAReadWrite && dma != DMAWrite {
		return Config{}, errors.New(`valid dma values are "read-write" and "write"`)
	}
	cfg.DMA.DMA = dma
	cfg.DMA = dma

	// Validation is done after parsing both the storage classes. This is needed because we need one
	// storage class value to deduce the correct value of the other storage class.

@@ -70,7 +70,7 @@ func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointServerPools EndpointServerP
// HasLogListeners returns true if console log listeners are registered
// for this node or peers
func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
	return sys != nil && sys.pubsub.HasSubscribers()
	return sys != nil && sys.pubsub.NumSubscribers() > 0
}

// Subscribe starts console logging for this node.

@@ -18,6 +18,7 @@ import (
	"errors"
	"math/rand"
	"net/http"
	"os"
	"reflect"
	"strconv"
	"strings"
@@ -410,6 +411,9 @@ func NewKMS(cfg KMSConfig) (kms KMS, err error) {
	} else if cfg.Vault.Enabled && cfg.Kes.Enabled {
		return kms, errors.New("Ambiguous KMS configuration: vault configuration and kes configuration are provided at the same time")
	} else if cfg.Vault.Enabled {
		if v, ok := os.LookupEnv("MINIO_KMS_VAULT_DEPRECATION"); !ok || v != "off" { // TODO(aead): Remove once Vault support has been removed
			return kms, errors.New("Hashicorp Vault is deprecated and will be removed Oct. 2021. To temporarily enable Hashicorp Vault support, set MINIO_KMS_VAULT_DEPRECATION=off")
		}
		kms, err = NewVault(cfg.Vault)
		if err != nil {
			return kms, err