Mirror of https://github.com/minio/minio.git (synced 2026-02-05 02:10:14 -05:00)

Compare commits: RELEASE.20… ... key-versio… (461 commits)
Commit SHAs:

01cb705c36 9ea14c88d8 30a1261c22 0e017ab071 f14198e3dc 93c389dbc9 ddd9a84cd7 b7540169a2
f01374950f 18aceae620 427826abc5 2780778c10 2d8ba15b9e bd6dd55e7f 0d7408fc99 864f80e226
0379d6a37f 43aa8e4259 e2ed696619 fb3f67a597 7ee75368e0 1d6478b8ae 0581001b6f 479303e7e9
89aec6804b eb33bc6bf5 3310f740f0 4595293ca0 02a67cbd2a 2b34e5b9ae a6258668a6 d0cada583f
0bd8f06b62 6640be3bed eafeb27e90 f2c9eb0f79 f2619d1f62 8c70975283 01447d2438 07f31e574c
8d223e07fb 4041a8727c 5f243fde9a a0e3f1cc18 b1bc641105 e0c8738230 9aa24b1920 53d40e41bc
e88d494775 b67f0cf721 46922c71b7 670edb4fcf 42d4ab2a0a 5e2eb372bf cccb37a5ac dbf31af6cb
93e40c3ab4 8aa0e9ff7c bbd6f18afb 2a3acc4f24 11507d46da f9c62dea55 8c2c92f7af 4c71f1b4ec
6cd8a372cb 953a3e2bbd 7cc0c69228 f129fd48f2 bc4008ced4 526053339b 62a35b3e77 39df134204
ad4cbce22d 90f5e1e5f6 aeabac9181 b312f13473 727a803bc0 d0e443172d 60446e7ac0 b8544266e5
437dd4e32a 447054b841 9bf43e54cd 60f8423157 4355ea3c3f e30f1ad7bd f00c8c4cce 703f51164d
b8dde47d4e 7fa3e39f85 4df7a3aa8f 64a8f2e554 f4fd4ea66d 712fe1a8df 4a319bedc9 bdb3db6dad
abb385af41 4ee62606e4 079d64c801 dcc000ae2c c5d19ecebb ed29a525b3 020c46cd3c 827004cd6d
779ec8f0d4 3d0f513ee2 4b6eadbd80 6f47414b23 224a27992a 232544e1d8 dbcb71828d b9196757fd
b4ac53d157 4952bdb770 00b2ef2932 4536ecfaa4 43a7402968 330dca9a35 ddd137d317 06ddd8770e
16f8cf1c52 01e520eb23 02f770a0c0 2f4c79bc0f 969ee7dfbe 5f0b086b05 68b004a48f 54ecce66f0
2b008c598b 86d02b17cf c1a95a70ac f246c9053f 9cdd204ae4 7b3eb9f7f8 d56ef8dbe1 a248ed5ff5
5bb31e4883 aff2a76d80 eddbe6bca2 734d1e320a b8dab7b1a9 abd6bf060d f0d4ef604c 2712f75762
4c46668da8 02e93fd6ba 366876e98b d202fdd022 c07e5b49d4 9a39f8ad4d 7e0c1c9413 485d833cd7
e8a476ef5a 267f0ecea2 4ee3434854 0e9854372e b5177993b3 55f5c18fd9 8ce101c174 4972735507
e6ca6de194 cefc43e4da 25e34fda5f 4208d7af5a 8d42f37e4b 7cb4b5c636 1615920f48 7ee42b3ff5
a6f1e727fb c1fc7779ca b3ab7546ee ad88a81e3d c4239ced22 f85c28e960 f7e176d4ca 72a0d14195
6abe4128d7 ed5ed7e490 51410c9023 96ca402dcd 3da7c9cce3 a14e19ec54 e091dde041 d10bb7e1b6
7ebceacac6 1593cb615d 86a41d1631 d4157b819c e0aceca1b7 87804624fe e029f8a9d7 1bc6681176
28322124e2 cbfe9de3e7 dc86b8d9d4 ba70118e2b cb1d3e50f7 ded0b19d97 d0bb3dd136 ab7714b01e
e5b18df6db 0abfd1bcb1 6186d11761 e8b457e8a6 afea40cc0f 402b798f1b 4759532e90 7f1e1713ab
b2c5819dbc 2b0156b1fc f6f0807c86 0c53d86017 6a6ee46d76 974cbb3bb7 03e996320e 78fcb76294
3d152015eb ade8925155 05a6c170bf e1c2344591 48a591e9b4 fa5d9c02ef 5bd27346ac 3c82cf9327
70d40083e9 8a30967542 229f04ab79 1123dc3676 5bf41aff17 e47d787adb 398ffb1136 5862582cd7
e36b1146d6 c28a4beeb7 15ab0808b3 3bae73fb42 bc527eceda b963f36e1e cdd7512a2e a6d5287310
22822f4151 0b7aa6af87 8c9ab85cfa b1c849bedc fb24bcfee0 8268c12cfb 3f39da48ea 9d5cdaa2e3
84e122c5c3 261111e728 0f1e8db4c5 64e803b136 a0f9e9f661 b6b7cddc9c 241be9709c 85f08d7752
6be88a2b99 060276932d 6224849fd1 9b79eec29e c2e318dd40 69258d5945 d7ef6315ae aaf4fb1184
f05641c3c6 7a34c88d73 6c746843ac bb07df7e7b 1cb824039e 504e52b45e 38c0840834 c65e67c357
fb2360ff88 1a2de1bdde 993b97f1db 1c4d28d7af af55f37b27 2d67c26794 006cacfefb c28f09d4a7
73992d2b9f a8f143298f 2d44c161c7 9511056f44 fb4ad000b6 a8ff12bc72 1e1bd3afd9 7b239ae154
8a11282522 85c3db3a93 37383ecd09 6378ca10a4 9e81ccd2d9 72cff79c8a a5702f978e 4687c4616f
d8dfb57d5c b07c58aa05 cc0c41d216 f1302c40fe 3b1aa40372 d96798ae7b b508264ac4 db78431b1d
743ddb196a 3ffeabdfcb 51b1f41518 e7a56f35b9 516af01a12 acdb355070 37c02a5f7b 04be352ae9
53eb7656de d8f0e0ea6e 2e0fd2cba9 909b169593 4e67a4027e 49055658a9 89c58ce87d 14876a4df1
2681219039 5da7f0100a dea9abed29 e3eb5c1328 fb9364f1fb 6efb56851c db2c7ed1d1 74c047cb03
50a5ad48fc 292fccff6e a9dc061d84 01a8c09920 4c8562bcec f13c04629b 80ff907d08 673df6d517
2d40433bc1 3bc39db34e a17f14f73a 6651c655cb 3ae104edae c87a489514 a60267501d 641a56da0d
59788e25c7 a16193bb50 132e7413ba 1966668066 064f36ca5a 15b609ecea 4a1edfd9aa b7f319b62a
33c101544d 21cf29330e 3b21bb5be8 6fe2b3f901 b368d4cc13 0680af7414 91805bcab6 c0e2886e37
4f5dded4d4 b3a94c4e85 8e618d45fc 3ef59d2821 23db4958f5 d9ee668b6d 2e5d792f0c b276651eaa
3535197f99 95f076340a 698bb93a46 2584430141 ded373e600 e8c54c3d6c f944a42886 eff0ea43aa
3b602bb532 459985f0fa d0080046c2 7fcb428622 4ea6f94ed8 83adc2eebf 989c318a28 ef802f2b2c
f5d2fbc84c e139673969 a8c6465f22 27538e2d22 6c6f0987dc 5f64658faa ce183cb2b4 b3bac73c0f
e726d8ff0f f4230777b3 380233d646 d592bc0c1c 0d0b0aa599 b433bf14ba cf371da346 107d951893
22c53b1c70 88926ad8e9 32d04091a2 b6d4a77b94 be84a4fd68 2ec1f404ac 2040559f71 ca0ce4c6ef
757cf413cb b35acb3dbc e404abf103 f7ff19cb18 f736702da8 91faaa1387 68a9f521d5 f365a98029
47bbc272df aebac90013 7ca4ba77c4 13512170b5 154fcaeb56 722118386d 709612cb37 b35d083872
5e7b243bde f8f9fc77ac 499531f0b5 3c2141513f 602f6a9ad0 22c5a5b91b 41f508765d 7dccd1f589
55ff598b23 a22ce4550c 168ae81b1f 5f6a25cdd0 be97ae4c5d 4d7d008741 2d7a3d1516 dfab400d43
70078eab10 3415c4dd1e e200808ab7 fae563b85d 3e6dc02f8f 95e4cbbfde 2825294b7b bce93b5cfa
7a4b250c8b e5335450a4 a6ffdf1dd4 69e41f87ef ee48f9f206 9ba39d7fad d2fb371f80 2f9018f03b
eb990f64a9 bbb64eaade 7bd1d899bc c91d1ec2e3 c50b64027d
.github/workflows/go-cross.yml (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
.github/workflows/go-fips.yml (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
.github/workflows/go-healing.yml (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
.github/workflows/go-lint.yml (vendored, 14 changed lines)

@@ -20,24 +20,14 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
-        os: [ubuntu-latest, Windows]
+        go-version: [1.24.x]
+        os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
-      - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'Windows'
-        env:
-          CGO_ENABLED: 0
-          GO111MODULE: on
-        run: |
-          Set-MpPreference -DisableRealtimeMonitoring $true
-          netsh int ipv4 set dynamicport tcp start=60000 num=61000
-          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 120m ./...
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
.github/workflows/go-resiliency.yml (vendored, new file, 39 lines)

@@ -0,0 +1,39 @@
name: Resiliency Functional Tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.24.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-resiliency
.github/workflows/go.yml (vendored, 3 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
@@ -39,3 +39,4 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify
+          make test-timeout
.github/workflows/iam-integrations.yaml (vendored, 36 changed lines)

@@ -61,7 +61,7 @@ jobs:
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
       # the tests.
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
        ldap: ["", "localhost:389"]
        etcd: ["", "http://localhost:2379"]
        openid: ["", "http://127.0.0.1:5556/dex"]
@@ -125,3 +125,37 @@ jobs:
        if: matrix.openid == 'http://127.0.0.1:5556/dex'
        run: |
          make test-site-replication-oidc
+  iam-import-with-missing-entities:
+    name: Test IAM import in new cluster with missing entities
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster there are missing groups etc
+        run: |
+          make test-iam-import-with-missing-entities
+  iam-import-with-openid:
+    name: Test IAM import in new cluster with opendid configurations
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster with openid configurations
+        run: |
+          make test-iam-import-with-openid
.github/workflows/mint.yml (vendored, 2 changed lines)

@@ -29,7 +29,7 @@ jobs:
       - name: setup-go-step
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.24.x

       - name: github sha short
         id: vars
.github/workflows/replication.yaml (vendored, 9 changed lines)

@@ -21,7 +21,7 @@ jobs:

     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]

     steps:
       - uses: actions/checkout@v4
@@ -40,6 +40,7 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-ilm
+          make test-ilm-transition

       - name: Test PBAC
         run: |
@@ -70,3 +71,9 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-versioning
+
+      - name: Test Multipart upload with failures
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-multipart
.github/workflows/root-disable.yml (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]

     steps:
.github/workflows/run-mint.sh (vendored, 3 changed lines)

@@ -15,6 +15,9 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 ## change working directory
 cd .github/workflows/mint

+## always pull latest
+docker pull docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml up -d
 sleep 1m
.github/workflows/upgrade-ci-cd.yaml (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]

     steps:
.github/workflows/vulncheck.yml (vendored, 4 changed lines)

@@ -21,8 +21,8 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.4
-          check-latest: true
+          go-version: 1.24.x
+          cached: false
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
@@ -1,36 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-
-  misspell:
-    locale: US
-
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
-
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - durationcheck
     - forcetypeassert
     - gocritic
-    - gofumpt
-    - goimports
     - gomodguard
     - govet
     - ineffassign
     - misspell
     - revive
     - staticcheck
-    - tenv
-    - typecheck
     - unconvert
     - unused
+    - usetesting
     - whitespace
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 issues:
-  exclude-use-default: false
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
   max-issues-per-linter: 100
   max-same-issues: 100
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
.typos.toml (15 changed lines)

@@ -1,8 +1,5 @@
 [files]
-extend-exclude = [
-  ".git/",
-  "docs/",
-]
+extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
+ignore-hidden = false

 [default]
@@ -16,6 +13,8 @@ extend-ignore-re = [
   "MIIDBTCCAe2gAwIBAgIQWHw7h.*",
   'http\.Header\{"X-Amz-Server-Side-Encryptio":',
   "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
+  "ERRO:",
+  "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line
 ]

 [default.extend-words]
@@ -36,3 +35,11 @@ extend-ignore-re = [
 "TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
 "thr" = "thr"
 "toi" = "toi"
+
+[type.go]
+extend-ignore-identifiers-re = [
+  # Variants of `typ` used to mean `type` in golang as it is otherwise a
+  # keyword - some of these (like typ1 -> type1) can be fixed, but probably
+  # not worth the effort.
+  "[tT]yp[0-9]*",
+]
@@ -12,8 +12,9 @@ Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to

 ```sh
 git clone https://github.com/minio/minio
 cd minio
 go install -v
-ls /go/bin/minio
+ls $(go env GOPATH)/bin/minio
 ```

 ### Set up git remote as ``upstream``
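Worth noting on the `ls` change above: `go install` drops the binary in `$(go env GOPATH)/bin`, and `/go` is the GOPATH only inside the official `golang` container images, so the portable form works on any developer machine.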
@@ -1,5 +1,7 @@
 FROM minio/minio:latest

+RUN chmod -R 777 /usr/bin
+
 COPY ./minio /usr/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -1,12 +0,0 @@ (file deleted)
-FROM minio/minio:latest
-
-ENV PATH=/opt/bin:$PATH
-
-COPY ./minio /opt/bin/minio
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-
-VOLUME ["/data"]
-
-CMD ["minio"]
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine as build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
@@ -1,35 +1,39 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
+
+WORKDIR /build

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
+    apk add -U --no-cache bash && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-        curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-        chmod +x /go/bin/curl; \
-    fi

 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
     minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

+COPY dockerscripts/download-static-curl.sh /build/download-static-curl
+RUN chmod +x /build/download-static-curl && \
+    /build/download-static-curl
+
 FROM registry.access.redhat.com/ubi9/ubi-micro:latest

 ARG RELEASE
@@ -51,10 +55,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
-COPY --from=build /go/bin/cur* /usr/bin/
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/curl* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
@@ -1,59 +0,0 @@ (file deleted)
-FROM golang:1.21-alpine as build
-
-ARG TARGETARCH
-ARG RELEASE
-
-ENV GOPATH /go
-ENV CGO_ENABLED 0
-
-# Install curl and minisign
-RUN apk add -U --no-cache ca-certificates && \
-    apk add -U --no-cache curl && \
-    go install aead.dev/minisign/cmd/minisign@v0.2.1
-
-# Download minio binary and signature file
-RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
-    chmod +x /go/bin/minio
-
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-        curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-        chmod +x /go/bin/curl; \
-    fi
-
-# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
-RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
-
-FROM registry.access.redhat.com/ubi9/ubi-micro:latest
-
-ARG RELEASE
-
-LABEL name="MinIO" \
-      vendor="MinIO Inc <dev@min.io>" \
-      maintainer="MinIO Inc <dev@min.io>" \
-      version="${RELEASE}" \
-      release="${RELEASE}" \
-      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
-      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
-
-ENV MINIO_ACCESS_KEY_FILE=access_key \
-    MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_ROOT_USER_FILE=access_key \
-    MINIO_ROOT_PASSWORD_FILE=secret_key \
-    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
-    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
-    MINIO_CONFIG_ENV_FILE=config.env
-
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/cur* /usr/bin/
-
-COPY CREDITS /licenses/CREDITS
-COPY LICENSE /licenses/LICENSE
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
-VOLUME ["/data"]
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-CMD ["minio"]
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
Makefile (55 changed lines)

@@ -2,8 +2,8 @@ PWD := $(shell pwd)
 GOPATH := $(shell go env GOPATH)
 LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)

-GOARCH := $(shell go env GOARCH)
-GOOS := $(shell go env GOOS)
+GOOS ?= $(shell go env GOOS)
+GOARCH ?= $(shell go env GOARCH)

 VERSION ?= $(shell git describe --tags)
 REPO ?= quay.io/minio
@@ -24,8 +24,6 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
	@mkdir -p ${GOPATH}/bin
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
-	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.10-0.20240227114326-6d6f813fff1b
-	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

 crosscompile: ## cross compile minio
	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -34,11 +32,14 @@ verifiers: lint check-gen

 check-gen: ## check for updated autogenerated files
	@go generate ./... >/dev/null
+	@go mod tidy -compat=1.21
	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
+	@(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false)

 lint: getdeps ## runs golangci-lint suite of linters
	@echo "Running $@ check"
	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@command typos && typos ./ || echo "typos binary is not found.. skipping.."

 lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
	@echo "Running $@ check"
@@ -57,6 +58,10 @@ test-ilm: install-race
	@echo "Running ILM tests"
	@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh

+test-ilm-transition: install-race
+	@echo "Running ILM tiering tests with healing"
+	@env bash $(PWD)/docs/bucket/lifecycle/setup_ilm_transition.sh
+
 test-pbac: install-race
	@echo "Running bucket policies tests"
	@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
@@ -86,14 +91,22 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)

 test-iam: install-race ## verify IAM (external IDP, etcd backends)
	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd

 test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
	@echo "Running upgrade tests for IAM (LDAP backend)"
	@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh

+test-iam-import-with-missing-entities: install-race ## test import of external iam config withg missing entities
+	@echo "Test IAM import configurations with missing entities"
+	@env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh
+
+test-iam-import-with-openid: install-race
+	@echo "Test IAM import configurations with openid"
+	@env bash $(PWD)/docs/distributed/iam-import-with-openid.sh
+
 test-sio-error:
	@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -130,6 +143,14 @@ test-site-replication-minio: install-race ## verify automatic site replication
	@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
	@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

+test-multipart: install-race ## test multipart
+	@echo "Test multipart behavior when part files are missing"
+	@(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)
+
+test-timeout: install-race ## test multipart
+	@echo "Test server timeout"
+	@(env bash $(PWD)/buildscripts/test-timeout.sh)
+
 verify: install-race ## verify minio various setups
	@echo "Verifying build with race"
	@(env bash $(PWD)/buildscripts/verify-build.sh)
@@ -157,7 +178,7 @@ build-debugging:

 build: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary to './minio'"
-	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

 hotfix-vars:
	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
@@ -165,9 +186,9 @@ hotfix-vars:
	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))

 hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
-	@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.9/pkger_2.2.9_linux_amd64.deb
-	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
-	@sudo apt install ./pkger_2.2.9_linux_amd64.deb --yes
+	@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb
+	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service
+	@sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes
	@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@@ -177,11 +198,11 @@ hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
	@pkger -r $(VERSION) --ignore

 hotfix-push: hotfix
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)"

 docker-hotfix-push: docker-hotfix
	@docker push -q $(TAG) && echo "Published new container $(TAG)"
@@ -194,6 +215,10 @@ docker: build ## builds minio docker container
	@echo "Building minio docker image '$(TAG)'"
	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

+test-resiliency: build
+	@echo "Running resiliency tests"
+	@(DOCKER_COMPOSE_FILE=$(PWD)/docs/resiliency/docker-compose.yaml env bash $(PWD)/docs/resiliency/resiliency-tests.sh)
+
 install-race: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary with -race to './minio'"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
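With `GOOS` and `GOARCH` switched from `:=` to `?=` and threaded through the `build` recipe above, a cross-build can now be requested from the environment, for example `GOOS=linux GOARCH=arm64 make build`, without editing the Makefile.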
@@ -4,7 +4,7 @@

 [](https://min.io)

-MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
+MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).

 This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

@@ -93,7 +93,6 @@ The following table lists supported architectures. Replace the `wget` URL with t
 | 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
 | 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
-| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
 | IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |

 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

@@ -123,7 +122,7 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`

 ## Install from Source

-Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable)
+Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)

 ```sh
 go install github.com/minio/minio@latest
@@ -8,7 +8,7 @@
 #
 # This script assumes that LDAP server is at:
 #
-# `localhost:1389`
+# `localhost:389`
 #
 # if this is not the case, set the environment variable
 # `_MINIO_LDAP_TEST_SERVER`.
@@ -41,7 +41,7 @@ __init__() {
	fi

	if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
-		export _MINIO_LDAP_TEST_SERVER=localhost:1389
+		export _MINIO_LDAP_TEST_SERVER=localhost:389
		echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
	fi
@@ -58,7 +58,7 @@ create_iam_content_in_old_minio() {
	mc alias set old-minio http://localhost:9000 minioadmin minioadmin
	mc ready old-minio
	mc idp ldap add old-minio \
-		server_addr=localhost:1389 \
+		server_addr=localhost:389 \
		server_insecure=on \
		lookup_bind_dn=cn=admin,dc=min,dc=io \
		lookup_bind_password=admin \
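The updated script assumes the test LDAP server is now published on 389, the standard LDAP port, rather than 1389, the unprivileged alternative that containerized test servers often default to.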
@@ -69,8 +69,10 @@ __init__() {

	## this is needed because github actions don't have
	## docker-compose on all runners
-	go install github.com/docker/compose/v2/cmd@latest
-	mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
+	COMPOSE_VERSION=v2.35.1
+	mkdir -p /tmp/gopath/bin/
+	wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64
+	chmod +x /tmp/gopath/bin/docker-compose

	cleanup
buildscripts/multipart-quorum-test.sh (new file, 126 lines)

@@ -0,0 +1,126 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function start_minio_10drive() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 5

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${PWD}/mc" mb --with-versioning minio/bucket

	export AWS_ACCESS_KEY_ID=minio
	export AWS_SECRET_ACCESS_KEY=minio123
	aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json
	uploadId=$(jq -r '.UploadId' upload-id.json)

	truncate -s 5MiB file-5mib
	for i in {1..2}; do
		aws --endpoint-url http://localhost:"$start_port" s3api upload-part \
			--upload-id "$uploadId" --bucket bucket --key obj-1 \
			--part-number "$i" --body ./file-5mib
	done
	for i in {1..6}; do
		find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete
	done
	cat <<EOF >parts.json
{
    "Parts": [
        {
            "PartNumber": 1,
            "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
        },
        {
            "PartNumber": 2,
            "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
        }
    ]
}
EOF
	err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1)
	rv=$?
	if [ $rv -eq 0 ]; then
		echo "Failed to receive an error"
		exit 1
	fi
	echo "Received an error during complete-multipart as expected: $err"
}

function main() {
	start_port=$(shuf -i 10000-65000 -n 1)
	start_minio_10drive ${start_port}
}

main "$@"
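The arithmetic behind deleting `part.1` from exactly 6 of the 10 drives: assuming the default erasure coding of EC:4 parity on a 10-drive set (parity is configurable, so this is an assumption), each part is stored as 6 data plus 4 parity shards and a read needs any 6 of them. Wiping the part from 6 drives leaves only 4 shards, below read quorum, so `complete-multipart-upload` must fail, which is exactly what the script asserts.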
buildscripts/test-timeout.sh (new file, 137 lines)

@@ -0,0 +1,137 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function gen_put_request() {
	hdr_sleep=$1
	body_sleep=$2

	echo "PUT /testbucket/testobject HTTP/1.1"
	sleep $hdr_sleep
	echo "Host: foo-header"
	echo "User-Agent: curl/8.2.1"
	echo "Accept: */*"
	echo "Content-Length: 30"
	echo ""

	sleep $body_sleep
	echo "random line 0"
	echo "random line 1"
	echo ""
	echo ""
}

function send_put_object_request() {
	hdr_timeout=$1
	body_timeout=$2

	start=$(date +%s)
	timeout 5m bash -c "gen_put_request $hdr_timeout $body_timeout | netcat 127.0.0.1 $start_port | read" || return -1
	[ $(($(date +%s) - start)) -gt $((srv_hdr_timeout + srv_idle_timeout + 1)) ] && return -1
	return 0
}

function test_minio_with_timeout() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" --read-header-timeout ${srv_hdr_timeout}s --idle-timeout ${srv_idle_timeout}s "${WORK_DIR}/disk/" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 1

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	set -e

	"${PWD}/mc" mb minio/testbucket
	"${PWD}/mc" anonymous set public minio/testbucket

	# slow header writing
	send_put_object_request 20 0 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header write and slow bodywrite
	send_put_object_request 0 40 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header and body write
	send_put_object_request 1 1 || exit -1
	"${PWD}/mc" stat minio/testbucket/testobject || exit -1
}

function main() {
	export start_port=$(shuf -i 10000-65000 -n 1)
	export srv_hdr_timeout=5
	export srv_idle_timeout=5
	export -f gen_put_request

	test_minio_with_timeout ${start_port}
}

main "$@"
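The script above drives a raw HTTP request through netcat so it can stall between header and body bytes, exercising the new `--read-header-timeout` and `--idle-timeout` server flags. As a minimal sketch of the semantics being tested, these flags presumably map onto the standard library's `http.Server` deadlines (an assumption; MinIO wires them into its own server setup):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":9000",
		// A client that dribbles its request headers slower than this
		// deadline has its connection closed before any handler runs.
		ReadHeaderTimeout: 5 * time.Second,
		// Keep-alive connections that stay idle longer than this are closed.
		IdleTimeout: 5 * time.Second,
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}),
	}
	log.Fatal(srv.ListenAndServe())
}
```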
@@ -39,7 +39,7 @@ function start_minio_3_node() {

	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"

-	/tmp/mc ready myminio
+	timeout 15m /tmp/mc ready myminio || fail

	# Wait for all drives to be online and formatted
	while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
@@ -62,48 +62,23 @@ function start_minio_3_node() {
		fi

		# Failure
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		pkill -9 minio
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	done

	if ! ps -p $pid1 1>&2 >/dev/null; then
-		echo "server1 log:"
-		cat "${WORK_DIR}/dist-minio-server1.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio-server-1 is not running." && fail
	fi

	if ! ps -p $pid2 1>&2 >/dev/null; then
-		echo "server2 log:"
-		cat "${WORK_DIR}/dist-minio-server2.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio-server-2 is not running." && fail
	fi

	if ! ps -p $pid3 1>&2 >/dev/null; then
-		echo "server3 log:"
-		cat "${WORK_DIR}/dist-minio-server3.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio-server-3 is not running." && fail
	fi

	if ! pkill minio; then
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	fi

	sleep 1
@@ -115,8 +90,18 @@ function start_minio_3_node() {
	fi
 }

+function fail() {
+	for i in $(seq 1 3); do
+		echo "server$i log:"
+		cat "${WORK_DIR}/dist-minio-server$i.log"
+	done
+	echo "FAILED"
+	purge "$WORK_DIR"
+	exit 1
+}
+
 function check_online() {
-	if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
+	if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
		echo "1"
	fi
 }
@@ -47,43 +47,25 @@ function start_minio_3_node() {
	disown $pid3

	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
-	/tmp/mc ready myminio
+	timeout 15m /tmp/mc ready myminio || fail

	[ ${first_time} -eq 0 ] && upload_objects
	[ ${first_time} -ne 0 ] && sleep 120

	if ! ps -p $pid1 1>&2 >/dev/null; then
-		echo "server1 log:"
-		cat "${WORK_DIR}/dist-minio-server1.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 1 is not running" && fail
	fi

	if ! ps -p $pid2 1>&2 >/dev/null; then
-		echo "server2 log:"
-		cat "${WORK_DIR}/dist-minio-server2.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 2 is not running" && fail
	fi

	if ! ps -p $pid3 1>&2 >/dev/null; then
-		echo "server3 log:"
-		cat "${WORK_DIR}/dist-minio-server3.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 3 is not running" && fail
	fi

	if ! pkill minio; then
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	fi

	sleep 1
@@ -96,7 +78,7 @@ function start_minio_3_node() {
 }

 function check_heal() {
-	if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
+	if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
		return 1
	fi

@@ -118,6 +100,17 @@ function purge() {
	rm -rf "$1"
 }

+function fail() {
+	for i in $(seq 1 3); do
+		echo "server$i log:"
+		cat "${WORK_DIR}/dist-minio-server$i.log"
+	done
+	pkill -9 minio
+	echo "FAILED"
+	purge "$WORK_DIR"
+	exit 1
+}
+
 function __init__() {
	echo "Initializing environment"
	mkdir -p "$WORK_DIR"
@@ -155,14 +148,7 @@ function perform_test() {
	check_heal ${1}
	rv=$?
	if [ "$rv" == "1" ]; then
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		pkill -9 minio
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	fi
 }
@@ -38,6 +38,7 @@ import (
	objectlock "github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/event"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/mux"
	"github.com/minio/pkg/v3/policy"
@@ -427,6 +428,21 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
		cfgPath := pathJoin(bi.Name, cfgFile)
		bucket := bi.Name
		switch cfgFile {
		case bucketPolicyConfig:
			config, _, err := globalBucketMetadataSys.GetBucketPolicy(bucket)
			if err != nil {
				if errors.Is(err, BucketPolicyNotFound{Bucket: bucket}) {
					continue
				}
				writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
				return
			}
			configData, err := json.Marshal(config)
			if err != nil {
				writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
				return
			}
			rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
		case bucketNotificationConfig:
			config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
			if err != nil {
@@ -796,11 +812,12 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			}

			bucketMap[bucket].NotificationConfigXML = configData
			bucketMap[bucket].NotificationConfigUpdatedAt = updatedAt
			rpt.SetStatus(bucket, fileName, nil)
		case bucketPolicyConfig:
			// Error out if Content-Length is beyond allowed size.
			if sz > maxBucketPolicySize {
				rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
				rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyTooLarge.String()))
				continue
			}

@@ -818,7 +835,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *

			// Version in policy must not be empty
			if bucketPolicy.Version == "" {
				rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
				rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyInvalidVersion.String()))
				continue
			}
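
The two hunks above swap fmt.Errorf for errors.New because the argument is already a complete message, not a format string. Presumably this also satisfies Go's printf vet check, which flags non-constant format strings; a minimal standalone illustration (the message text is hypothetical):

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "policy is 100% over the size limit" // contains a literal '%'
	// fmt.Errorf(msg) would parse msg as a format string: the "% o"
	// sequence becomes a verb and `go vet` flags the call.
	plain := errors.New(msg)                          // uses the string verbatim
	wrapped := fmt.Errorf("import failed: %w", plain) // Errorf is for real verbs
	fmt.Println(plain)
	fmt.Println(wrapped)
}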

@@ -964,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			rpt.SetStatus(bucket, "", err)
			continue
		}

	}

	rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
@@ -1023,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
			}
			if len(diffCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
				xhttp.Flush(w)
			}
		case <-keepAliveTicker.C:
			if len(diffCh) > 0 {
@@ -1032,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
			xhttp.Flush(w)
		case <-ctx.Done():
			return
		}
@@ -1082,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
			}
			if len(mrfCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
				xhttp.Flush(w)
			}
		case <-keepAliveTicker.C:
			if len(mrfCh) > 0 {
@@ -1091,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
			xhttp.Flush(w)
		case <-ctx.Done():
			return
		}
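
Each hunk above replaces a bare w.(http.Flusher).Flush() with xhttp.Flush(w). The helper's body is not part of this diff; a plausible sketch of such a wrapper follows, the point being that a checked type assertion cannot panic when the ResponseWriter does not implement http.Flusher:

package xhttp // hypothetical standalone version of the helper

import "net/http"

// Flush pushes any buffered response data to the client when the
// underlying writer supports it; the checked assertion avoids the
// panic that a bare w.(http.Flusher).Flush() risks on writers that
// do not implement http.Flusher.
func Flush(w http.ResponseWriter) {
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}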

@@ -216,6 +216,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
			Description:    err.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	case errors.Is(err, errTierInvalidConfig):
		apiErr = APIError{
			Code:           "XMinioAdminTierInvalidConfig",
			Description:    err.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	default:
		apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
	}

@@ -125,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
	}

	if err = validateConfig(ctx, cfg, subSys); err != nil {

		var validationErr ldap.Validation
		if errors.As(err, &validationErr) {
			// If we got an LDAP validation error, we need to send appropriate
@@ -416,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht
		return
	}
	if err = validateConfig(ctx, cfg, subSys); err != nil {

		var validationErr ldap.Validation
		if errors.As(err, &validationErr) {
			// If we got an LDAP validation error, we need to send appropriate

@@ -190,7 +190,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
//
// PUT /minio/admin/v3/idp/ldap/add-service-account
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true)
	if APIError.Code != "" {
		writeErrorResponseJSON(ctx, w, APIError, r.URL)
		return
@@ -214,10 +214,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
	}

	// Check if we are creating svc account for request sender.
	isSvcAccForRequestor := false
	if targetUser == requestorUser || targetUser == requestorParentUser {
		isSvcAccForRequestor = true
	}
	isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser

	var (
		targetGroups []string
@@ -345,7 +342,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
			Name:          newCred.Name,
			Description:   newCred.Description,
			Claims:        opts.claims,
			SessionPolicy: createReq.Policy,
			SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
			Status:        auth.AccountOn,
			Expiration:    createReq.Expiration,
		},
@@ -479,3 +476,178 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ

	writeSuccessResponseJSON(w, encryptedData)
}

// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	dnList := r.Form["userDNs"]
	isAll := r.Form.Get("all") == "true"
	selfOnly := !isAll && len(dnList) == 0

	if isAll && len(dnList) > 0 {
		// This should be checked on client side, so return generic error
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Empty DN list and not self, list access keys for all users
	if isAll {
		if !globalIAMSys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.ListUsersAdminAction,
			ConditionValues: getConditionValues(r, "", cred),
			IsOwner:         owner,
			Claims:          cred.Claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else if len(dnList) == 1 {
		var dn string
		foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
		if err == nil {
			dn = foundResult.NormDN
		}
		if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
			selfOnly = true
		}
	}

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
		DenyOnly:        selfOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	if selfOnly && len(dnList) == 0 {
		selfDN := cred.AccessKey
		if cred.ParentUser != "" {
			selfDN = cred.ParentUser
		}
		dnList = append(dnList, selfDN)
	}

	var ldapUserList []string
	if isAll {
		ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for user := range ldapUsers {
			ldapUserList = append(ldapUserList, user)
		}
	} else {
		for _, userDN := range dnList {
			// Validate the userDN
			foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			if foundResult == nil {
				continue
			}
			ldapUserList = append(ldapUserList, foundResult.NormDN)
		}
	}

	listType := r.Form.Get("listType")
	var listSTSKeys, listServiceAccounts bool
	switch listType {
	case madmin.AccessKeyListUsersOnly:
		listSTSKeys = false
		listServiceAccounts = false
	case madmin.AccessKeyListSTSOnly:
		listSTSKeys = true
		listServiceAccounts = false
	case madmin.AccessKeyListSvcaccOnly:
		listSTSKeys = false
		listServiceAccounts = true
	case madmin.AccessKeyListAll:
		listSTSKeys = true
		listServiceAccounts = true
	default:
		err := errors.New("invalid list type")
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}

	accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
	for _, internalDN := range ldapUserList {
		externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN)
		accessKeys := madmin.ListAccessKeysLDAPResp{}
		if listSTSKeys {
			stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, sts := range stsKeys {
				accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
					AccessKey:  sts.AccessKey,
					Expiration: &sts.Expiration,
				})
			}
			// if only STS keys, skip if user has no STS keys
			if !listServiceAccounts && len(stsKeys) == 0 {
				continue
			}
		}

		if listServiceAccounts {
			serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, svc := range serviceAccounts {
				accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
					AccessKey:  svc.AccessKey,
					Expiration: &svc.Expiration,
				})
			}
			// if only service accounts, skip if user has no service accounts
			if !listSTSKeys && len(serviceAccounts) == 0 {
				continue
			}
		}
		accessKeyMap[externalDN] = accessKeys
	}

	data, err := json.Marshal(accessKeyMap)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
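
A client consuming this endpoint decrypts the payload and decodes the map keyed by external DN that the handler builds above. A self-contained sketch with local stand-in types (the JSON field names here are assumptions for illustration, not the canonical madmin wire format):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// stand-ins mirroring madmin.ServiceAccountInfo and
// madmin.ListAccessKeysLDAPResp, for illustration only
type serviceAccountInfo struct {
	AccessKey  string     `json:"accessKey"`
	Expiration *time.Time `json:"expiration,omitempty"`
}

type listAccessKeysLDAPResp struct {
	ServiceAccounts []serviceAccountInfo `json:"svcaccs"`
	STSKeys         []serviceAccountInfo `json:"stsKeys"`
}

func main() {
	// roughly what a decrypted response body could look like
	payload := []byte(`{"uid=alice,ou=people,dc=example,dc=org":
		{"svcaccs":[{"accessKey":"AKIAEXAMPLE"}],"stsKeys":[]}}`)
	var perUser map[string]listAccessKeysLDAPResp
	if err := json.Unmarshal(payload, &perUser); err != nil {
		panic(err)
	}
	for dn, keys := range perUser {
		fmt.Printf("%s: %d service accounts, %d STS keys\n",
			dn, len(keys.ServiceAccounts), len(keys.STSKeys))
	}
}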

cmd/admin-handlers-idp-openid.go (new file, 246 lines)
@@ -0,0 +1,246 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"encoding/json"
	"errors"
	"net/http"
	"sort"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/pkg/v3/policy"
)

const dummyRoleARN = "dummy-internal"

// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	if !globalIAMSys.OpenIDConfig.Enabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL)
		return
	}

	userList := r.Form["users"]
	isAll := r.Form.Get("all") == "true"
	selfOnly := !isAll && len(userList) == 0
	cfgName := r.Form.Get("configName")
	allConfigs := r.Form.Get("allConfigs") == "true"
	if cfgName == "" && !allConfigs {
		cfgName = madmin.Default
	}

	if isAll && len(userList) > 0 {
		// This should be checked on client side, so return generic error
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Empty DN list and not self, list access keys for all users
	if isAll {
		if !globalIAMSys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.ListUsersAdminAction,
			ConditionValues: getConditionValues(r, "", cred),
			IsOwner:         owner,
			Claims:          cred.Claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else if len(userList) == 1 && userList[0] == cred.ParentUser {
		selfOnly = true
	}

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
		DenyOnly:        selfOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	if selfOnly && len(userList) == 0 {
		selfDN := cred.AccessKey
		if cred.ParentUser != "" {
			selfDN = cred.ParentUser
		}
		userList = append(userList, selfDN)
	}

	listType := r.Form.Get("listType")
	var listSTSKeys, listServiceAccounts bool
	switch listType {
	case madmin.AccessKeyListUsersOnly:
		listSTSKeys = false
		listServiceAccounts = false
	case madmin.AccessKeyListSTSOnly:
		listSTSKeys = true
		listServiceAccounts = false
	case madmin.AccessKeyListSvcaccOnly:
		listSTSKeys = false
		listServiceAccounts = true
	case madmin.AccessKeyListAll:
		listSTSKeys = true
		listServiceAccounts = true
	default:
		err := errors.New("invalid list type")
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}

	s := globalServerConfig.Clone()
	roleArnMap := make(map[string]string)
	// Map of configs to a map of users to their access keys
	cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys)
	configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	for _, config := range configs {
		if !allConfigs && cfgName != config.Name {
			continue
		}
		arn := dummyRoleARN
		if config.RoleARN != "" {
			arn = config.RoleARN
		}
		roleArnMap[arn] = config.Name
		newResp := make(map[string]madmin.OpenIDUserAccessKeys)
		cfgToUsersMap[config.Name] = newResp
	}
	if len(roleArnMap) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
		return
	}

	userSet := set.CreateStringSet(userList...)
	accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	for _, accessKey := range accessKeys {
		// Filter out any disqualifying access keys
		_, ok := accessKey.Claims[subClaim]
		if !ok {
			continue // OpenID access keys must have a sub claim
		}
		if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) {
			continue // skip if not the type we want
		}
		arn, ok := accessKey.Claims[roleArnClaim].(string)
		if !ok {
			if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
				continue // skip if no roleArn and no policy claim
			}
		}
		matchingCfgName, ok := roleArnMap[arn]
		if !ok {
			continue // skip if not part of the target config
		}
		var id string
		if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" {
			id, _ = accessKey.Claims[idClaim].(string)
		}
		if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) {
			continue // skip if not in the user list
		}
		openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser]

		// Add new user to map if not already present
		if !ok {
			var readableClaim string
			if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" {
				readableClaim, _ = accessKey.Claims[rc].(string)
			}
			openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{
				MinioAccessKey: accessKey.ParentUser,
				ID:             id,
				ReadableName:   readableClaim,
			}
		}
		svcAccInfo := madmin.ServiceAccountInfo{
			AccessKey:  accessKey.AccessKey,
			Expiration: &accessKey.Expiration,
		}
		if accessKey.IsServiceAccount() {
			openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo)
		} else {
			openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo)
		}
		cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys
	}

	// Convert map to slice and sort
	resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap))
	for cfgName, usersMap := range cfgToUsersMap {
		users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap))
		for _, user := range usersMap {
			users = append(users, user)
		}
		sort.Slice(users, func(i, j int) bool {
			return users[i].MinioAccessKey < users[j].MinioAccessKey
		})
		resp = append(resp, madmin.ListAccessKeysOpenIDResp{
			ConfigName: cfgName,
			Users:      users,
		})
	}
	sort.Slice(resp, func(i, j int) bool {
		return resp[i].ConfigName < resp[j].ConfigName
	})

	data, err := json.Marshal(resp)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
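
The tail of this handler flattens the nested map into slices and sorts both levels, so responses are deterministic even though Go randomizes map iteration order. The same group-then-sort pattern in isolation (a generic sketch, not MinIO code):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// anything marshaled from a map must be flattened and sorted first
	// if byte-for-byte stable output matters
	cfgToUsers := map[string][]string{
		"dex":      {"carol", "alice"},
		"keycloak": {"bob"},
	}
	type entry struct {
		Config string
		Users  []string
	}
	out := make([]entry, 0, len(cfgToUsers))
	for cfg, users := range cfgToUsers {
		sort.Strings(users)
		out = append(out, entry{Config: cfg, Users: users})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Config < out[j].Config })
	fmt.Println(out) // [{dex [alice carol]} {keycloak [bob]}]
}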

@@ -258,8 +258,8 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
	// concurrent rebalance-start commands.
	if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
			if proxyEp.Host == ep.Host {
				if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
					return
				}
			}
@@ -329,8 +329,8 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
	// pools may temporarily have out of date info on the others.
	if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
			if proxyEp.Host == ep.Host {
				if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
					return
				}
			}
@@ -374,6 +374,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
	globalNotificationSys.StopRebalance(r.Context())
	writeSuccessResponseHeadersOnly(w)
	adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
	globalNotificationSys.LoadRebalanceMeta(ctx, false)
}

func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
@@ -382,8 +383,8 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
		return
	}
	for nodeIdx, proxyEp := range globalProxyEndpoints {
		if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
			if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
		if proxyEp.Host == host && !proxyEp.IsLocal {
			if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
				return true
			}
		}

@@ -120,9 +120,12 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
	userReq := madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	}
	if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
		c.Fatalf("Unable to attach policy: %v", err)
	}

	accessKeys[i] = accessKey
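
The test migrates from the older SetPolicy call to the policy association API. Detaching is symmetric; a sketch assuming a configured madmin-go client (madmin-go also exposes DetachPolicy, though only AttachPolicy appears in this diff):

package example

import (
	"context"

	"github.com/minio/madmin-go/v3"
)

// togglePolicy attaches and then detaches a policy for a user;
// the policy name "readwrite" is hypothetical.
func togglePolicy(ctx context.Context, adm *madmin.AdminClient, user string) error {
	req := madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     user,
	}
	if _, err := adm.AttachPolicy(ctx, req); err != nil {
		return err
	}
	_, err := adm.DetachPolicy(ctx, req)
	return err
}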

@@ -26,16 +26,18 @@ import (
	"io"
	"net/http"
	"os"
	"slices"
	"sort"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/klauspost/compress/zip"
	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/cachevalue"
	"github.com/minio/minio/internal/config/dns"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	xldap "github.com/minio/pkg/v3/ldap"
	"github.com/minio/pkg/v3/policy"
@@ -64,6 +66,17 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	// This API only supports removal of internal users not service accounts.
	ok, _, err = globalIAMSys.IsServiceAccount(accessKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	if ok {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
		return
	}

	// When the user is root credential you are not allowed to
	// remove the root user. Also you cannot delete yourself.
	if accessKey == globalActiveCred.AccessKey || accessKey == cred.AccessKey {
@@ -184,12 +197,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
		return
	}

	checkDenyOnly := false
	if name == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to view one's own info.
		checkDenyOnly = true
	}
	checkDenyOnly := name == cred.AccessKey

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName: cred.AccessKey,
@@ -480,12 +488,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	checkDenyOnly := false
	if accessKey == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to change one's own password.
		checkDenyOnly = true
	}
	checkDenyOnly := accessKey == cred.AccessKey

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName: cred.AccessKey,
@@ -636,7 +639,7 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re

// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, false)
	if APIError.Code != "" {
		writeErrorResponseJSON(ctx, w, APIError, r.URL)
		return
@@ -676,10 +679,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
	}

	// Check if we are creating svc account for request sender.
	isSvcAccForRequestor := false
	if targetUser == requestorUser || targetUser == requestorParentUser {
		isSvcAccForRequestor = true
	}
	isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser

	// If we are creating svc account for request sender, ensure
	// that targetUser is a real user (i.e. not derived
@@ -770,7 +770,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
			Name:          newCred.Name,
			Description:   newCred.Description,
			Claims:        opts.claims,
			SessionPolicy: createReq.Policy,
			SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
			Status:        auth.AccountOn,
			Expiration:    createReq.Expiration,
		},
@@ -894,7 +894,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
			Status:        opts.status,
			Name:          opts.name,
			Description:   opts.description,
			SessionPolicy: updateReq.NewPolicy,
			SessionPolicy: madmin.SRSessionPolicy(updateReq.NewPolicy),
			Expiration:    updateReq.NewExpiration,
		},
	},
@@ -1166,6 +1166,172 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
	writeSuccessNoContent(w)
}

// ListAccessKeysBulk - GET /minio/admin/v3/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysBulk(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	users := r.Form["users"]
	isAll := r.Form.Get("all") == "true"
	selfOnly := !isAll && len(users) == 0

	if isAll && len(users) > 0 {
		// This should be checked on client side, so return generic error
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Empty user list and not self, list access keys for all users
	if isAll {
		if !globalIAMSys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.ListUsersAdminAction,
			ConditionValues: getConditionValues(r, "", cred),
			IsOwner:         owner,
			Claims:          cred.Claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else if len(users) == 1 {
		if users[0] == cred.AccessKey || users[0] == cred.ParentUser {
			selfOnly = true
		}
	}

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
		DenyOnly:        selfOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	if selfOnly && len(users) == 0 {
		selfUser := cred.AccessKey
		if cred.ParentUser != "" {
			selfUser = cred.ParentUser
		}
		users = append(users, selfUser)
	}

	var checkedUserList []string
	if isAll {
		users, err := globalIAMSys.ListUsers(ctx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for user := range users {
			checkedUserList = append(checkedUserList, user)
		}
		checkedUserList = append(checkedUserList, globalActiveCred.AccessKey)
	} else {
		for _, user := range users {
			// Validate the user
			_, ok := globalIAMSys.GetUser(ctx, user)
			if !ok {
				continue
			}
			checkedUserList = append(checkedUserList, user)
		}
	}

	listType := r.Form.Get("listType")
	var listSTSKeys, listServiceAccounts bool
	switch listType {
	case madmin.AccessKeyListUsersOnly:
		listSTSKeys = false
		listServiceAccounts = false
	case madmin.AccessKeyListSTSOnly:
		listSTSKeys = true
		listServiceAccounts = false
	case madmin.AccessKeyListSvcaccOnly:
		listSTSKeys = false
		listServiceAccounts = true
	case madmin.AccessKeyListAll:
		listSTSKeys = true
		listServiceAccounts = true
	default:
		err := errors.New("invalid list type")
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}

	accessKeyMap := make(map[string]madmin.ListAccessKeysResp)
	for _, user := range checkedUserList {
		accessKeys := madmin.ListAccessKeysResp{}
		if listSTSKeys {
			stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, user)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, sts := range stsKeys {
				accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
					AccessKey:  sts.AccessKey,
					Expiration: &sts.Expiration,
				})
			}
			// if only STS keys, skip if user has no STS keys
			if !listServiceAccounts && len(stsKeys) == 0 {
				continue
			}
		}

		if listServiceAccounts {
			serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, user)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, svc := range serviceAccounts {
				accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
					AccessKey:  svc.AccessKey,
					Expiration: &svc.Expiration,
				})
			}
			// if only service accounts, skip if user has no service accounts
			if !listSTSKeys && len(serviceAccounts) == 0 {
				continue
			}
		}
		accessKeyMap[user] = accessKeys
	}

	data, err := json.Marshal(accessKeyMap)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
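
This listType switch appears verbatim in all three bulk handlers (builtin above, LDAP and OpenID earlier). It is a four-way lookup; the equivalent table form, shown only to make the mapping explicit (the handlers keep the explicit switch):

package example

import "github.com/minio/madmin-go/v3"

// listType → {include STS keys, include service accounts}
var listTypeFlags = map[string][2]bool{
	madmin.AccessKeyListUsersOnly:  {false, false}, // parent users only
	madmin.AccessKeyListSTSOnly:    {true, false},
	madmin.AccessKeyListSvcaccOnly: {false, true},
	madmin.AccessKeyListAll:        {true, true},
}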

// AccountInfoHandler returns usage, permissions and other bucket metadata for the incoming user
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
@@ -1235,18 +1401,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
		return rd, wr
	}

	bucketStorageCache.InitOnce(10*time.Second,
		cachevalue.Opts{ReturnLastGood: true},
		func(ctx context.Context) (DataUsageInfo, error) {
			ctx, done := context.WithTimeout(ctx, 2*time.Second)
			defer done()

			return loadDataUsageFromBackend(ctx, objectAPI)
		},
	)

	dataUsageInfo, _ := bucketStorageCache.Get()

	// If etcd, dns federation configured list buckets from etcd.
	var err error
	var buckets []BucketInfo
@@ -1287,7 +1441,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

	var buf []byte
	switch {
	case accountName == globalActiveCred.AccessKey:
	case accountName == globalActiveCred.AccessKey || newGlobalAuthZPluginFn() != nil:
		// For owner account and when plugin authZ is configured always set
		// effective policy as `consoleAdmin`.
		//
		// In the latter case, we let the UI render everything, but individual
		// actions would fail if not permitted by the external authZ service.
		for _, policy := range policy.DefaultPolicies {
			if policy.Name == "consoleAdmin" {
				effectivePolicy = policy.Definition
@@ -1315,8 +1474,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
			return
		}
		effectivePolicy = globalIAMSys.GetCombinedPolicy(policies...)

	}

	buf, err = json.MarshalIndent(effectivePolicy, "", " ")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1333,15 +1492,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
		rd, wr := isAllowedAccess(bucket.Name)
		if rd || wr {
			// Fetch the data usage of the current bucket
			var size uint64
			var objectsCount uint64
			var objectsHist, versionsHist map[string]uint64
			if !dataUsageInfo.LastUpdate.IsZero() {
				size = dataUsageInfo.BucketsUsage[bucket.Name].Size
				objectsCount = dataUsageInfo.BucketsUsage[bucket.Name].ObjectsCount
				objectsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectSizesHistogram
				versionsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectVersionsHistogram
			}
			bui := globalBucketQuotaSys.GetBucketUsageInfo(ctx, bucket.Name)
			size := bui.Size
			objectsCount := bui.ObjectsCount
			objectsHist := bui.ObjectSizesHistogram
			versionsHist := bui.ObjectVersionsHistogram

			// Fetch the prefix usage of the current bucket
			var prefixUsage map[string]uint64
			if enablePrefixUsage {
@@ -1411,6 +1567,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTooManyPolicies), r.URL)
		return
	}
	setReqInfoPolicyName(ctx, name)

	policyDoc, err := globalIAMSys.InfoPolicy(name)
	if err != nil {
@@ -1514,6 +1671,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ

	vars := mux.Vars(r)
	policyName := vars["name"]
	setReqInfoPolicyName(ctx, policyName)

	if err := globalIAMSys.DeletePolicy(ctx, policyName, true); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1546,6 +1704,13 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
		return
	}
	setReqInfoPolicyName(ctx, policyName)

	// Reject policy names with commas.
	if strings.Contains(policyName, ",") {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrPolicyInvalidName), r.URL)
		return
	}

	// Error out if Content-Length is missing.
	if r.ContentLength <= 0 {
@@ -1611,6 +1776,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
	policyName := vars["policyName"]
	entityName := vars["userOrGroup"]
	isGroup := vars["isGroup"] == "true"
	setReqInfoPolicyName(ctx, policyName)

	if !isGroup {
		ok, _, err := globalIAMSys.IsTempUser(entityName)
@@ -1696,7 +1862,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
	}))
}

// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/polciy-entities?policy=xxx&user=xxx&group=xxx
// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/policy-entities?policy=xxx&user=xxx&group=xxx
func (a adminAPIHandlers) ListPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

@@ -1798,6 +1964,7 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	setReqInfoPolicyName(ctx, strings.Join(addedOrRemoved, ","))

	respBody := madmin.PolicyAssociationResp{
		UpdatedAt: updatedAt,
@@ -1823,6 +1990,227 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
	writeSuccessResponseJSON(w, encryptedData)
}

// RevokeTokens - POST /minio/admin/v3/revoke-tokens/{userProvider}
func (a adminAPIHandlers) RevokeTokens(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	userProvider := mux.Vars(r)["userProvider"]

	user := r.Form.Get("user")
	tokenRevokeType := r.Form.Get("tokenRevokeType")
	fullRevoke := r.Form.Get("fullRevoke") == "true"
	isTokenSelfRevoke := user == ""
	if !isTokenSelfRevoke {
		var err error
		user, err = getUserWithProvider(ctx, userProvider, user, false)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	if (user != "" && tokenRevokeType == "" && !fullRevoke) || (tokenRevokeType != "" && fullRevoke) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	adminPrivilege := globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.RemoveServiceAccountAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
	})

	if !adminPrivilege || isTokenSelfRevoke {
		parentUser := cred.AccessKey
		if cred.ParentUser != "" {
			parentUser = cred.ParentUser
		}
		if !isTokenSelfRevoke && user != parentUser {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
		user = parentUser
	}

	// Infer token revoke type from the request if requestor is STS.
	if isTokenSelfRevoke && tokenRevokeType == "" && !fullRevoke {
		if cred.IsTemp() {
			tokenRevokeType, _ = cred.Claims[tokenRevokeTypeClaim].(string)
		}
		if tokenRevokeType == "" {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNoTokenRevokeType), r.URL)
			return
		}
	}

	err := globalIAMSys.RevokeTokens(ctx, user, tokenRevokeType)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessNoContent(w)
}

// InfoAccessKey - GET /minio/admin/v3/info-access-key?access-key=<access-key>
func (a adminAPIHandlers) InfoAccessKey(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	accessKey := mux.Vars(r)["accessKey"]
	if accessKey == "" {
		accessKey = cred.AccessKey
	}

	u, ok := globalIAMSys.GetUser(ctx, accessKey)
	targetCred := u.Credentials

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
	}) {
		// If requested user does not exist and requestor is not allowed to list service accounts, return access denied.
		if !ok {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}

		requestUser := cred.AccessKey
		if cred.ParentUser != "" {
			requestUser = cred.ParentUser
		}

		if requestUser != targetCred.ParentUser {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	}

	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchAccessKey), r.URL)
		return
	}

	var (
		sessionPolicy *policy.Policy
		err           error
		userType      string
	)
	switch {
	case targetCred.IsTemp():
		userType = "STS"
		_, sessionPolicy, err = globalIAMSys.GetTemporaryAccount(ctx, accessKey)
		if err == errNoSuchTempAccount {
			err = errNoSuchAccessKey
		}
	case targetCred.IsServiceAccount():
		userType = "Service Account"
		_, sessionPolicy, err = globalIAMSys.GetServiceAccount(ctx, accessKey)
		if err == errNoSuchServiceAccount {
			err = errNoSuchAccessKey
		}
	default:
		err = errNoSuchAccessKey
	}
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// if session policy is nil or empty, then it is implied policy
	impliedPolicy := sessionPolicy == nil || (sessionPolicy.Version == "" && len(sessionPolicy.Statements) == 0)

	var svcAccountPolicy policy.Policy

	if !impliedPolicy {
		svcAccountPolicy = *sessionPolicy
	} else {
		policiesNames, err := globalIAMSys.PolicyDBGet(targetCred.ParentUser, targetCred.Groups...)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
	}

	policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var expiration *time.Time
	if !targetCred.Expiration.IsZero() && !targetCred.Expiration.Equal(timeSentinel) {
		expiration = &targetCred.Expiration
	}

	userProvider := guessUserProvider(targetCred)

	infoResp := madmin.InfoAccessKeyResp{
		AccessKey: accessKey,
		InfoServiceAccountResp: madmin.InfoServiceAccountResp{
			ParentUser:    targetCred.ParentUser,
			Name:          targetCred.Name,
			Description:   targetCred.Description,
			AccountStatus: targetCred.Status,
			ImpliedPolicy: impliedPolicy,
			Policy:        string(policyJSON),
			Expiration:    expiration,
		},

		UserType:     userType,
		UserProvider: userProvider,
	}

	populateProviderInfoFromClaims(targetCred.Claims, userProvider, &infoResp)

	data, err := json.Marshal(infoResp)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}

const (
	allPoliciesFile = "policies.json"
	allUsersFile    = "users.json"
@@ -1852,6 +2240,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
	// Get current object layer instance.
	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportIAMAction)
	if objectAPI == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}
	// Initialize a zip writer which will provide a zipped content
@@ -1988,7 +2377,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
				SecretKey:     acc.Credentials.SecretKey,
				Groups:        acc.Credentials.Groups,
				Claims:        claims,
				SessionPolicy: json.RawMessage(policyJSON),
				SessionPolicy: policyJSON,
				Status:        acc.Credentials.Status,
				Name:          sa.Name,
				Description:   sa.Description,
@@ -2062,19 +2451,25 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
	a.importIAM(w, r, "")
}

// ImportIAMV2 - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAMV2(w http.ResponseWriter, r *http.Request) {
	a.importIAM(w, r, "v2")
}

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiVer string) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	// Validate signature, permissions and get current object layer instance.
	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ImportIAMAction)
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}
	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	data, err := io.ReadAll(r.Body)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
@@ -2086,9 +2481,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	var skipped, removed, added madmin.IAMEntities
	var failed madmin.IAMErrEntities

	// import policies first
	{

		f, err := zr.Open(pathJoin(iamAssetsDir, allPoliciesFile))
		switch {
		case errors.Is(err, os.ErrNotExist):
@@ -2111,8 +2509,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
			for policyName, policy := range allPolicies {
				if policy.IsEmpty() {
					err = globalIAMSys.DeletePolicy(ctx, policyName, true)
					removed.Policies = append(removed.Policies, policyName)
				} else {
					_, err = globalIAMSys.SetPolicy(ctx, policyName, policy)
					added.Policies = append(added.Policies, policyName)
				}
				if err != nil {
					writeErrorResponseJSON(ctx, w, importError(ctx, err, allPoliciesFile, policyName), r.URL)
@@ -2158,43 +2558,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
					return
				}

				if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
					// Incoming access key matches parent user then we should
					// reject password change requests.
					writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAddUserInvalidArgument, err, allUsersFile, accessKey), r.URL)
					return
				}

				// Check if accessKey has beginning and end space characters, this only applies to new users.
				if !exists && hasSpaceBE(accessKey) {
					writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allUsersFile, accessKey), r.URL)
					return
				}

				checkDenyOnly := false
				if accessKey == cred.AccessKey {
					// Check that there is no explicit deny - otherwise it's allowed
					// to change one's own password.
					checkDenyOnly = true
				}

				if !globalIAMSys.IsAllowed(policy.Args{
					AccountName:     cred.AccessKey,
					Groups:          cred.Groups,
					Action:          policy.CreateUserAdminAction,
					ConditionValues: getConditionValues(r, "", cred),
					IsOwner:         owner,
					Claims:          cred.Claims,
					DenyOnly:        checkDenyOnly,
				}) {
					writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allUsersFile, accessKey), r.URL)
					return
				}
				if _, err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil {
					writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, toAdminAPIErrCode(ctx, err), err, allUsersFile, accessKey), r.URL)
					return
					failed.Users = append(failed.Users, madmin.IAMErrEntity{Name: accessKey, Error: err})
				} else {
					added.Users = append(added.Users, accessKey)
				}

			}
		}
	}
@@ -2230,8 +2604,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
				}
			}
			if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
				return
				failed.Groups = append(failed.Groups, madmin.IAMErrEntity{Name: group, Error: err})
			} else {
				added.Groups = append(added.Groups, group)
			}
		}
	}
@@ -2260,7 +2635,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {

		// Validations for LDAP enabled deployments.
		if globalIAMSys.LDAPConfig.Enabled() {
			err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
			skippedAccessKeys, err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
			skipped.ServiceAccounts = append(skipped.ServiceAccounts, skippedAccessKeys...)
			if err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
				return
@@ -2268,6 +2644,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
		}

		for user, svcAcctReq := range serviceAcctReqs {
			if slices.Contains(skipped.ServiceAccounts, user) {
				continue
			}
			var sp *policy.Policy
			var err error
			if len(svcAcctReq.SessionPolicy) > 0 {
@@ -2283,17 +2662,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
				return
			}
			if !globalIAMSys.IsAllowed(policy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          policy.CreateServiceAccountAdminAction,
				ConditionValues: getConditionValues(r, "", cred),
				IsOwner:         owner,
				Claims:          cred.Claims,
			}) {
				writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allSvcAcctsFile, user), r.URL)
				return
			}
			updateReq := true
			_, _, err = globalIAMSys.GetServiceAccount(ctx, svcAcctReq.AccessKey)
			if err != nil {
@@ -2308,7 +2676,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
					// clean import.
					err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
					if err != nil {
						delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
						delErr := fmt.Errorf("failed to delete existing service account (%s) before importing it: %w", svcAcctReq.AccessKey, err)
						writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
						return
					}
@@ -2325,10 +2693,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
			}

			if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
				return
				failed.ServiceAccounts = append(failed.ServiceAccounts, madmin.IAMErrEntity{Name: user, Error: err})
			} else {
				added.ServiceAccounts = append(added.ServiceAccounts, user)
			}

		}
	}
}
@@ -2365,8 +2733,15 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
				return
			}
			if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, regUser, false); err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, userPolicyMappingsFile, u), r.URL)
				return
				failed.UserPolicies = append(
					failed.UserPolicies,
					madmin.IAMErrPolicyEntity{
						Name:     u,
						Policies: strings.Split(pm.Policies, ","),
						Error:    err,
					})
			} else {
				added.UserPolicies = append(added.UserPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
			}
		}
	}
@@ -2396,7 +2771,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
		// Validations for LDAP enabled deployments.
		if globalIAMSys.LDAPConfig.Enabled() {
			isGroup := true
			err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
			skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
			skipped.Groups = append(skipped.Groups, skippedDN...)
			if err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
				return
@@ -2404,9 +2780,19 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
		}

		for g, pm := range grpPolicyMap {
			if slices.Contains(skipped.Groups, g) {
				continue
			}
			if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
				return
				failed.GroupPolicies = append(
					failed.GroupPolicies,
					madmin.IAMErrPolicyEntity{
						Name:     g,
						Policies: strings.Split(pm.Policies, ","),
						Error:    err,
					})
			} else {
				added.GroupPolicies = append(added.GroupPolicies, map[string][]string{g: strings.Split(pm.Policies, ",")})
			}
		}
	}
@@ -2436,13 +2822,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
		// Validations for LDAP enabled deployments.
		if globalIAMSys.LDAPConfig.Enabled() {
			isGroup := true
			err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
			skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
			skipped.Users = append(skipped.Users, skippedDN...)
			if err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
				return
			}
		}
		for u, pm := range userPolicyMap {
			if slices.Contains(skipped.Users, u) {
				continue
			}
			// disallow setting policy mapping if user is a temporary user
			ok, _, err := globalIAMSys.IsTempUser(u)
			if err != nil && err != errNoSuchUser {
@@ -2455,19 +2845,43 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
			}

			if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
				writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
				return
				failed.STSPolicies = append(
					failed.STSPolicies,
					madmin.IAMErrPolicyEntity{
						Name:     u,
						Policies: strings.Split(pm.Policies, ","),
						Error:    err,
					})
			} else {
				added.STSPolicies = append(added.STSPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
			}
		}
	}
}

	if apiVer == "v2" {
		iamr := madmin.ImportIAMResult{
			Skipped: skipped,
			Removed: removed,
			Added:   added,
			Failed:  failed,
		}

		b, err := json.Marshal(iamr)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		writeSuccessResponseJSON(w, b)
	}
}
|
||||
|
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
dur := exp.Sub(time.Now())
dur := time.Until(*exp)
if dur <= 0 {
return errors.New("unsupported expiration time")
}
@@ -2475,7 +2889,7 @@ func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) e
return nil
}

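The hunk above swaps exp.Sub(time.Now()) for time.Until(*exp). By the standard library's definition, time.Until(t) is exactly t.Sub(time.Now()), so the rewrite is behavior-preserving and only satisfies the gosimple S1024 style check. A minimal standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	exp := time.Now().Add(90 * time.Second)
	d1 := exp.Sub(time.Now()) // flagged by linters (gosimple S1024)
	d2 := time.Until(exp)     // preferred spelling, same value
	// true: the two durations differ only by the nanoseconds between the calls
	fmt.Println(d1-d2 < time.Millisecond)
}
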
func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
ctx := r.Context()

// Get current object layer instance.
@@ -2542,6 +2956,14 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

denyOnly := (targetUser == cred.AccessKey || targetUser == cred.ParentUser)
if ldap && !denyOnly {
res, _ := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if res != nil && res.NormDN == cred.ParentUser {
denyOnly = true
}
}

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self
if !globalIAMSys.IsAllowed(policy.Args{
@@ -2551,7 +2973,7 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
ConditionValues: condValues,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: (targetUser == cred.AccessKey || targetUser == cred.ParentUser),
DenyOnly: denyOnly,
}) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAccessDenied)
}
@@ -2568,3 +2990,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials

return ctx, cred, opts, createReq, targetUser, APIError{}
}

// setReqInfoPolicyName will set the given policyName as a tag on the context's request info,
// so that it appears in audit logs.
func setReqInfoPolicyName(ctx context.Context, policyName string) {
reqInfo := logger.GetReqInfo(ctx)
reqInfo.SetTags("policyName", policyName)
}

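The denyOnly change above broadens "self" detection for LDAP deployments: a request whose target username normalizes to the caller's own DN is treated as self-service and therefore evaluated deny-only. A reduced sketch of that decision, where normalizeDN is a hypothetical stand-in for the GetValidatedDNForUsername lookup used in the diff:

// isSelfTarget reports whether targetUser refers to the caller itself,
// either directly or (for LDAP) after normalizing to a distinguished name.
func isSelfTarget(targetUser, accessKey, parentUser string, ldap bool, normalizeDN func(string) string) bool {
	denyOnly := targetUser == accessKey || targetUser == parentUser
	if ldap && !denyOnly {
		if dn := normalizeDN(targetUser); dn != "" && dn == parentUser {
			denyOnly = true
		}
	}
	return denyOnly
}
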
@@ -28,6 +28,7 @@ import (
"net/http"
"net/url"
"runtime"
"slices"
"strings"
"testing"
"time"
@@ -159,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}

func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)
s.RestartTestServer(c)

s.iamSetup(c)
}
@@ -239,9 +240,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
c.Assert(v.Status, madmin.AccountEnabled)

// 3. Associate policy and check that user can access
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

client := s.getUserClient(c, accessKey, secretKey, "")
@@ -334,23 +338,34 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 2.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -436,7 +451,7 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::testbucket/*"
"arn:aws:s3:::testbucket"
]
}
]
@@ -470,9 +485,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.mustNotListObjects(ctx, uClient, "testbucket")

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

admClnt := s.getAdminClient(c, accessKey, secretKey, "")
@@ -490,10 +508,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.Fatalf("policy was missing!")
}

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
// Detach policy1 to set up for policy2
_, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to detach policy: %v", err)
}

// 3.2 associate policy to user
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy2},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to attach policy: %v", err)
}

// 3.3 check user can create service account implicitly.
@@ -538,16 +568,24 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -571,9 +609,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -645,16 +686,24 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))

// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -667,6 +716,12 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
c.Fatalf("policy info err: %v", err)
}

// Check that policy with comma is rejected.
err = s.adm.AddCannedPolicy(ctx, "invalid,policy", policyBytes)
if err == nil {
c.Fatalf("invalid policy created successfully")
}

infoStr := string(info)
if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
c.Fatalf("policy contains unexpected content!")
@@ -690,16 +745,24 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -726,9 +789,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)

// 3. Associate policy to group and check user got access.
err = s.adm.SetPolicy(ctx, policy, group, true)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: group,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.1 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -743,8 +809,9 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
if err != nil {
c.Fatalf("group list err: %v", err)
}
if !set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group not present!")
expected := []string{group}
if !slices.Equal(groups, expected) {
c.Fatalf("expected group listing: %v, got: %v", expected, groups)
}
groupInfo, err := s.adm.GetGroupDescription(ctx, group)
if err != nil {
@@ -850,16 +917,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -871,9 +946,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// Create an madmin client with user creds
@@ -931,16 +1009,24 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -952,9 +1038,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// Create an madmin client with user creds
@@ -1010,16 +1099,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1031,9 +1128,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}

// 1. Create a service account for the user
@@ -1564,7 +1664,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
"arn:aws:s3:::%s"
]
}
]

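Throughout the test suite above, the removed SetPolicy call is replaced by the madmin-go v3 AttachPolicy/DetachPolicy APIs, which take a madmin.PolicyAssociationReq naming either a User or a Group. A minimal usage sketch (adm is a *madmin.AdminClient; "newuser" and "readwrite" are illustrative values):

// Attach a canned policy to a user, then detach it again.
if _, err := adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
	Policies: []string{"readwrite"},
	User:     "newuser",
}); err != nil {
	log.Fatalf("unable to attach policy: %v", err)
}
if _, err := adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
	Policies: []string{"readwrite"},
	User:     "newuser",
}); err != nil {
	log.Fatalf("unable to detach policy: %v", err)
}

Setting Group instead of User associates the policies with a group, as the TestGroupAddRemove change shows.
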
@@ -49,6 +49,7 @@ import (
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
@@ -59,7 +60,6 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
@@ -93,11 +93,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() {
if globalInplaceUpdateDisabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if currentReleaseTime.IsZero() || currentReleaseTime.Equal(timeSentinel) {
apiErr := errorCodes.ToAPIErr(ErrMethodNotAllowed)
apiErr.Description = fmt.Sprintf("unable to perform in-place update, release time is unrecognized: %s", currentReleaseTime)
writeErrorResponseJSON(ctx, w, apiErr, r.URL)
return
}

vars := mux.Vars(r)
updateURL := vars["updateURL"]
dryRun := r.Form.Get("dry-run") == "true"
@@ -110,6 +117,11 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

u, err := url.Parse(updateURL)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -128,6 +140,39 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{
DryRun: dryRun,
Results: make([]madmin.ServerPeerUpdateStatus, 0, len(globalNotificationSys.allPeerClients)),
}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus, len(globalNotificationSys.allPeerClients))
failedClients := make(map[int]bool, len(globalNotificationSys.allPeerClients))

if lrTime.Sub(currentReleaseTime) <= 0 {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})

for _, client := range globalNotificationSys.peerClients {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: client.String(),
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})
}

// Marshal API response
jsonBytes, err := json.Marshal(updateStatus)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, jsonBytes)
return
}

u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
@@ -137,16 +182,6 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{DryRun: dryRun}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus)

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

failedClients := make(map[int]struct{})

if globalIsDistErasure {
// Push binary to other servers
for idx, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) {
@@ -156,7 +191,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
Err: nerr.Err.Error(),
CurrentVersion: Version,
}
failedClients[idx] = struct{}{}
failedClients[idx] = true
} else {
peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
Host: nerr.Host.String(),
@@ -167,25 +202,17 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

if lrTime.Sub(currentReleaseTime) > 0 {
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is already running the latest version: %s", Version),
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
}

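The rewritten handler now answers immediately when lrTime.Sub(currentReleaseTime) <= 0, that is, when the requested release is not strictly newer than the running one, instead of discovering this per peer later. The same predicate in isolation, under the assumption that both values are timestamps parsed from MinIO release tags:

// needsUpdate reports whether the release at lrTime is strictly newer
// than the currently running release.
func needsUpdate(currentReleaseTime, lrTime time.Time) bool {
	return lrTime.After(currentReleaseTime) // same as lrTime.Sub(currentReleaseTime) > 0
}
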
@@ -193,8 +220,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
if globalIsDistErasure {
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
client := client
@@ -237,17 +263,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R

if globalIsDistErasure {
// Notify all other MinIO peers signal service.
startTime := time.Now().Add(restartUpdateDelay)
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
client := client
ng.Go(ctx, func() error {
prs, ok := peerResults[client.String()]
if ok && prs.CurrentVersion != prs.UpdatedVersion && prs.UpdatedVersion != "" {
return client.SignalService(serviceRestart, "", dryRun)
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
return client.SignalService(serviceRestart, "", dryRun, &startTime)
}
return nil
}, idx, *client.host)
@@ -283,7 +310,9 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
writeSuccessResponseJSON(w, jsonBytes)

if !dryRun {
if lrTime.Sub(currentReleaseTime) > 0 {
prs, ok := peerResults[local]
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
globalServiceSignalCh <- serviceRestart
}
}
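The restart notification now carries a shared start time (time.Now().Add(restartUpdateDelay)), so peers whose update succeeded (prs.Err == "") restart at roughly the same wall-clock instant rather than as soon as each receives the signal. A hedged sketch of the scheduling idea only; SignalService's real signature and the restart mechanics are internal to MinIO:

// scheduleRestart sleeps until execAt before invoking restart, so that
// peers handed the same execAt restart near-simultaneously.
func scheduleRestart(execAt time.Time, restart func()) {
	if d := time.Until(execAt); d > 0 {
		time.Sleep(d)
	}
	restart()
}
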
@@ -542,9 +571,12 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
}

var objectAPI ObjectLayer
var execAt *time.Time
switch serviceSig {
case serviceRestart:
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction)
t := time.Now().Add(restartUpdateDelay)
execAt = &t
case serviceStop:
objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction)
case serviceFreeze, serviceUnFreeze:
@@ -571,7 +603,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
}

if globalIsDistErasure {
for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun) {
for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun, execAt) {
if nerr.Err != nil && process {
waitingDrives := map[string]madmin.DiskMetrics{}
jerr := json.Unmarshal([]byte(nerr.Err.Error()), &waitingDrives)
@@ -797,7 +829,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
}

// Flush before waiting for next...
w.(http.Flusher).Flush()
xhttp.Flush(w)

select {
case <-ticker.C:
@@ -844,9 +876,10 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
}

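The many w.(http.Flusher).Flush() call sites in the handlers above are replaced by an xhttp.Flush helper. A plausible shape for such a helper — this is an assumption, not the verbatim MinIO implementation — is a nil-safe wrapper around the type assertion, which avoids a panic when the ResponseWriter does not implement http.Flusher:

// Flush flushes w if (and only if) it implements http.Flusher.
func Flush(w http.ResponseWriter) {
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}

Centralizing the assertion also gives one place to add buffering or tracing later.
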
func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry {
t := time.Unix(0, l.Timestamp)
entry := &madmin.LockEntry{
Timestamp: l.Timestamp,
Elapsed: now.Sub(l.Timestamp),
Timestamp: t,
Elapsed: now.Sub(t),
Resource: resource,
ServerList: []string{server},
Source: l.Source,
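Here lockRequesterInfo.Timestamp is now an int64 of nanoseconds, converted once with time.Unix(0, l.Timestamp) and reused for both the reported timestamp and the elapsed computation. The conversion pattern, for reference (standard library only):

// nanosRoundTrip converts an int64 nanosecond timestamp back to a
// time.Time and computes the elapsed duration, mirroring lriToLockEntry.
func nanosRoundTrip(nanos int64, now time.Time) (time.Time, time.Duration) {
	t := time.Unix(0, nanos) // 0 seconds plus nanos nanoseconds since the Unix epoch
	return t, now.Sub(t)
}
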
@@ -1031,7 +1064,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
// Start profiling on remote servers.
var hostErrs []NotificationPeerErr
for _, profiler := range profiles {
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(ctx, profiler)...)

// Start profiling locally as well.
prof, err := startProfiler(profiler)
@@ -1112,7 +1145,11 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)

// Start profiling on remote servers.
for _, profiler := range profiles {
globalNotificationSys.StartProfiling(profiler)
// Limit start time to max 10s.
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
globalNotificationSys.StartProfiling(ctx, profiler)
// StartProfiling blocks, so we can cancel now.
cancel()

// Start profiling locally as well.
prof, err := startProfiler(profiler)
@@ -1127,6 +1164,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
for {
select {
case <-ctx.Done():
// Stop remote profiles
go globalNotificationSys.DownloadProfilingData(GlobalContext, io.Discard)

// Stop local
globalProfilerMu.Lock()
defer globalProfilerMu.Unlock()
for k, v := range globalProfiler {
@@ -1279,7 +1320,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
}

// Analyze the heal token and route the request accordingly
token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
token, _, success := proxyRequestByToken(ctx, w, r, hip.clientToken, false)
if success {
return
}
@@ -1318,7 +1359,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case hr := <-respCh:
switch hr.apiErr {
case noError:
@@ -1326,7 +1367,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(hr.respBytes); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
} else {
writeSuccessResponseJSON(w, hr.respBytes)
}
@@ -1353,7 +1394,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(errorRespJSON); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
break forLoop
}
@@ -1366,7 +1407,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
clientToken := nh.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", nh.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
@@ -1570,7 +1611,6 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte {
break
}

}
w.WriteHeader(http.StatusOK)
}
@@ -1799,7 +1839,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1808,7 +1848,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
prevResult = result
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -1917,7 +1957,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1925,7 +1965,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -2042,7 +2082,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
grid.PutByteBuffer(entry)
if len(traceCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(traceCh) > 0 {
@@ -2051,7 +2091,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2143,7 +2183,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
grid.PutByteBuffer(log)
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
@@ -2152,7 +2192,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2182,7 +2222,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponseHeadersOnly(w)
}

// KMSKeyStatusHandler - GET /minio/admin/v3/kms/status
// KMSStatusHandler - GET /minio/admin/v3/kms/status
func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -2326,6 +2366,7 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]
}

func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage {
const operationTimeout = 10 * time.Second
ldap := madmin.LDAP{}
if globalIAMSys.LDAPConfig.Enabled() {
ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect()
@@ -2366,7 +2407,9 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
mode = madmin.ItemOnline

// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
dataUsageInfo, err := loadDataUsageFromBackend(ctx2, objectAPI)
cancel()
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
@@ -2400,24 +2443,30 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
}

if pools {
poolsInfo, _ = getPoolsInfo(ctx, allDisks)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
poolsInfo, _ = getPoolsInfo(ctx2, allDisks)
cancel()
}
}

domain := globalDomainNames
services := madmin.Services{
KMSStatus: fetchKMSStatus(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
Notifications: notifyTarget,
}
{
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
services.KMSStatus = fetchKMSStatus(ctx2)
cancel()
}

return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalSite.Region(),
SQSARN: globalEventNotifier.GetARNList(false),
SQSARN: globalEventNotifier.GetARNList(),
DeploymentID: globalDeploymentID(),
Buckets: buckets,
Objects: objects,
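getServerInfo now bounds each potentially slow sub-call (data usage load, pools info, KMS status) with its own 10-second context, cancelling immediately after the call returns so one unresponsive subsystem cannot stall the whole info response. The pattern in isolation (a sketch, not MinIO's code):

// withTimeout runs fn with a context that expires after d, and always
// releases the timer resources when fn returns.
func withTimeout(ctx context.Context, d time.Duration, fn func(context.Context)) {
	ctx2, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	fn(ctx2)
}
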
@@ -2627,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
// disk metrics are already included under drive info of each server
getRealtimeMetrics := func() *madmin.RealtimeMetrics {
var m madmin.RealtimeMetrics
var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk
types := madmin.MetricsAll &^ madmin.MetricsDisk
mLocal := collectLocalMetrics(types, collectMetricsOpts{})
m.Merge(&mLocal)
cctx, cancel := context.WithTimeout(healthCtx, time.Second/2)
@@ -2671,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
var anonPools []string

if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") {
// No ellipses pattern. Anonymize host name from every pool arg
pools := strings.Fields(poolsArgs)
anonPools = make([]string, len(pools))
@@ -2913,13 +2962,13 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
if len(healthInfoCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-healthCtx.Done():
return
}
@@ -3045,7 +3094,7 @@ func targetStatus(ctx context.Context, h logger.Target) madmin.Status {
return madmin.Status{Status: string(madmin.ItemOffline)}
}

// fetchLoggerDetails return log info
// fetchLoggerInfo return log info
func fetchLoggerInfo(ctx context.Context) ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit
@@ -3371,7 +3420,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}

// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
if volume != minioMetaBucket || file != formatConfigFile {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {

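Two hunks above rewrite negated conjunctions by De Morgan's law: !(A && B) becomes !A || !B, which reads more directly and is what Go linters suggest. The two forms are always equal, for example:

// braceBalanced shows both spellings of the same predicate used in the
// fetchHealthInfo and InspectDataHandler hunks; a and b are always equal.
func braceBalanced(s string) (a, b bool) {
	a = !(strings.Contains(s, "{") && strings.Contains(s, "}"))
	b = !strings.Contains(s, "{") || !strings.Contains(s, "}")
	return a, b
}
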
@@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

adminTestBed, err := prepareAdminErasureTestBed(ctx)
@@ -463,6 +463,7 @@ func TestTopLockEntries(t *testing.T) {
Owner: lri.Owner,
ID: lri.UID,
Quorum: lri.Quorum,
Timestamp: time.Unix(0, lri.Timestamp),
})
}


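The tests above adopt t.Context() (added in Go 1.24), which returns a context that is cancelled automatically just before the test finishes, in place of context.Background(). Typical usage, with doWork standing in for any context-aware helper:

func TestSomething(t *testing.T) {
	// t.Context() is cancelled by the testing framework at test end,
	// so goroutines started here are reliably told to stop.
	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()
	doWork(ctx)
}
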
@@ -260,7 +260,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

hsp = madmin.HealStopSuccess{
@@ -331,7 +331,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

if h.clientToken == bgHealingUUID {
@@ -761,6 +761,15 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
return nil
}

countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
for _, drive := range drives {
if drive.State == madmin.DriveStateOk {
count++
}
}
return count
}

// task queued, now wait for the response.
select {
case res := <-task.respCh:
@@ -781,6 +790,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if res.err != nil {
res.result.Detail = res.err.Error()
}
if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks {
if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks {
res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got)
}
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
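The new check above flags a heal result as degraded when fewer drives report madmin.DriveStateOk than the object's parity block count, mirroring the "quorum loss" detail string. A reduced sketch of the predicate, keeping the same thresholds as the diff:

// quorumLost reports whether a healed erasure-coded object (data > parity,
// both positive) ended with fewer healthy drives than its parity count.
func quorumLost(okDrives, dataBlocks, parityBlocks int) bool {
	return parityBlocks > 0 && dataBlocks > 0 && dataBlocks > parityBlocks &&
		okDrives < parityBlocks
}
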
@@ -792,18 +806,20 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}

func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) healItems(objAPI ObjectLayer) error {
if h.clientToken == bgHealingUUID {
// For background heal do nothing.
return nil
}

if err := h.healDiskMeta(objAPI); err != nil {
return err
if h.bucket == "" { // heal internal meta only during a site-wide heal
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
}

// Heal buckets and objects
return h.healBuckets(objAPI, bucketsOnly)
return h.healBuckets(objAPI)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -814,8 +830,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
h.traverseAndHealDoneCh <- h.healItems(objAPI)
xioutil.SafeClose(h.traverseAndHealDoneCh)
}

@@ -826,6 +841,7 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
h.settings.Recursive = true
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
@@ -842,14 +858,14 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer) error {
if h.isQuitting() {
return errHealStopSignalled
}

// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(objAPI, h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, false)
}

buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
@@ -863,7 +879,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
})

for _, bucket := range buckets {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, false); err != nil {
return err
}
}
@@ -881,16 +897,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
return nil
}

if !h.settings.Recursive {
if h.object != "" {
if err := h.healObject(bucket, h.object, "", h.settings.ScanMode); err != nil {
return err
}
}

return nil
}

if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}

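Heal client tokens are now assembled with getKeySeparator() rather than a hard-coded ":", so the code that builds a token and the proxy-routing code that later splits it share a single definition of the separator. Schematically (a sketch, not the actual MinIO code):

// buildToken appends the local node index to a heal token; the same
// separator must be used when proxyRequestByToken splits it back apart.
func buildToken(token string, nodeIdx int, sep string) string {
	return fmt.Sprintf("%s%s%d", token, sep, nodeIdx)
}
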
@@ -159,14 +159,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag))

// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
// Metrics operation
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag))

if globalIsDistErasure || globalIsErasure {
// Heal operations
@@ -193,9 +193,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// Profiling operations - deprecated API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag))
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag))

// Config KV operations.
if enableConfigOps {
@@ -244,6 +244,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// STS accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")

// Access key (service account/STS) operations
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}")

// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
@@ -290,6 +294,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

// Import IAM info
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))

// IDentity Provider configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
@@ -301,12 +306,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// LDAP specific service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")

// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))

// OpenID specific service accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}")

// -- END IAM APIs --

// GetBucketQuotaConfig
@@ -340,6 +351,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
adminMiddleware(adminAPI.ListBatchJobs))

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc(
adminMiddleware(adminAPI.BatchJobStatus))

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
adminMiddleware(adminAPI.DescribeBatchJob))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(
@@ -416,6 +430,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))

// STS Revocation
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens))
}

// If none of the routes match add default error handler routes

@@ -32,6 +32,8 @@ type DeletedObject struct {
DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
// MinIO extensions to support delete marker replication
ReplicationState ReplicationState `xml:"-"`

found bool // the object was found during deletion
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
@@ -42,10 +44,10 @@ type DeleteMarkerMTime struct {
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
if t.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
return e.EncodeElement(t.Format(time.RFC3339), startElement)
}

// ObjectV object version key/versionId

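The MarshalXML change works because DeleteMarkerMTime embeds time.Time, so the embedded type's methods are promoted: t.IsZero() and t.Format(...) are the idiomatic spellings of t.Time.IsZero() and t.Time.Format(...). In miniature:

type MTime struct{ time.Time } // embedding promotes time.Time's methods

func demo(t MTime) (bool, string) {
	// Both calls dispatch to the embedded time.Time value.
	return t.IsZero(), t.Format(time.RFC3339)
}
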
@@ -28,7 +28,7 @@ import (
"strconv"
"strings"

"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/minio/minio/internal/ioutil"
"google.golang.org/api/googleapi"

@@ -213,6 +213,10 @@ const (
ErrPolicyAlreadyAttached
ErrPolicyNotAttached
ErrExcessData
ErrPolicyInvalidName
ErrNoTokenRevokeType
ErrAdminOpenIDNotEnabled
ErrAdminNoSuchAccessKey
// Add new error codes here.

// SSE-S3/SSE-KMS related API errors
@@ -561,6 +565,16 @@ var errorCodes = errorCodeMap{
Description: "More data provided than indicated content length",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyInvalidName: {
Code: "PolicyInvalidName",
Description: "Policy name may not contain comma",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminOpenIDNotEnabled: {
Code: "OpenIDNotEnabled",
Description: "No enabled OpenID Connect identity providers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyTooLarge: {
Code: "PolicyTooLarge",
Description: "Policy exceeds the maximum allowed document size.",
@@ -623,7 +637,7 @@ var errorCodes = errorCodeMap{
},
ErrMissingContentMD5: {
Code: "MissingContentMD5",
Description: "Missing required header for this request: Content-Md5.",
Description: "Missing or invalid required header for this request: Content-Md5 or Amz-Content-Checksum",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSecurityHeader: {
@@ -978,7 +992,7 @@ var errorCodes = errorCodeMap{
},
ErrReplicationNoExistingObjects: {
Code: "XMinioReplicationNoExistingObjects",
Description: "No matching ExistingsObjects rule enabled",
Description: "No matching ExistingObjects rule enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetDenyAddError: {
@@ -1258,6 +1272,16 @@ var errorCodes = errorCodeMap{
Description: "The security token included in the request is invalid",
HTTPStatusCode: http.StatusForbidden,
},
ErrNoTokenRevokeType: {
Code: "InvalidArgument",
Description: "No token revoke type specified and one could not be inferred from the request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchAccessKey: {
Code: "XMinioAdminNoSuchAccessKey",
Description: "The specified access key does not exist.",
HTTPStatusCode: http.StatusNotFound,
},

// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -1486,7 +1510,7 @@ var errorCodes = errorCodeMap{
},
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
Description: "Please reduce your request rate",
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
@@ -2155,6 +2179,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchUserLDAPWarn
case errNoSuchServiceAccount:
apiErr = ErrAdminServiceAccountNotFound
case errNoSuchAccessKey:
apiErr = ErrAdminNoSuchAccessKey
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
@@ -2248,6 +2274,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
case hash.ErrInvalidChecksum:
apiErr = ErrInvalidChecksum
}

// Compression errors
@@ -2549,11 +2577,11 @@ func toAPIError(ctx context.Context, err error) APIError {
if len(e.Errors) >= 1 {
apiErr.Code = e.Errors[0].Reason
}
case azblob.StorageError:
case *azcore.ResponseError:
apiErr = APIError{
Code: string(e.ServiceCode()),
Code: e.ErrorCode,
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
HTTPStatusCode: e.StatusCode,
}
// Add more other SDK related errors here if any in future.
default:
@@ -2592,7 +2620,7 @@ func getAPIError(code APIErrorCode) APIError {
return errorCodes.ToAPIErr(ErrInternalError)
}

// getErrorResponse gets in standard error and resource value and
// getAPIErrorResponse gets in standard error and resource value and
// provides a encodable populated response values
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)

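The error mapping above moves from the retired azure-storage-blob-go azblob.StorageError to the current Azure SDK's *azcore.ResponseError, whose exported ErrorCode and StatusCode fields replace the old ServiceCode() and Response() accessors. A minimal handling sketch using github.com/Azure/azure-sdk-for-go/sdk/azcore (errors.As is the usual way to unwrap it):

var respErr *azcore.ResponseError
if errors.As(err, &respErr) {
	apiErr := APIError{
		Code:           respErr.ErrorCode,  // service-level error code string
		Description:    respErr.Error(),
		HTTPStatusCode: respErr.StatusCode, // HTTP status from the response
	}
	_ = apiErr
}
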
||||
@@ -18,7 +18,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
@@ -64,7 +63,7 @@ var toAPIErrorTests = []struct {
|
||||
}
|
||||
|
||||
func TestAPIErrCode(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ctx := t.Context()
|
||||
for i, testCase := range toAPIErrorTests {
|
||||
errCode := toAPIErrorCode(ctx, testCase.err)
|
||||
if errCode != testCase.errCode {
|
||||
|
||||
@@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) {
		e = char

		// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
-		if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
+		isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')
+		if !isAlnum {
			t.Fail()
		}
	}

@@ -166,10 +166,11 @@ type Part struct {
	Size int64

	// Checksum values
-	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
-	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
-	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
-	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC32     string `xml:"ChecksumCRC32,omitempty"`
+	ChecksumCRC32C    string `xml:"ChecksumCRC32C,omitempty"`
+	ChecksumSHA1      string `xml:"ChecksumSHA1,omitempty"`
+	ChecksumSHA256    string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC64NVME string `xml:",omitempty"`
}

// ListPartsResponse - format for list parts response.
@@ -192,6 +193,8 @@ type ListPartsResponse struct {
	IsTruncated bool

	ChecksumAlgorithm string
+	ChecksumType      string

	// List of parts.
	Parts []Part `xml:"Part"`
}
@@ -413,10 +416,11 @@ type CompleteMultipartUploadResponse struct {
	Key  string
	ETag string

-	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
-	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
-	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
-	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC32     string `xml:"ChecksumCRC32,omitempty"`
+	ChecksumCRC32C    string `xml:"ChecksumCRC32C,omitempty"`
+	ChecksumSHA1      string `xml:"ChecksumSHA1,omitempty"`
+	ChecksumSHA256    string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC64NVME string `xml:",omitempty"`
}

// DeleteError structure.
@@ -516,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
		}
	case crypto.SSEC:
		m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
-
	}

	var toRemove []string
@@ -593,8 +596,6 @@ func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, v
		for k, v := range cleanReservedKeys(object.UserDefined) {
			content.UserMetadata.Set(k, v)
		}
-
-		content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
		content.Internal = &ObjectInternalInfo{
			K: object.DataBlocks,
			M: object.ParityBlocks,
@@ -729,7 +730,6 @@ func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, n
		for k, v := range cleanReservedKeys(object.UserDefined) {
			content.UserMetadata.Set(k, v)
		}
-		content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
		content.Internal = &ObjectInternalInfo{
			K: object.DataBlocks,
			M: object.ParityBlocks,
@@ -790,17 +790,18 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
-	cs := oi.decryptChecksums(0, h)
+	cs, _ := oi.decryptChecksums(0, h)
	c := CompleteMultipartUploadResponse{
		Location: location,
		Bucket:   bucket,
		Key:      key,
		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
-		ETag:           "\"" + oi.ETag + "\"",
-		ChecksumSHA1:   cs[hash.ChecksumSHA1.String()],
-		ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
-		ChecksumCRC32:  cs[hash.ChecksumCRC32.String()],
-		ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
+		ETag:              "\"" + oi.ETag + "\"",
+		ChecksumSHA1:      cs[hash.ChecksumSHA1.String()],
+		ChecksumSHA256:    cs[hash.ChecksumSHA256.String()],
+		ChecksumCRC32:     cs[hash.ChecksumCRC32.String()],
+		ChecksumCRC32C:    cs[hash.ChecksumCRC32C.String()],
+		ChecksumCRC64NVME: cs[hash.ChecksumCRC64NVME.String()],
	}
	return c
}
@@ -828,6 +829,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
	listPartsResponse.IsTruncated = partsInfo.IsTruncated
	listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
	listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm
+	listPartsResponse.ChecksumType = partsInfo.ChecksumType

	listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
	for index, part := range partsInfo.Parts {
@@ -840,6 +842,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
		newPart.ChecksumCRC32C = part.ChecksumCRC32C
		newPart.ChecksumSHA1 = part.ChecksumSHA1
		newPart.ChecksumSHA256 = part.ChecksumSHA256
+		newPart.ChecksumCRC64NVME = part.ChecksumCRC64NVME
		listPartsResponse.Parts[index] = newPart
	}
	return listPartsResponse
@@ -946,10 +949,11 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {

// writeErrorResponse writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
-	if err.HTTPStatusCode == http.StatusServiceUnavailable {
-		// Set retry-after header to indicate user-agents to retry request after 120secs.
+	switch err.HTTPStatusCode {
+	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
		// Set retry-after header to indicate user-agents to retry request after 60 seconds.
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set(xhttp.RetryAfter, "120")
+		w.Header().Set(xhttp.RetryAfter, "60")
	}

	switch err.Code {

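The writeErrorResponse hunk broadens the Retry-After header from 503-only to both 503 and 429 responses, and shortens the advertised delay. A minimal sketch of that idea, independent of MinIO's code (handler and function names here are made up):

package main

import (
	"net/http"
)

// writeThrottled shows the pattern from the hunk above: advertise a
// Retry-After delay (in seconds, per RFC 9110) whenever the response is
// 503 Service Unavailable or 429 Too Many Requests, so well-behaved
// clients back off instead of hammering the server.
func writeThrottled(w http.ResponseWriter, status int) {
	switch status {
	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
		w.Header().Set("Retry-After", "60")
	}
	w.WriteHeader(status)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		writeThrottled(w, http.StatusTooManyRequests)
	})
	// http.ListenAndServe(":8080", nil) // uncomment to try it locally
}
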
@@ -227,13 +227,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
	}

	// Skip wrapping with the gzip middleware if specified.
-	var gzippedHandler http.HandlerFunc = tracedHandler
+	gzippedHandler := tracedHandler
	if !handlerFlags.has(noGZS3HFlag) {
		gzippedHandler = gzipHandler(gzippedHandler)
	}

	// Skip wrapping with throttling middleware if specified.
-	var throttledHandler http.HandlerFunc = gzippedHandler
+	throttledHandler := gzippedHandler
	if !handlerFlags.has(noThrottleS3HFlag) {
		throttledHandler = maxClients(throttledHandler)
	}
@@ -436,7 +436,7 @@ func registerAPIRouter(router *mux.Router) {
		Queries("notification", "")
	// ListenNotification
	router.Methods(http.MethodGet).
-		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
+		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
		Queries("events", "{events:.*}")
	// ResetBucketReplicationStatus - MinIO extension API
	router.Methods(http.MethodGet).
@@ -456,6 +456,14 @@ func registerAPIRouter(router *mux.Router) {
	router.Methods(http.MethodGet).
		HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
		Queries("cors", "")
+	// PutBucketCors - this is a dummy call.
+	router.Methods(http.MethodPut).
+		HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)).
+		Queries("cors", "")
+	// DeleteBucketCors - this is a dummy call.
+	router.Methods(http.MethodDelete).
+		HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)).
+		Queries("cors", "")
	// GetBucketWebsiteHandler - this is a dummy call.
	router.Methods(http.MethodGet).
		HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
@@ -472,6 +480,7 @@ func registerAPIRouter(router *mux.Router) {
	router.Methods(http.MethodGet).
		HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
		Queries("logging", "")
+
	// GetBucketTaggingHandler
	router.Methods(http.MethodGet).
		HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).
@@ -615,7 +624,7 @@ func registerAPIRouter(router *mux.Router) {

	// ListenNotification
	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
-		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
+		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
		Queries("events", "{events:.*}")

	// ListBuckets

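The s3APIMiddleware hunk keeps the same flag-gated wrapping while simplifying the declarations. A self-contained sketch of that chain, with invented flag and middleware names standing in for MinIO's (this is an illustration of the shape, not the project's actual middleware):

package main

import (
	"fmt"
	"net/http"
)

type hFlag uint8

const (
	noGzip hFlag = 1 << iota // skip the gzip layer
	noThrottle               // skip the throttling layer
)

func has(flags []hFlag, f hFlag) bool {
	for _, x := range flags {
		if x&f != 0 {
			return true
		}
	}
	return false
}

// withHeader is a trivial stand-in middleware; real code would wrap with
// gzip compression or a client-count limiter here.
func withHeader(h http.HandlerFunc, k, v string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set(k, v)
		h(w, r)
	}
}

// wrap mirrors s3APIMiddleware's structure: each layer is applied only
// when its "skip" flag is absent.
func wrap(h http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
	out := h
	if !has(flags, noGzip) {
		out = withHeader(out, "X-Gzip", "on")
	}
	if !has(flags, noThrottle) {
		out = withHeader(out, "X-Throttle", "on")
	}
	return out
}

func main() {
	h := wrap(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}, noThrottle)
	_ = h // register with a mux in real use
}
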
File diff suppressed because one or more lines are too long
@@ -96,7 +96,7 @@ func isRequestSignStreamingTrailerV4(r *http.Request) bool {
// Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
func isRequestUnsignedTrailerV4(r *http.Request) bool {
	return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
-		r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding)
+		r.Method == http.MethodPut
}

// Authorization type.
@@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
	s3Err := ErrAccessDenied
	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
		getRequestAuthType(r) == authTypeSigned {
-
		// Get credential information from the request.
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
		if s3Err != ErrNone {
@@ -222,7 +221,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
	return claims
}

-func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
+func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) {
	// JWT token for x-amz-security-token is signed with admin
	// secret key, temporary credentials become invalid if
	// server admin credentials change. This is done to ensure
@@ -244,7 +243,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},

	// If AuthZPlugin is set, return without any further checks.
	if newGlobalAuthZPluginFn() != nil {
-		return claims.Map(), nil
+		return claims, nil
	}

	// Check if a session policy is set. If so, decode it here.
@@ -263,12 +262,16 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
		claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
	}

-	return claims.Map(), nil
+	return claims, nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
-	return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
+	jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
+	if err != nil {
+		return nil, err
+	}
+	return jwtClaims.Map(), nil
}

// Fetch claims in the security token returned by the client and validate the token.
@@ -319,7 +322,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
		if err != nil {
			return nil, toAPIErrorCode(r.Context(), err)
		}
-		return claims, ErrNone
+		return claims.Map(), ErrNone
	}

	claims := xjwt.NewMapClaims()
@@ -360,7 +363,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
	var cred auth.Credentials
	var owner bool
	switch getRequestAuthType(r) {
-	case authTypeUnknown, authTypeStreamingSigned:
+	case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
		return ErrSignatureVersionNotSupported
	case authTypePresignedV2, authTypeSignedV2:
		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
@@ -671,32 +674,6 @@ func setAuthMiddleware(h http.Handler) http.Handler {
	})
}

-func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
-	var cred auth.Credentials
-	var owner bool
-	var s3Err APIErrorCode
-	switch atype {
-	case authTypeUnknown, authTypeStreamingSigned:
-		return cred, owner, ErrSignatureVersionNotSupported
-	case authTypeSignedV2, authTypePresignedV2:
-		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
-			return cred, owner, s3Err
-		}
-		cred, owner, s3Err = getReqAccessKeyV2(r)
-	case authTypePresigned, authTypeSigned:
-		region := globalSite.Region()
-		if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
-			return cred, owner, s3Err
-		}
-		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
-	}
-	if s3Err != ErrNone {
-		return cred, owner, s3Err
-	}
-
-	return cred, owner, ErrNone
-}
-
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
	var retSet bool
	if cred.AccessKey == "" {
@@ -751,8 +728,14 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
		return ErrSignatureVersionNotSupported
	case authTypeSignedV2, authTypePresignedV2:
		cred, owner, s3Err = getReqAccessKeyV2(r)
-	case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
+	case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer:
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
+	case authTypeStreamingUnsignedTrailer:
+		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
+		if s3Err == ErrMissingFields {
+			// Could be anonymous. cred + owner is zero value.
+			s3Err = ErrNone
+		}
	}
	if s3Err != ErrNone {
		return s3Err

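The claims hunks above change getClaimsFromTokenWithSecret to return the typed *xjwt.MapClaims and convert to a plain map only at the public boundary. A small sketch of that "keep the typed value internally, flatten at the edge" refactor, with simplified stand-in types rather than MinIO's actual xjwt package:

package main

import "fmt"

// MapClaims is a stand-in for xjwt.MapClaims: a typed wrapper that can
// still be flattened to a plain map at API boundaries.
type MapClaims struct{ m map[string]interface{} }

func (c *MapClaims) Map() map[string]interface{} { return c.m }

// parseClaims returns the typed value so internal callers keep type safety.
// Signature verification is elided; this only shows the shape.
func parseClaims(token string) (*MapClaims, error) {
	return &MapClaims{m: map[string]interface{}{"sub": token}}, nil
}

// claimsAsMap converts at the edge, mirroring getClaimsFromToken in the diff.
func claimsAsMap(token string) (map[string]interface{}, error) {
	c, err := parseClaims(token)
	if err != nil {
		return nil, err
	}
	return c.Map(), nil
}

func main() {
	m, _ := claimsAsMap("example-token")
	fmt.Println(m["sub"])
}
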
@@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}

func TestCheckAdminRequestAuthType(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()

	objLayer, fsDir, err := prepareFS(ctx)
@@ -450,7 +450,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}

func TestValidateAdminSignature(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()

	objLayer, fsDir, err := prepareFS(ctx)

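Several test hunks in this compare replace context.Background() with t.Context(), the per-test context introduced in Go 1.24 that is canceled shortly before the test finishes. A minimal sketch of the idiom (assuming a Go 1.24+ toolchain; this is not one of MinIO's tests):

package example

import "testing"

// TestUsesTestContext shows the t.Context() idiom adopted in the hunks
// above: the returned context is canceled before cleanup functions run,
// so anything derived from it cannot outlive the test.
func TestUsesTestContext(t *testing.T) {
	ctx := t.Context()
	if err := ctx.Err(); err != nil {
		t.Fatalf("context should still be live inside the test body: %v", err)
	}
	t.Cleanup(func() {
		if ctx.Err() == nil {
			t.Error("t.Context() should be canceled by cleanup time")
		}
	})
}
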
@@ -72,10 +72,12 @@ type healingTracker struct {

	// Numbers when current bucket started healing,
	// for resuming with correct numbers.
-	ResumeItemsHealed uint64 `json:"-"`
-	ResumeItemsFailed uint64 `json:"-"`
-	ResumeBytesDone   uint64 `json:"-"`
-	ResumeBytesFailed uint64 `json:"-"`
+	ResumeItemsHealed  uint64 `json:"-"`
+	ResumeItemsFailed  uint64 `json:"-"`
+	ResumeItemsSkipped uint64 `json:"-"`
+	ResumeBytesDone    uint64 `json:"-"`
+	ResumeBytesFailed  uint64 `json:"-"`
+	ResumeBytesSkipped uint64 `json:"-"`

	// Filled on startup/restarts.
	QueuedBuckets []string
@@ -88,6 +90,11 @@ type healingTracker struct {

	ItemsSkipped uint64
	BytesSkipped uint64
+
+	RetryAttempts uint64
+
+	Finished bool // finished healing, whether with errors or not

	// Add future tracking capabilities
	// Be sure that they are included in toHealingDisk
}
@@ -141,6 +148,26 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
	return h
}

+func (h *healingTracker) resetHealing() {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.ItemsHealed = 0
+	h.ItemsFailed = 0
+	h.BytesDone = 0
+	h.BytesFailed = 0
+	h.ResumeItemsHealed = 0
+	h.ResumeItemsFailed = 0
+	h.ResumeBytesDone = 0
+	h.ResumeBytesFailed = 0
+	h.ItemsSkipped = 0
+	h.BytesSkipped = 0
+
+	h.HealedBuckets = nil
+	h.Object = ""
+	h.Bucket = ""
+}
+
func (h *healingTracker) getLastUpdate() time.Time {
	h.mu.RLock()
	defer h.mu.RUnlock()
@@ -196,9 +223,6 @@ func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
-	if h.disk.Healing() == nil {
-		return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
-	}
	h.mu.Lock()
	if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
		h.ID, _ = h.disk.GetDiskID()
@@ -260,8 +284,10 @@ func (h *healingTracker) resume() {

	h.ItemsHealed = h.ResumeItemsHealed
	h.ItemsFailed = h.ResumeItemsFailed
+	h.ItemsSkipped = h.ResumeItemsSkipped
	h.BytesDone = h.ResumeBytesDone
	h.BytesFailed = h.ResumeBytesFailed
+	h.BytesSkipped = h.ResumeBytesSkipped
}

// bucketDone should be called when a bucket is done healing.
@@ -272,8 +298,10 @@ func (h *healingTracker) bucketDone(bucket string) {

	h.ResumeItemsHealed = h.ItemsHealed
	h.ResumeItemsFailed = h.ItemsFailed
+	h.ResumeItemsSkipped = h.ItemsSkipped
	h.ResumeBytesDone = h.BytesDone
	h.ResumeBytesFailed = h.BytesFailed
+	h.ResumeBytesSkipped = h.BytesSkipped
	h.HealedBuckets = append(h.HealedBuckets, bucket)
	for i, b := range h.QueuedBuckets {
		if b == bucket {
@@ -322,6 +350,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
		PoolIndex: h.PoolIndex,
		SetIndex:  h.SetIndex,
		DiskIndex: h.DiskIndex,
+		Finished:  h.Finished,
		Path:      h.Path,
		Started:   h.Started.UTC(),
		LastUpdate: h.LastUpdate.UTC(),
@@ -337,6 +366,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
		Object:        h.Object,
		QueuedBuckets: h.QueuedBuckets,
		HealedBuckets: h.HealedBuckets,
+		RetryAttempts: h.RetryAttempts,

		ObjectsHealed: h.ItemsHealed, // Deprecated July 2021
		ObjectsFailed: h.ItemsFailed, // Deprecated July 2021
@@ -351,24 +381,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
	}

	initBackgroundHealing(ctx, objAPI) // start quick background healing

-	if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
+	if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn {
		globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
		go monitorLocalDisksAndHeal(ctx, z)
	}

+	go globalMRFState.startMRFPersistence()
	go globalMRFState.healRoutine(z)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
	globalLocalDrivesMu.RUnlock()
	for _, disk := range localDrives {
-		_, err := disk.GetDiskID()
+		_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
		if errors.Is(err, errUnformattedDisk) {
			disksToHeal = append(disksToHeal, disk.Endpoint())
			continue
		}
-		if disk.Healing() != nil {
+		if h := disk.Healing(); h != nil && !h.Finished {
			disksToHeal = append(disksToHeal, disk.Endpoint())
		}
	}
@@ -382,6 +414,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {

var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

+var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")
+
func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
	poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
	disk := getStorageViaEndpoint(endpoint)
@@ -389,6 +423,17 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
		return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
	}

+	_, err := disk.DiskInfo(ctx, DiskInfoOptions{})
+	if err != nil {
+		if errors.Is(err, errDriveIsRoot) {
+			// This is a root drive, ignore and move on
+			return nil
+		}
+		if !errors.Is(err, errUnformattedDisk) {
+			return err
+		}
+	}
+
	// Prevent parallel erasure set healing
	locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
	lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
@@ -451,8 +496,30 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
		return err
	}

-	healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+	// if objects have failed healing, we attempt a retry to heal the drive upto 3 times before giving up.
+	if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
+		tracker.RetryAttempts++
+
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
+			humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+
+		tracker.resetHealing()
+		bugLogIf(ctx, tracker.update(ctx))
+
+		return errRetryHealing
+	}
+
+	if tracker.ItemsFailed > 0 {
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
+			tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+	} else {
+		if tracker.RetryAttempts > 0 {
+			healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
+				tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
+		} else {
+			healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
+		}
+	}
	if serverDebugLog {
		tracker.printTo(os.Stdout)
		fmt.Printf("\n")
@@ -482,7 +549,8 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
			continue
		}
		if t.HealID == tracker.HealID {
-			t.delete(ctx)
+			t.Finished = true
+			t.update(ctx)
		}
	}

@@ -524,7 +592,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
		if err := healFreshDisk(ctx, z, disk); err != nil {
			globalBackgroundHealState.setDiskHealingStatus(disk, false)
			timedout := OperationTimedOut{}
-			if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
+			if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
				printEndpointError(disk, err, false)
			}
			return

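The healFreshDisk hunk above bounds drive-heal retries: while items failed and fewer than four attempts are recorded, counters are reset and a sentinel error requests another pass. A compact, generic sketch of that bounded-retry pattern (the tracker and helpers here are invented for illustration, not MinIO's types):

package main

import (
	"errors"
	"fmt"
)

var errRetry = errors.New("retry requested")

type tracker struct {
	failed   int
	attempts int
}

// healOnce is a placeholder for one full healing pass; here each pass
// pretends to fix one failed item.
func healOnce(t *tracker) { t.failed-- }

// finish mirrors the diff's decision: request a retry while failures
// remain and fewer than 4 attempts have been recorded.
func finish(t *tracker) error {
	if t.failed > 0 && t.attempts < 4 {
		t.attempts++
		return errRetry
	}
	return nil
}

func main() {
	t := &tracker{failed: 2}
	for {
		healOnce(t)
		if err := finish(t); errors.Is(err, errRetry) {
			fmt.Printf("retrying, attempt %d\n", t.attempts)
			continue
		}
		break
	}
	fmt.Println("done, attempts:", t.attempts)
}
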
@@ -132,6 +132,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
				err = msgp.WrapError(err, "ResumeItemsFailed")
				return
			}
+		case "ResumeItemsSkipped":
+			z.ResumeItemsSkipped, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeItemsSkipped")
+				return
+			}
		case "ResumeBytesDone":
			z.ResumeBytesDone, err = dc.ReadUint64()
			if err != nil {
@@ -144,6 +150,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
				err = msgp.WrapError(err, "ResumeBytesFailed")
				return
			}
+		case "ResumeBytesSkipped":
+			z.ResumeBytesSkipped, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeBytesSkipped")
+				return
+			}
		case "QueuedBuckets":
			var zb0002 uint32
			zb0002, err = dc.ReadArrayHeader()
@@ -200,6 +212,18 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
				err = msgp.WrapError(err, "BytesSkipped")
				return
			}
+		case "RetryAttempts":
+			z.RetryAttempts, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
+		case "Finished":
+			z.Finished, err = dc.ReadBool()
+			if err != nil {
+				err = msgp.WrapError(err, "Finished")
+				return
+			}
		default:
			err = dc.Skip()
			if err != nil {
@@ -213,9 +237,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 25
+	// map header, size 29
	// write "ID"
-	err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
	if err != nil {
		return
	}
@@ -394,6 +418,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "ResumeItemsFailed")
		return
	}
+	// write "ResumeItemsSkipped"
+	err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.ResumeItemsSkipped)
+	if err != nil {
+		err = msgp.WrapError(err, "ResumeItemsSkipped")
+		return
+	}
	// write "ResumeBytesDone"
	err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
	if err != nil {
@@ -414,6 +448,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "ResumeBytesFailed")
		return
	}
+	// write "ResumeBytesSkipped"
+	err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.ResumeBytesSkipped)
+	if err != nil {
+		err = msgp.WrapError(err, "ResumeBytesSkipped")
+		return
+	}
	// write "QueuedBuckets"
	err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
	if err != nil {
@@ -478,15 +522,35 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "BytesSkipped")
		return
	}
+	// write "RetryAttempts"
+	err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.RetryAttempts)
+	if err != nil {
+		err = msgp.WrapError(err, "RetryAttempts")
+		return
+	}
+	// write "Finished"
+	err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteBool(z.Finished)
+	if err != nil {
+		err = msgp.WrapError(err, "Finished")
+		return
+	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
-	// map header, size 25
+	// map header, size 29
	// string "ID"
-	o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
	o = msgp.AppendString(o, z.ID)
	// string "PoolIndex"
	o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -539,12 +603,18 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
	// string "ResumeItemsFailed"
	o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
	o = msgp.AppendUint64(o, z.ResumeItemsFailed)
+	// string "ResumeItemsSkipped"
+	o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	o = msgp.AppendUint64(o, z.ResumeItemsSkipped)
	// string "ResumeBytesDone"
	o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
	o = msgp.AppendUint64(o, z.ResumeBytesDone)
	// string "ResumeBytesFailed"
	o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
	o = msgp.AppendUint64(o, z.ResumeBytesFailed)
+	// string "ResumeBytesSkipped"
+	o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	o = msgp.AppendUint64(o, z.ResumeBytesSkipped)
	// string "QueuedBuckets"
	o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
@@ -566,6 +636,12 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
	// string "BytesSkipped"
	o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
	o = msgp.AppendUint64(o, z.BytesSkipped)
+	// string "RetryAttempts"
+	o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	o = msgp.AppendUint64(o, z.RetryAttempts)
+	// string "Finished"
+	o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
+	o = msgp.AppendBool(o, z.Finished)
	return
}

@@ -695,6 +771,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
				err = msgp.WrapError(err, "ResumeItemsFailed")
				return
			}
+		case "ResumeItemsSkipped":
+			z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeItemsSkipped")
+				return
+			}
		case "ResumeBytesDone":
			z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
			if err != nil {
@@ -707,6 +789,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
				err = msgp.WrapError(err, "ResumeBytesFailed")
				return
			}
+		case "ResumeBytesSkipped":
+			z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeBytesSkipped")
+				return
+			}
		case "QueuedBuckets":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
@@ -763,6 +851,18 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
				err = msgp.WrapError(err, "BytesSkipped")
				return
			}
+		case "RetryAttempts":
+			z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
+		case "Finished":
+			z.Finished, bts, err = msgp.ReadBoolBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Finished")
+				return
+			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
@@ -777,7 +877,7 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *healingTracker) Msgsize() (s int) {
-	s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
+	s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
	for za0001 := range z.QueuedBuckets {
		s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
	}
@@ -785,6 +885,6 @@ func (z *healingTracker) Msgsize() (s int) {
	for za0002 := range z.HealedBuckets {
		s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
	}
-	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
+	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize
	return
}

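These `_gen.go` hunks are msgp-generated. Adding the two Resume fields plus RetryAttempts and Finished grows the serialized map from 25 to 29 entries, which is why the raw header bytes change from 0xde, 0x00, 0x19 to 0xde, 0x00, 0x1d: 0xde is a MessagePack map16 header, followed by the entry count as a big-endian uint16. A tiny standalone check of that encoding (plain Go, no external dependencies):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// map16 header: 0xde, then a big-endian uint16 entry count.
	old := []byte{0xde, 0x00, 0x19}
	upd := []byte{0xde, 0x00, 0x1d}
	fmt.Println(binary.BigEndian.Uint16(old[1:])) // 25
	fmt.Println(binary.BigEndian.Uint16(upd[1:])) // 29
}
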
@@ -36,6 +36,7 @@ import (
	"github.com/minio/pkg/v3/env"
	"github.com/minio/pkg/v3/wildcard"
	"github.com/minio/pkg/v3/workers"
+	"github.com/minio/pkg/v3/xtime"
	"gopkg.in/yaml.v3"
)

@@ -116,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error {
// BatchJobExpireFilter holds all the filters currently supported for batch replication
type BatchJobExpireFilter struct {
	line, col int
-	OlderThan     time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
+	OlderThan     xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
	CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
	Tags          []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
	Metadata      []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
@@ -162,7 +163,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
	if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
		return false
	}
-	if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
+	if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
		return false
	}

@@ -194,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
			return false
		}
	}
-
	}
+
	if len(ef.Metadata) > 0 && !obj.DeleteMarker {
		for _, kv := range ef.Metadata {
			// Object (version) must match all x-amz-meta and
@@ -280,7 +281,7 @@ type BatchJobExpire struct {
	line, col int
	APIVersion string `yaml:"apiVersion" json:"apiVersion"`
	Bucket     string `yaml:"bucket" json:"bucket"`
-	Prefix     string `yaml:"prefix" json:"prefix"`
+	Prefix     BatchJobPrefix `yaml:"prefix" json:"prefix"`
	NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
	Retry           BatchJobRetry `yaml:"retry" json:"retry"`
	Rules           []BatchJobExpireFilter `yaml:"rules" json:"rules"`
@@ -288,6 +289,16 @@ type BatchJobExpire struct {

var _ yaml.Unmarshaler = &BatchJobExpire{}

+// RedactSensitive will redact any sensitive information in b.
+func (r *BatchJobExpire) RedactSensitive() {
+	if r == nil {
+		return
+	}
+	if r.NotificationCfg.Token != "" {
+		r.NotificationCfg.Token = redactedText
+	}
+}
+
// UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
	type expireJob BatchJobExpire
@@ -340,8 +351,24 @@ func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versio
		PrefixEnabledFn:  vc.PrefixEnabled,
		VersionSuspended: vc.Suspended(),
	}
-	_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
-	return errs
+
+	allErrs := make([]error, 0, len(objsToDel))
+
+	for {
+		count := len(objsToDel)
+		if count == 0 {
+			break
+		}
+		if count > maxDeleteList {
+			count = maxDeleteList
+		}
+		_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts)
+		allErrs = append(allErrs, errs...)
+		// Next batch of deletion
+		objsToDel = objsToDel[count:]
+	}
+
+	return allErrs
}

const (
@@ -371,9 +398,12 @@ func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {

func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
	vc, _ := globalBucketVersioningSys.Get(r.Bucket)
-	retryAttempts := r.Retry.Attempts
+	retryAttempts := job.Expire.Retry.Attempts
+	if retryAttempts <= 0 {
+		retryAttempts = batchExpireJobDefaultRetries
+	}
	delay := job.Expire.Retry.Delay
-	if delay == 0 {
+	if delay <= 0 {
		delay = batchExpireJobDefaultRetryDelay
	}

@@ -394,12 +424,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
		go func(toExpire []expireObjInfo) {
			defer wk.Give()

-			toExpireAll := make([]ObjectInfo, 0, len(toExpire))
+			toExpireAll := make([]expireObjInfo, 0, len(toExpire))
			toDel := make([]ObjectToDelete, 0, len(toExpire))
			oiCache := newObjInfoCache()
			for _, exp := range toExpire {
				if exp.ExpireAll {
-					toExpireAll = append(toExpireAll, exp.ObjectInfo)
+					toExpireAll = append(toExpireAll, exp)
					continue
				}
				// Cache ObjectInfo value via pointers for
@@ -415,14 +445,14 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
				oiCache.Add(od, &exp.ObjectInfo)
			}

-			var done bool
			// DeleteObject(deletePrefix: true) to expire all versions of an object
			for _, exp := range toExpireAll {
				var success bool
				for attempts := 1; attempts <= retryAttempts; attempts++ {
					select {
					case <-ctx.Done():
-						done = true
+						ri.trackMultipleObjectVersions(exp, success)
+						return
					default:
					}
					stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
@@ -439,14 +469,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
						break
					}
				}
-				ri.trackMultipleObjectVersions(r.Bucket, exp, success)
-				if done {
-					break
-				}
-			}
-
-			if done {
-				return
+				ri.trackMultipleObjectVersions(exp, success)
			}

			// DeleteMultiple objects
@@ -504,7 +527,8 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo

type expireObjInfo struct {
	ObjectInfo
-	ExpireAll bool
+	ExpireAll         bool
+	DeleteMarkerCount int64
}

// Start the batch expiration job, resumes if there was a pending job via "job.ID"
@@ -514,7 +538,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
		JobType:   string(job.Type()),
		StartTime: job.Started,
	}
-	if err := ri.load(ctx, api, job); err != nil {
+	if err := ri.loadOrInit(ctx, api, job); err != nil {
		return err
	}

@@ -534,40 +558,58 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
		return err
	}

-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	ctx, cancelCause := context.WithCancelCause(ctx)
+	defer cancelCause(nil)

	results := make(chan itemOrErr[ObjectInfo], workerSize)
-	if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
-		Marker:       lastObject,
-		LatestOnly:   false, // we need to visit all versions of the object to implement purge: retainVersions
-		VersionsSort: WalkVersionsSortDesc,
-	}); err != nil {
-		// Do not need to retry if we can't list objects on source.
-		return err
-	}
+	go func() {
+		prefixes := r.Prefix.F()
+		if len(prefixes) == 0 {
+			prefixes = []string{""}
+		}
+		for _, prefix := range prefixes {
+			prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize)
+			err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{
+				Marker:       lastObject,
+				LatestOnly:   false, // we need to visit all versions of the object to implement purge: retainVersions
+				VersionsSort: WalkVersionsSortDesc,
+			})
+			if err != nil {
+				cancelCause(err)
+				xioutil.SafeClose(results)
+				return
+			}
+			for result := range prefixResultCh {
+				results <- result
+			}
+		}
+		xioutil.SafeClose(results)
+	}()

	// Goroutine to periodically save batch-expire job's in-memory state
	saverQuitCh := make(chan struct{})
	go func() {
		saveTicker := time.NewTicker(10 * time.Second)
		defer saveTicker.Stop()
-		for {
+		quit := false
+		after := time.Minute
+		for !quit {
			select {
			case <-saveTicker.C:
-				// persist in-memory state to disk after every 10secs.
-				batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
-
			case <-ctx.Done():
				// persist in-memory state immediately before exiting due to context cancellation.
-				batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
-				return
+				quit = true
			case <-saverQuitCh:
				// persist in-memory state immediately to disk.
-				batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
-				return
+				quit = true
			}
+
+			if quit {
+				// save immediately if we are quitting
+				after = 0
+			}
+
+			ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
+			batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
+			cancel()
		}
	}()

@@ -583,76 +625,115 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
		matchedFilter BatchJobExpireFilter
		versionsCount int
		toDel         []expireObjInfo
-		failed        bool
+		done          bool
	)
+	failed := true
-	for result := range results {
-		if result.Err != nil {
-			failed = true
-			batchLogIf(ctx, result.Err)
-			continue
-		}
+	deleteMarkerCountMap := map[string]int64{}
+	pushToExpire := func() {
+		// set preObject deleteMarkerCount
+		if len(toDel) > 0 {
+			lastDelIndex := len(toDel) - 1
+			lastDel := toDel[lastDelIndex]
+			if lastDel.ExpireAll {
+				toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name]
+				// delete the key
+				delete(deleteMarkerCountMap, lastDel.Name)
+			}
+		}
+	}

-		// Apply filter to find the matching rule to apply expiry
-		// actions accordingly.
-		// nolint:gocritic
-		if result.Item.IsLatest {
-			// send down filtered entries to be deleted using
-			// DeleteObjects method
-			if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
-				xfer := make([]expireObjInfo, len(toDel))
-				copy(xfer, toDel)
-
-				var done bool
-				select {
-				case <-ctx.Done():
-					done = true
-				case expireCh <- xfer:
-					toDel = toDel[:0] // resetting toDel
-				}
-				if done {
-					break
-				}
-			}
-			var match BatchJobExpireFilter
-			var found bool
-			for _, rule := range r.Rules {
-				if rule.Matches(result.Item, now) {
-					match = rule
-					found = true
-					break
-				}
-			}
-			if !found {
-				continue
-			}
-
-			prevObj = result.Item
-			matchedFilter = match
-			versionsCount = 1
-			// Include the latest version
-			if matchedFilter.Purge.RetainVersions == 0 {
-				toDel = append(toDel, expireObjInfo{
-					ObjectInfo: result.Item,
-					ExpireAll:  true,
-				})
-				continue
-			}
-		} else if prevObj.Name == result.Item.Name {
-			if matchedFilter.Purge.RetainVersions == 0 {
-				continue // including latest version in toDel suffices, skipping other versions
-			}
-			versionsCount++
-		} else {
-			continue
-		}
-
-		if versionsCount <= matchedFilter.Purge.RetainVersions {
-			continue // retain versions
-		}
-		toDel = append(toDel, expireObjInfo{
-			ObjectInfo: result.Item,
-		})
-	}
+	for {
+		select {
+		case result, ok := <-results:
+			if !ok {
+				done = true
+				break
+			}
+			if result.Err != nil {
+				failed = true
+				batchLogIf(ctx, result.Err)
+				continue
+			}
+			if result.Item.DeleteMarker {
+				deleteMarkerCountMap[result.Item.Name]++
+			}
+			// Apply filter to find the matching rule to apply expiry
+			// actions accordingly.
+			// nolint:gocritic
+			if result.Item.IsLatest {
+				// send down filtered entries to be deleted using
+				// DeleteObjects method
+				if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
+					xfer := make([]expireObjInfo, len(toDel))
+					copy(xfer, toDel)
+					select {
+					case expireCh <- xfer:
+						toDel = toDel[:0] // resetting toDel
+					case <-ctx.Done():
+						done = true
+					}
+				}
+				var match BatchJobExpireFilter
+				var found bool
+				for _, rule := range r.Rules {
+					if rule.Matches(result.Item, now) {
+						match = rule
+						found = true
+						break
+					}
+				}
+				if !found {
+					continue
+				}
+
+				if prevObj.Name != result.Item.Name {
+					// switch the object
+					pushToExpire()
+				}
+
+				prevObj = result.Item
+				matchedFilter = match
+				versionsCount = 1
+				// Include the latest version
+				if matchedFilter.Purge.RetainVersions == 0 {
+					toDel = append(toDel, expireObjInfo{
+						ObjectInfo: result.Item,
+						ExpireAll:  true,
+					})
+					continue
+				}
+			} else if prevObj.Name == result.Item.Name {
+				if matchedFilter.Purge.RetainVersions == 0 {
+					continue // including latest version in toDel suffices, skipping other versions
+				}
+				versionsCount++
+			} else {
+				// switch the object
+				pushToExpire()
+				// a file switched with no LatestVersion, logging it
+				batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name))
+				continue
+			}
+
+			if versionsCount <= matchedFilter.Purge.RetainVersions {
+				continue // retain versions
+			}
+			toDel = append(toDel, expireObjInfo{
+				ObjectInfo: result.Item,
+			})
+			pushToExpire()
+		case <-ctx.Done():
+			done = true
+		}
+		if done {
+			break
+		}
+	}
+
+	if context.Cause(ctx) != nil {
+		xioutil.SafeClose(expireCh)
+		return context.Cause(ctx)
+	}
+	pushToExpire()
	// Send any remaining objects downstream
	if len(toDel) > 0 {
		select {

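The Expire hunk above stops passing the whole objsToDel slice to DeleteObjects at once and instead walks it in maxDeleteList-sized chunks, collecting per-object errors. A generic sketch of that chunking loop; maxDeleteList's actual value is defined elsewhere in the codebase, and 1000 here is only an assumption matching S3's DeleteObjects key limit:

package main

import "fmt"

const maxDeleteList = 1000 // assumed; S3's DeleteObjects caps at 1000 keys

// deleteBatch is a stand-in for api.DeleteObjects: one error slot per key.
func deleteBatch(keys []string) []error { return make([]error, len(keys)) }

func deleteAll(keys []string) []error {
	allErrs := make([]error, 0, len(keys))
	for len(keys) > 0 {
		count := len(keys)
		if count > maxDeleteList {
			count = maxDeleteList
		}
		allErrs = append(allErrs, deleteBatch(keys[:count])...)
		keys = keys[count:] // next chunk
	}
	return allErrs
}

func main() {
	keys := make([]string, 2500)
	fmt.Println(len(deleteAll(keys))) // 2500
}
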
@@ -39,7 +39,7 @@ func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
				return
			}
		case "Prefix":
-			z.Prefix, err = dc.ReadString()
+			err = z.Prefix.DecodeMsg(dc)
			if err != nil {
				err = msgp.WrapError(err, "Prefix")
				return
@@ -114,7 +114,7 @@ func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteString(z.Prefix)
+	err = z.Prefix.EncodeMsg(en)
	if err != nil {
		err = msgp.WrapError(err, "Prefix")
		return
@@ -171,7 +171,11 @@ func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.AppendString(o, z.Bucket)
	// string "Prefix"
	o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
-	o = msgp.AppendString(o, z.Prefix)
+	o, err = z.Prefix.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "Prefix")
+		return
+	}
	// string "NotificationCfg"
	o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
	o, err = z.NotificationCfg.MarshalMsg(o)
@@ -230,7 +234,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
				return
			}
		case "Prefix":
-			z.Prefix, bts, err = msgp.ReadStringBytes(bts)
+			bts, err = z.Prefix.UnmarshalMsg(bts)
			if err != nil {
				err = msgp.WrapError(err, "Prefix")
				return
@@ -280,7 +284,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpire) Msgsize() (s int) {
-	s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
+	s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
	for za0001 := range z.Rules {
		s += z.Rules[za0001].Msgsize()
	}
@@ -306,7 +310,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
	}
	switch msgp.UnsafeString(field) {
	case "OlderThan":
-		z.OlderThan, err = dc.ReadDuration()
+		err = z.OlderThan.DecodeMsg(dc)
		if err != nil {
			err = msgp.WrapError(err, "OlderThan")
			return
@@ -433,7 +437,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteDuration(z.OlderThan)
+	err = z.OlderThan.EncodeMsg(en)
	if err != nil {
		err = msgp.WrapError(err, "OlderThan")
		return
@@ -544,7 +548,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
	// map header, size 8
	// string "OlderThan"
	o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
-	o = msgp.AppendDuration(o, z.OlderThan)
+	o, err = z.OlderThan.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "OlderThan")
+		return
+	}
	// string "CreatedBefore"
	o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
	if z.CreatedBefore == nil {
@@ -613,7 +621,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
	}
	switch msgp.UnsafeString(field) {
	case "OlderThan":
-		z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
+		bts, err = z.OlderThan.UnmarshalMsg(bts)
		if err != nil {
			err = msgp.WrapError(err, "OlderThan")
			return
@@ -734,7 +742,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpireFilter) Msgsize() (s int) {
-	s = 1 + 10 + msgp.DurationSize + 14
+	s = 1 + 10 + z.OlderThan.Msgsize() + 14
	if z.CreatedBefore == nil {
		s += msgp.NilSize
	} else {

@@ -18,9 +18,10 @@
package cmd

import (
+	"slices"
	"testing"

-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
)

func TestParseBatchJobExpire(t *testing.T) {
@@ -32,7 +33,7 @@ expire: # Expire objects that match a condition
  rules:
    - type: object # regular objects with zero or more older versions
      name: NAME # match object names that satisfy the wildcard expression.
-      olderThan: 70h # match objects older than this value
+      olderThan: 7d10h # match objects older than this value
      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
      tags:
        - key: name
@@ -64,8 +65,61 @@ expire: # Expire objects that match a condition
      delay: 500ms # least amount of delay between each retry
`
	var job BatchJobRequest
-	err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
+	err := yaml.Unmarshal([]byte(expireYaml), &job)
	if err != nil {
		t.Fatal("Failed to parse batch-job-expire yaml", err)
	}
+	if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) {
+		t.Fatal("Failed to parse batch-job-expire yaml")
+	}
+
+	multiPrefixExpireYaml := `
+expire: # Expire objects that match a condition
+  apiVersion: v1
+  bucket: mybucket # Bucket where this batch job will expire matching objects from
+  prefix: # (Optional) Prefix under which this job will expire objects matching the rules below.
+    - myprefix
+    - myprefix1
+  rules:
+    - type: object # regular objects with zero or more older versions
+      name: NAME # match object names that satisfy the wildcard expression.
+      olderThan: 7d10h # match objects older than this value
+      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
+      tags:
+        - key: name
+          value: pick* # match objects with tag 'name', all values starting with 'pick'
+      metadata:
+        - key: content-type
+          value: image/* # match objects with 'content-type', all values starting with 'image/'
+      size:
+        lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
+        greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
+      purge:
+        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
+        # retainVersions: 5 # keep the latest 5 versions of the object.
+
+    - type: deleted # objects with delete marker as their latest version
+      name: NAME # match object names that satisfy the wildcard expression.
+      olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
+      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
+      purge:
+        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
+        # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
+
+  notify:
+    endpoint: https://notify.endpoint # notification endpoint to receive job completion status
+    token: Bearer xxxxx # optional authentication token for the notification endpoint
+
+  retry:
+    attempts: 10 # number of retries for the job before giving up
+    delay: 500ms # least amount of delay between each retry
+`
+	var multiPrefixJob BatchJobRequest
+	err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob)
+	if err != nil {
+		t.Fatal("Failed to parse batch-job-expire yaml", err)
+	}
+	if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) {
+		t.Fatal("Failed to parse batch-job-expire yaml")
+	}
}

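The test above exercises BatchJobPrefix accepting either a single string or a list in the job YAML. A minimal sketch of such a dual-form unmarshaler using gopkg.in/yaml.v3; the Prefix type here is a simplified stand-in for MinIO's BatchJobPrefix, and only the scalar-or-sequence behavior is shown:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Prefix accepts either a scalar ("myprefix") or a sequence in YAML,
// mirroring the behavior the test above asserts via Prefix.F().
type Prefix []string

func (p *Prefix) UnmarshalYAML(val *yaml.Node) error {
	var one string
	if err := val.Decode(&one); err == nil {
		*p = Prefix{one}
		return nil
	}
	var many []string
	if err := val.Decode(&many); err != nil {
		return err
	}
	*p = Prefix(many)
	return nil
}

func main() {
	var one, many struct {
		Prefix Prefix `yaml:"prefix"`
	}
	if err := yaml.Unmarshal([]byte("prefix: myprefix"), &one); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte("prefix: [myprefix, myprefix1]"), &many); err != nil {
		panic(err)
	}
	fmt.Println(one.Prefix, many.Prefix) // [myprefix] [myprefix myprefix1]
}
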
File diff suppressed because it is too large
@@ -6,6 +6,89 @@ import (
	"github.com/tinylib/msgp/msgp"
)

+// DecodeMsg implements msgp.Decodable
+func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) {
+	var zb0002 uint32
+	zb0002, err = dc.ReadArrayHeader()
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	if cap((*z)) >= int(zb0002) {
+		(*z) = (*z)[:zb0002]
+	} else {
+		(*z) = make(BatchJobPrefix, zb0002)
+	}
+	for zb0001 := range *z {
+		(*z)[zb0001], err = dc.ReadString()
+		if err != nil {
+			err = msgp.WrapError(err, zb0001)
+			return
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) {
+	err = en.WriteArrayHeader(uint32(len(z)))
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0003 := range z {
+		err = en.WriteString(z[zb0003])
+		if err != nil {
+			err = msgp.WrapError(err, zb0003)
+			return
+		}
+	}
+	return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) {
+	o = msgp.Require(b, z.Msgsize())
+	o = msgp.AppendArrayHeader(o, uint32(len(z)))
+	for zb0003 := range z {
+		o = msgp.AppendString(o, z[zb0003])
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var zb0002 uint32
+	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	if cap((*z)) >= int(zb0002) {
+		(*z) = (*z)[:zb0002]
+	} else {
+		(*z) = make(BatchJobPrefix, zb0002)
+	}
+	for zb0001 := range *z {
+		(*z)[zb0001], bts, err = msgp.ReadStringBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, zb0001)
+			return
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z BatchJobPrefix) Msgsize() (s int) {
+	s = msgp.ArrayHeaderSize
+	for zb0003 := range z {
+		s += msgp.StringPrefixSize + len(z[zb0003])
+	}
+	return
+}
+
// DecodeMsg implements msgp.Decodable
func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte

@@ -9,6 +9,119 @@ import (
"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) {
v := BatchJobPrefix{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}

left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}

func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}

func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

func TestEncodeDecodeBatchJobPrefix(t *testing.T) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)

m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate")
}

vn := BatchJobPrefix{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}

buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}

func BenchmarkEncodeBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}

func BenchmarkDecodeBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
v := BatchJobRequest{}
bts, err := v.MarshalMsg(nil)

cmd/batch-handlers_test.go (new file, +75)
@@ -0,0 +1,75 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"slices"
"testing"

"gopkg.in/yaml.v3"
)

func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) {
type args struct {
yamlStr string
}
type PrefixTemp struct {
Prefix BatchJobPrefix `yaml:"prefix"`
}
tests := []struct {
name string
b PrefixTemp
args args
want []string
wantErr bool
}{
{
name: "test1",
b: PrefixTemp{},
args: args{
yamlStr: `
prefix: "foo"
`,
},
want: []string{"foo"},
wantErr: false,
},
{
name: "test2",
b: PrefixTemp{},
args: args{
yamlStr: `
prefix:
- "foo"
- "bar"
`,
},
want: []string{"foo", "bar"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr {
t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
}
if !slices.Equal(tt.b.Prefix.F(), tt.want) {
t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want)
}
})
}
}
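The test above exercises a prefix field that accepts either a single string or a list of strings. For context, a minimal sketch of how such a scalar-or-sequence type can be decoded with gopkg.in/yaml.v3 is shown below; the actual BatchJobPrefix implementation in this commit may differ in details such as error wrapping, and the `Prefixes` name here is hypothetical.

```go
// Sketch only: a string-slice type whose UnmarshalYAML accepts both
// `prefix: "foo"` and a `prefix:` sequence, assuming gopkg.in/yaml.v3.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Prefixes []string

func (p *Prefixes) UnmarshalYAML(value *yaml.Node) error {
	switch value.Kind {
	case yaml.ScalarNode:
		// single string: wrap it in a one-element slice
		var s string
		if err := value.Decode(&s); err != nil {
			return err
		}
		*p = Prefixes{s}
		return nil
	case yaml.SequenceNode:
		// list of strings: decode directly
		var list []string
		if err := value.Decode(&list); err != nil {
			return err
		}
		*p = list
		return nil
	default:
		return fmt.Errorf("prefix must be a string or a list of strings")
	}
}
```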
@@ -21,8 +21,8 @@ import (
"time"

miniogo "github.com/minio/minio-go/v7"

"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v3/xtime"
)

//go:generate msgp -file $GOFILE
@@ -65,12 +65,12 @@ import (

// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
NewerThan xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
}

// BatchJobReplicateFlags various configurations for replication job definition currently includes
@@ -151,7 +151,7 @@ func (t BatchJobReplicateTarget) ValidPath() bool {
type BatchJobReplicateSource struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`

@@ -411,7 +411,7 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
err = z.Prefix.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -514,7 +514,7 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteString(z.Prefix)
err = z.Prefix.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -600,7 +600,11 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
o, err = z.Prefix.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
@@ -664,7 +668,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
bts, err = z.Prefix.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -742,7 +746,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobReplicateSource) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
return
}

@@ -1409,13 +1413,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, err = dc.ReadDuration()
err = z.NewerThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1489,7 +1493,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.NewerThan)
err = z.NewerThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
@@ -1499,7 +1503,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1567,10 +1571,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) {
// map header, size 6
// string "NewerThan"
o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.NewerThan)
o, err = z.NewerThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
// string "OlderThan"
o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedAfter"
o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72)
o = msgp.AppendTime(o, z.CreatedAfter)
@@ -1619,13 +1631,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.NewerThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1694,7 +1706,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchReplicateFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
for za0001 := range z.Tags {
s += z.Tags[za0001].Msgsize()
}

cmd/batch-replicate_test.go (new file, +182)
@@ -0,0 +1,182 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"slices"
"testing"

"gopkg.in/yaml.v3"
)

func TestParseBatchJobReplicate(t *testing.T) {
replicateYaml := `
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: object-prefix1 # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
# endpoint: "http://127.0.0.1:9000"
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
# credentials:
# accessKey: minioadmin # Required
# secretKey: minioadmin # Required
# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: true # optionally turn-off snowball archive transfer
# batch: 100 # upto this many objects per archive
# inmemory: true # indicates if the archive must be staged locally or in-memory
# compress: false # S2/Snappy compressed archive
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
# skipErrs: false # skips any source side read() errors

# target where the objects must be replicated
target:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: stage # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http://127.0.0.1:9001"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: minioadmin
secretKey: minioadmin
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

# NOTE: All flags are optional
# - filtering criteria only applies for all source objects match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips successfully previously replaced objects)
flags:
filter:
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
# createdAfter: "date" # match objects created after "date"
# createdBefore: "date" # match objects created before "date"

## NOTE: tags are not supported when "source" is remote.
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

# notify:
# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
# token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
# attempts: 10 # number of retries for the job before giving up
# delay: "500ms" # least amount of delay between each retry

`
var job BatchJobRequest
err := yaml.Unmarshal([]byte(replicateYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
if !slices.Equal(job.Replicate.Source.Prefix.F(), []string{"object-prefix1"}) {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
multiPrefixReplicateYaml := `
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: # 'PREFIX' is optional
- object-prefix1
- object-prefix2
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
# endpoint: "http://127.0.0.1:9000"
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
# credentials:
# accessKey: minioadmin # Required
# secretKey: minioadmin # Required
# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: true # optionally turn-off snowball archive transfer
# batch: 100 # upto this many objects per archive
# inmemory: true # indicates if the archive must be staged locally or in-memory
# compress: false # S2/Snappy compressed archive
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
# skipErrs: false # skips any source side read() errors

# target where the objects must be replicated
target:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: stage # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http://127.0.0.1:9001"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: minioadmin
secretKey: minioadmin
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

# NOTE: All flags are optional
# - filtering criteria only applies for all source objects match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips successfully previously replaced objects)
flags:
filter:
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
# createdAfter: "date" # match objects created after "date"
# createdBefore: "date" # match objects created before "date"

## NOTE: tags are not supported when "source" is remote.
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

# notify:
# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
# token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
# attempts: 10 # number of retries for the job before giving up
# delay: "500ms" # least amount of delay between each retry

`
var multiPrefixJob BatchJobRequest
err = yaml.Unmarshal([]byte(multiPrefixReplicateYaml), &multiPrefixJob)
if err != nil {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
if !slices.Equal(multiPrefixJob.Replicate.Source.Prefix.F(), []string{"object-prefix1", "object-prefix2"}) {
t.Fatal("Failed to parse batch-job-replicate yaml")
}
}
@@ -257,7 +257,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
if ri.Complete {
@@ -267,8 +267,12 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object

retryAttempts := job.KeyRotate.Flags.Retry.Attempts
if retryAttempts <= 0 {
retryAttempts = batchKeyRotateJobDefaultRetries
}
delay := job.KeyRotate.Flags.Retry.Delay
if delay == 0 {
if delay <= 0 {
delay = batchKeyRotateJobDefaultRetryDelay
}

@@ -354,7 +358,6 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
return err
}

retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)

results := make(chan itemOrErr[ObjectInfo], 100)
@@ -389,6 +392,17 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
stopFn(result, err)
batchLogIf(ctx, err)
success = false
if attempts >= retryAttempts {
auditOptions := AuditLogOptions{
Event: "KeyRotate",
APIName: "StartBatchJob",
Bucket: result.Bucket,
Object: result.Name,
VersionID: result.VersionID,
Error: err.Error(),
}
auditLogInternal(ctx, auditOptions)
}
} else {
stopFn(result, nil)
}

@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -54,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)

@@ -20,6 +20,7 @@ package cmd
import (
"bytes"
"context"
"errors"
"hash"
"io"
"sync"
@@ -37,12 +38,22 @@ type streamingBitrotWriter struct {
shardSize int64
canClose *sync.WaitGroup
byteBuf []byte
finished bool
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if b.finished {
return 0, errors.New("bitrot write not allowed")
}
if int64(len(p)) > b.shardSize {
return 0, errors.New("unexpected bitrot buffer size")
}
if int64(len(p)) < b.shardSize {
b.finished = true
}
b.h.Reset()
b.h.Write(p)
hashBytes := b.h.Sum(nil)
@@ -141,13 +152,7 @@ func (b *streamingBitrotReader) Close() error {
}
if closer, ok := b.rc.(io.Closer); ok {
// drain the body for connection reuse at network layer.
xhttp.DrainBody(struct {
io.Reader
io.Closer
}{
Reader: b.rc,
Closer: closeWrapper(func() error { return nil }),
})
xhttp.DrainBody(io.NopCloser(b.rc))
return closer.Close()
}
return nil
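
The simplification above relies on a common idiom: drain a response stream before closing it so the underlying network connection can be reused. A generic standard-library sketch of the same pattern is below; the `drainAndClose` name is hypothetical, and `xhttp.DrainBody` plays this role in the codebase.

```go
// drainAndClose discards any unread bytes from rc before closing it,
// which lets net/http reuse the underlying TCP connection.
// Sketch only, standard library only.
func drainAndClose(rc io.ReadCloser) error {
	_, copyErr := io.Copy(io.Discard, rc) // discard the remaining payload
	closeErr := rc.Close()
	if copyErr != nil {
		return copyErr
	}
	return closeErr
}
```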
@@ -128,14 +128,20 @@ func closeBitrotReaders(rs []io.ReaderAt) {
}

// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
func closeBitrotWriters(ws []io.Writer) []error {
errs := make([]error, len(ws))
for i, w := range ws {
if w == nil {
errs[i] = errDiskNotFound
continue
}
if bw, ok := w.(io.Closer); ok {
errs[i] = bw.Close()
} else {
errs[i] = nil
}
}
return errs
}
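
With closeBitrotWriters now returning one error per writer, a caller can map each failure back to the disk at the same index. A hypothetical caller-side sketch (the `writers` slice and `errDiskNotFound` are assumed from the surrounding code):

```go
// Sketch: log each close failure together with its writer index,
// skipping slots that were never backed by a disk.
for i, err := range closeBitrotWriters(writers) {
	if err != nil && err != errDiskNotFound {
		log.Printf("closing bitrot writer %d failed: %v", i, err)
	}
}
```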

// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
@@ -178,7 +184,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
return errFileCorrupt
}

bufp := xioutil.ODirectPoolSmall.Get().(*[]byte)
bufp := xioutil.ODirectPoolSmall.Get()
defer xioutil.ODirectPoolSmall.Put(bufp)

for left > 0 {

@@ -18,7 +18,6 @@
package cmd

import (
"context"
"io"
"testing"
)
@@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
t.Fatal(err)
}

disk.MakeVol(context.Background(), volume)
disk.MakeVol(t.Context(), volume)

writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)

@@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit)

bs.mu.RLock()
for _, i := range bs.info {
traceInfo = append(traceInfo, i)
}
traceInfo = append(traceInfo, bs.info...)
bs.mu.RUnlock()

return traceInfo

@@ -19,9 +19,13 @@ package cmd

import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"os"
"reflect"
"strings"
"sync"
@@ -43,10 +47,15 @@ type ServerSystemConfig struct {
NEndpoints int
CmdLines []string
MinioEnv map[string]string
Checksum string
}

// Diff - returns error on first difference found in two configs.
func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
if s1.Checksum != s2.Checksum {
return fmt.Errorf("Expected MinIO binary checksum: %s, seen: %s", s1.Checksum, s2.Checksum)
}

ns1 := s1.NEndpoints
ns2 := s2.NEndpoints
if ns1 != ns2 {
@@ -82,7 +91,7 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
extra = append(extra, k)
}
}
msg := "Expected same MINIO_ environment variables and values across all servers: "
msg := "Expected MINIO_* environment name and values across all servers to be same: "
if len(missing) > 0 {
msg += fmt.Sprintf(`Missing environment values: %v. `, missing)
}
@@ -97,14 +106,17 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
}

var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
"MINIO_ROOT_USER": {},
"MINIO_ROOT_PASSWORD": {},
"MINIO_ACCESS_KEY": {},
"MINIO_SECRET_KEY": {},
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
"MINIO_ROOT_USER": {},
"MINIO_ROOT_PASSWORD": {},
"MINIO_ACCESS_KEY": {},
"MINIO_SECRET_KEY": {},
"MINIO_OPERATOR_VERSION": {},
"MINIO_VSPHERE_PLUGIN_VERSION": {},
"MINIO_CI_CD": {},
}

func getServerSystemCfg() *ServerSystemConfig {
@@ -120,7 +132,7 @@ func getServerSystemCfg() *ServerSystemConfig {
}
envValues[envK] = logger.HashString(env.Get(envK, ""))
}
scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues}
scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues, Checksum: binaryChecksum}
var cmdLines []string
for _, ep := range globalEndpoints {
cmdLines = append(cmdLines, ep.CmdLine)
@@ -167,6 +179,26 @@ func (client *bootstrapRESTClient) String() string {
return client.gridConn.String()
}

var binaryChecksum = getBinaryChecksum()

func getBinaryChecksum() string {
mw := md5.New()
binPath, err := os.Executable()
if err != nil {
logger.Error("Calculating checksum failed: %s", err)
return "00000000000000000000000000000000"
}
b, err := os.Open(binPath)
if err != nil {
logger.Error("Calculating checksum failed: %s", err)
return "00000000000000000000000000000000"
}

defer b.Close()
io.Copy(mw, b)
return hex.EncodeToString(mw.Sum(nil))
}
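
One detail worth noting in getBinaryChecksum above: the io.Copy error is discarded, so a short read would silently yield a wrong digest. A stricter standalone variant might look like the sketch below (hypothetical helper, not the committed code; uses only crypto/md5, encoding/hex, io, and os):

```go
// fileMD5Hex returns the hex-encoded MD5 digest of the file at path,
// propagating read errors instead of ignoring them.
func fileMD5Hex(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err // a partial read must not produce a digest
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```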

func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools, gm *grid.Manager) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointServerPools, gm)
@@ -196,7 +228,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
err := clnt.Verify(ctx, srcCfg)
mu.Lock()
if err != nil {
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
bootstrapTraceMsg(fmt.Sprintf("bootstrapVerify: %v, endpoint: %s", err, clnt))
if !isNetworkError(err) {
bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))

@@ -79,6 +79,12 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
}
z.MinioEnv[za0002] = za0003
}
case "Checksum":
z.Checksum, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -92,9 +98,9 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// map header, size 4
// write "NEndpoints"
err = en.Append(0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
err = en.Append(0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
if err != nil {
return
}
@@ -142,15 +148,25 @@ func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
// write "Checksum"
err = en.Append(0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
if err != nil {
return
}
err = en.WriteString(z.Checksum)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// map header, size 4
// string "NEndpoints"
o = append(o, 0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
o = append(o, 0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
o = msgp.AppendInt(o, z.NEndpoints)
// string "CmdLines"
o = append(o, 0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73)
@@ -165,6 +181,9 @@ func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, za0002)
o = msgp.AppendString(o, za0003)
}
// string "Checksum"
o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
o = msgp.AppendString(o, z.Checksum)
return
}

@@ -241,6 +260,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
z.MinioEnv[za0002] = za0003
}
case "Checksum":
z.Checksum, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -266,5 +291,6 @@ func (z *ServerSystemConfig) Msgsize() (s int) {
s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
}
}
s += 9 + msgp.StringPrefixSize + len(z.Checksum)
return
}

@@ -28,6 +28,7 @@ import (
"errors"
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
@@ -51,9 +52,9 @@ import (
sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
@@ -92,7 +93,7 @@ const (
// -- If IP of the entry doesn't match, this means entry is
//
// for another instance. Log an error to console.
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
if len(buckets) == 0 {
return
}
@@ -113,10 +114,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
domainMissing := err == dns.ErrDomainMissing
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
bucketsSet.Add(bucket)
r, ok := dnsBuckets[bucket]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
bucketsToBeUpdated.Add(bucket)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
@@ -135,7 +136,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
bucketsToBeUpdated.Add(bucket)
continue
}

@@ -144,7 +145,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
bucketsInConflict.Add(bucket)
}
}

@@ -343,11 +344,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}

sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})

} else {
// Invoke the list buckets.
var err error
@@ -428,7 +427,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

// Content-Md5 is required should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
if !validateLengthAndChecksum(r) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
return
}
@@ -560,7 +559,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
object.VersionPurgeStatus = replication.VersionPurgePending
object.VersionPurgeStatuses = dsc.PendingStatus()
} else {
object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
@@ -670,9 +669,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
continue
}

defer globalCacheConfig.Delete(bucket, dobj.ObjectName)

if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == replication.VersionPurgePending) {
// copy so we can re-add null ID.
dobj := dobj
if isDirObject(dobj.ObjectName) && dobj.VersionID == "" {
@@ -842,7 +839,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

}
apiErr := ErrBucketAlreadyExists
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
@@ -890,6 +886,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
})
}

// multipartReader is just like https://pkg.go.dev/net/http#Request.MultipartReader but
// rejects multipart/mixed as its not supported in S3 API.
func multipartReader(r *http.Request) (*multipart.Reader, error) {
v := r.Header.Get("Content-Type")
if v == "" {
return nil, http.ErrNotMultipart
}
if r.Body == nil {
return nil, errors.New("missing form body")
}
d, params, err := mime.ParseMediaType(v)
if err != nil {
return nil, http.ErrNotMultipart
}
if d != "multipart/form-data" {
return nil, http.ErrNotMultipart
}
boundary, ok := params["boundary"]
if !ok {
return nil, http.ErrMissingBoundary
}
return multipart.NewReader(r.Body, boundary), nil
}
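
Usage of this stricter reader mirrors http.Request.MultipartReader. A quick sketch of a handler consuming it is below (hypothetical handler, error responses simplified; `multipartReader` is the function defined in the hunk above):

```go
// Sketch: iterate the form parts produced by multipartReader.
func handlePost(w http.ResponseWriter, r *http.Request) {
	mp, err := multipartReader(r)
	if err != nil {
		// e.g. multipart/mixed or a missing boundary is rejected here
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for {
		part, err := mp.NextPart()
		if errors.Is(err, io.EOF) {
			break // all parts consumed
		}
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		_ = part.FormName() // dispatch on the field name here
		part.Close()
	}
}
```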
|
||||
|
||||
// PostPolicyBucketHandler - POST policy
|
||||
// ----------
|
||||
// This implementation of the POST operation handles object creation with a specified
|
||||
@@ -923,9 +943,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Here the parameter is the size of the form data that should
|
||||
// be loaded in memory, the remaining being put in temporary files.
|
||||
mp, err := r.MultipartReader()
|
||||
mp, err := multipartReader(r)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
|
||||
@@ -937,7 +962,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
|
||||
var (
|
||||
reader io.Reader
|
||||
fileSize int64 = -1
|
||||
actualSize int64 = -1
|
||||
fileName string
|
||||
fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100)
|
||||
)
|
||||
@@ -945,6 +970,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
maxParts := 1000
|
||||
// Canonicalize the form values into http.Header.
|
||||
formValues := make(http.Header)
|
||||
var headerLen int64
|
||||
for {
|
||||
part, err := mp.NextRawPart()
|
||||
if errors.Is(err, io.EOF) {
|
||||
@@ -986,7 +1012,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
headerLen += int64(len(name)) + int64(len(fileName))
|
||||
if name != "file" {
|
||||
if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
|
||||
dec := json.NewDecoder(part)
|
||||
@@ -997,7 +1023,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
if err := dec.Decode(&m); err != nil {
|
||||
part.Close()
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
@@ -1007,8 +1033,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
continue
|
||||
}
|
||||
|
||||
buf := bytebufferpool.Get()
|
||||
// value, store as string in memory
|
||||
n, err := io.CopyN(&b, part, maxMemoryBytes+1)
|
||||
n, err := io.CopyN(buf, part, maxMemoryBytes+1)
|
||||
value := buf.String()
|
||||
buf.Reset()
|
||||
bytebufferpool.Put(buf)
|
||||
part.Close()
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
@@ -1030,7 +1060,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
|
||||
headerLen += n
|
||||
formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], value)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1039,6 +1070,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
// The file or text content must be the last field in the form.
|
||||
// You cannot upload more than one file at a time.
|
||||
reader = part
|
||||
|
||||
possibleShardSize := (r.ContentLength - headerLen)
|
||||
if globalStorageClass.ShouldInline(possibleShardSize, false) { // keep versioned false for this check
|
||||
var b bytes.Buffer
|
||||
n, err := io.Copy(&b, reader)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
reader = &b
|
||||
actualSize = n
|
||||
}
|
||||
|
||||
// we have found the File part of the request we are done processing multipart-form
|
||||
break
|
||||
}
|
||||
@@ -1140,11 +1186,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
|
||||
clientETag, err := etag.FromContentMD5(formValues)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var forceMD5 []byte
|
||||
// Optimization: If SSE-KMS and SSE-C did not request Content-Md5. Use uuid as etag. Optionally enable this also
|
||||
// for server that is started with `--no-compat`.
|
||||
kind, _ := crypto.IsRequested(formValues)
|
||||
if !etag.ContentMD5Requested(formValues) && (kind == crypto.SSEC || kind == crypto.S3KMS || !globalServerCtxt.StrictS3Compat) {
|
||||
forceMD5 = mustGetUUIDBytes()
|
||||
}
|
||||
|
||||
hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
|
||||
Size: actualSize,
|
||||
MD5Hex: clientETag.String(),
|
||||
SHA256Hex: "",
|
||||
ActualSize: actualSize,
|
||||
DisableMD5: false,
|
||||
ForceMD5: forceMD5,
|
||||
})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if checksum != nil && checksum.Valid() {
|
||||
if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
@@ -1201,9 +1269,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
|
||||
return
|
||||
}
|
||||
opts.WantChecksum = checksum
|
||||
|
||||
fanOutOpts := fanOutOptions{Checksum: checksum}
|
||||
|
||||
if crypto.Requested(formValues) {
|
||||
if crypto.SSECopy.IsRequested(r.Header) {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
|
||||
@@ -1248,8 +1316,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
wantSize := int64(-1)
|
||||
if actualSize >= 0 {
|
||||
info := ObjectInfo{Size: actualSize}
|
||||
wantSize = info.EncryptedSize()
|
||||
}
|
||||
|
||||
// do not try to verify encrypted content/
|
||||
hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1)
hashReader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1260,6 +1335,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
}
opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1329,7 +1405,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
Key: objInfo.Name,
Error: errs[i].Error(),
})

eventArgsList = append(eventArgsList, eventArgs{
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
@@ -1342,22 +1417,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
continue
}

asize, err := objInfo.GetActualSize()
if err != nil {
asize = objInfo.Size
}

globalCacheConfig.Set(&cache.ObjectInfo{
Key: objInfo.Name,
Bucket: objInfo.Bucket,
ETag: getDecryptedETag(formValues, objInfo, false),
ModTime: objInfo.ModTime,
Expires: objInfo.Expires.UTC().Format(http.TimeFormat),
CacheControl: objInfo.CacheControl,
Metadata: cleanReservedKeys(objInfo.UserDefined),
Size: asize,
})

fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
Key: objInfo.Name,
ETag: getDecryptedETag(formValues, objInfo, false),
@@ -1452,22 +1511,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
w.Header().Set(xhttp.Location, obj)
}

asize, err := objInfo.GetActualSize()
if err != nil {
asize = objInfo.Size
}

defer globalCacheConfig.Set(&cache.ObjectInfo{
Key: objInfo.Name,
Bucket: objInfo.Bucket,
ETag: etag,
ModTime: objInfo.ModTime,
Expires: objInfo.ExpiresStr(),
CacheControl: objInfo.CacheControl,
Metadata: cleanReservedKeys(objInfo.UserDefined),
Size: asize,
})

// Notify object created event.
defer sendEvent(eventArgs{
EventName: event.ObjectCreatedPost,
@@ -1715,7 +1758,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}

globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
globalReplicationPool.deleteResyncMetadata(ctx, bucket)
globalReplicationPool.Get().deleteResyncMetadata(ctx, bucket)

// Call site replication hook.
replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
@@ -1764,6 +1807,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}

// Audit log tags.
reqInfo := logger.GetReqInfo(ctx)
reqInfo.SetTags("retention", config.String())

configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

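Editor's note: the hunk above replaces the unverified `-1` sizes with `wantSize`/`actualSize`, so the hash reader can verify the on-wire length of the encrypted payload. A minimal sketch of how such an encrypted-size bound can be derived from a plaintext size, assuming DARE-style framing with 64 KiB packages and 32 bytes of per-package overhead — the constants here are illustrative assumptions; `ObjectInfo.EncryptedSize` is the authoritative calculation:

```go
// Sketch only: derive an encrypted-size bound from a plaintext size,
// assuming 64 KiB packages with 32 bytes of header+tag overhead each.
package main

import "fmt"

const (
	pkgSize  = 64 * 1024 // plaintext bytes per package (assumed)
	overhead = 32        // header+tag bytes per package (assumed)
)

func encryptedSize(size int64) int64 {
	full := size / pkgSize
	enc := full * (pkgSize + overhead)
	if rem := size % pkgSize; rem > 0 {
		enc += rem + overhead // final, partial package
	}
	return enc
}

func main() {
	// 1 MiB plaintext -> 16 full packages -> 1 MiB + 16*32 bytes on the wire.
	fmt.Println(encryptedSize(1 << 20))
}
```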
@@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}

}

// Test for Anonymous/unsigned http request.
@@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}

}

// Test for Anonymous/unsigned http request.

@@ -17,12 +17,16 @@

package cmd

import "github.com/minio/minio/internal/bucket/lifecycle"
import (
"strconv"

"github.com/minio/minio/internal/bucket/lifecycle"
)

//go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
type lcEventSrc uint8

//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins
const (
lcEventSrc_None lcEventSrc = iota
lcEventSrc_Heal
@@ -43,7 +47,7 @@ type lcAuditEvent struct {
source lcEventSrc
}

func (lae lcAuditEvent) Tags() map[string]interface{} {
func (lae lcAuditEvent) Tags() map[string]string {
event := lae.Event
src := lae.source
const (
@@ -55,7 +59,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions"
ilmNoncurrentDays = "ilm-noncurrent-days"
)
tags := make(map[string]interface{}, 5)
tags := make(map[string]string, 5)
if src > lcEventSrc_None {
tags[ilmSrc] = src.String()
}
@@ -63,7 +67,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
tags[ilmRuleID] = event.RuleID

if !event.Due.IsZero() {
tags[ilmDue] = event.Due
tags[ilmDue] = event.Due.Format(iso8601Format)
}

// rule with Transition/NoncurrentVersionTransition in effect
@@ -73,10 +77,10 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {

// rule with NewerNoncurrentVersions in effect
if event.NewerNoncurrentVersions > 0 {
tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions
tags[ilmNewerNoncurrentVersions] = strconv.Itoa(event.NewerNoncurrentVersions)
}
if event.NoncurrentDays > 0 {
tags[ilmNoncurrentDays] = event.NoncurrentDays
tags[ilmNoncurrentDays] = strconv.Itoa(event.NoncurrentDays)
}
return tags
}

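Editor's note: the `Tags()` change above narrows the return type from `map[string]interface{}` to `map[string]string`, which forces every non-string value through an explicit conversion (`strconv.Itoa`, `time.Format`) at the boundary instead of relying on downstream formatting. A standalone sketch of the same pattern, with illustrative names:

```go
// With map[string]string, each non-string value needs a lossless,
// explicit conversion; nothing downstream has to guess how to render it.
package main

import (
	"fmt"
	"strconv"
	"time"
)

func tags(due time.Time, days int) map[string]string {
	t := make(map[string]string, 2)
	if !due.IsZero() {
		t["ilm-due"] = due.Format(time.RFC3339) // stands in for iso8601Format
	}
	if days > 0 {
		t["ilm-noncurrent-days"] = strconv.Itoa(days)
	}
	return t
}

func main() {
	fmt.Println(tags(time.Now(), 30))
}
```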
@@ -19,7 +19,6 @@ package cmd

import (
"encoding/xml"
"io"
"net/http"
"strconv"
"time"
@@ -53,7 +52,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
bucket := vars["bucket"]

// PutBucketLifecycle always needs a Content-Md5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
if !validateLengthAndChecksum(r) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
return
}
@@ -70,7 +69,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}

bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(io.LimitReader(r.Body, r.ContentLength))
bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(r.Body)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

@@ -71,8 +71,12 @@ func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}

func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo {
func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string, metadata map[string]string, err string) madmin.TraceInfo {
sz, _ := oi.GetActualSize()
if metadata == nil {
metadata = make(map[string]string)
}
metadata["version-id"] = oi.VersionID
return madmin.TraceInfo{
TraceType: madmin.TraceILM,
Time: startTime,
@@ -81,18 +85,22 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event
Duration: duration,
Path: pathJoin(oi.Bucket, oi.Name),
Bytes: sz,
Error: "",
Error: err,
Message: getSource(4),
Custom: map[string]string{"version-id": oi.VersionID},
Custom: metadata,
}
}

func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string) {
func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string, metadata map[string]string, err error) {
startTime := time.Now()
return func(event string) {
return func(event string, metadata map[string]string, err error) {
duration := time.Since(startTime)
if globalTrace.NumSubscribers(madmin.TraceILM) > 0 {
globalTrace.Publish(ilmTrace(startTime, duration, oi, event))
e := ""
if err != nil {
e = err.Error()
}
globalTrace.Publish(ilmTrace(startTime, duration, oi, event, metadata, e))
}
}
}
@@ -147,8 +155,8 @@ func (f freeVersionTask) OpHash() uint64 {
return xxh3.HashString(f.TransitionedObject.Tier + f.TransitionedObject.Name)
}

func (n newerNoncurrentTask) OpHash() uint64 {
return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName)
func (n noncurrentVersionsTask) OpHash() uint64 {
return xxh3.HashString(n.bucket + n.versions[0].ObjectName)
}

func (j jentry) OpHash() uint64 {
@@ -232,14 +240,16 @@ func (es *expiryState) enqueueByDays(oi ObjectInfo, event lifecycle.Event, src l
}
}

// enqueueByNewerNoncurrent enqueues object versions expired by
// NewerNoncurrentVersions limit for expiry.
func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete, lcEvent lifecycle.Event) {
func (es *expiryState) enqueueNoncurrentVersions(bucket string, versions []ObjectToDelete, events []lifecycle.Event) {
if len(versions) == 0 {
return
}

task := newerNoncurrentTask{bucket: bucket, versions: versions, event: lcEvent}
task := noncurrentVersionsTask{
bucket: bucket,
versions: versions,
events: events,
}
wrkr := es.getWorkerCh(task.OpHash())
if wrkr == nil {
es.stats.missedExpiryTasks.Add(1)
@@ -339,8 +349,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
} else {
applyExpiryOnNonTransitionedObjects(es.ctx, es.objAPI, v.objInfo, v.event, v.src)
}
case newerNoncurrentTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
case noncurrentVersionsTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.events)
case jentry:
transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
case freeVersionTask:
@@ -348,7 +358,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
traceFn := globalLifecycleSys.trace(oi)
if !oi.TransitionedObject.FreeVersion {
// nothing to be done
return
continue
}

ignoreNotFoundErr := func(err error) error {
@@ -362,7 +372,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
if ignoreNotFoundErr(err) != nil {
transitionLogIf(es.ctx, err)
return
traceFn(ILMFreeVersionDelete, nil, err)
continue
}

// Remove this free version
@@ -387,12 +398,10 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState(ctx, objectAPI, globalILMConfig.getExpirationWorkers())
}

// newerNoncurrentTask encapsulates arguments required by worker to expire objects
// by NewerNoncurrentVersions
type newerNoncurrentTask struct {
type noncurrentVersionsTask struct {
bucket string
versions []ObjectToDelete
event lifecycle.Event
events []lifecycle.Event
}

type transitionTask struct {
@@ -707,6 +716,11 @@ type auditTierOp struct {
Error string `json:"error,omitempty"`
}

func (op auditTierOp) String() string {
// flattening the auditTierOp{} for audit
return fmt.Sprintf("tier:%s,respNS:%d,tx:%d,err:%s", op.Tier, op.TimeToResponseNS, op.OutputBytes, op.Error)
}

func auditTierActions(ctx context.Context, tier string, bytes int64) func(err error) {
startTime := time.Now()
return func(err error) {
@@ -730,7 +744,7 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er
globalTierMetrics.logFailure(tier)
}

logger.GetReqInfo(ctx).AppendTags("tierStats", op)
logger.GetReqInfo(ctx).AppendTags("tierStats", op.String())
}
}

@@ -998,7 +1012,7 @@ func ongoingRestoreObj() restoreObjStatus {
}
}

// completeRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
// completedRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
func completedRestoreObj(expiry time.Time) restoreObjStatus {
return restoreObjStatus{
ongoing: false,
@@ -1094,17 +1108,20 @@ func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) {
// ToLifecycleOpts returns lifecycle.ObjectOpts value for oi.
func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
return lifecycle.ObjectOpts{
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
Size: oi.Size,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
Size: oi.Size,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
UserDefined: oi.UserDefined,
VersionPurgeStatus: oi.VersionPurgeStatus,
ReplicationStatus: oi.ReplicationStatus,
}
}

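Editor's note: the `LifecycleSys.trace` refactor above keeps the existing capture-the-start-time closure but threads metadata and an error through it, stringifying a possibly-nil error only at publish time. A self-contained sketch of that closure pattern (names are illustrative, not MinIO's):

```go
// The outer call records the start time; the returned func computes the
// elapsed duration and renders a possibly-nil error when finally invoked.
package main

import (
	"errors"
	"fmt"
	"time"
)

func trace() func(event string, err error) {
	start := time.Now()
	return func(event string, err error) {
		e := ""
		if err != nil {
			e = err.Error()
		}
		fmt.Printf("event=%s took=%s err=%q\n", event, time.Since(start), e)
	}
}

func main() {
	done := trace()
	time.Sleep(10 * time.Millisecond)
	done("free-version-delete", errors.New("remote tier unreachable"))
}
```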
@@ -243,26 +243,26 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
return subToken, nodeIndex
}

func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) {
subToken, nodeIndex := parseRequestToken(token)
if nodeIndex > 0 {
return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex)
func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string, returnErr bool) (subToken string, proxied bool, success bool) {
var nodeIndex int
if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
}
return subToken, false
return
}

func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) {
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
if len(globalProxyEndpoints) == 0 {
return false
return
}
if index < 0 || index >= len(globalProxyEndpoints) {
return false
return
}
ep := globalProxyEndpoints[index]
if ep.IsLocal {
return false
return
}
return proxyRequest(ctx, w, r, ep)
return true, proxyRequest(ctx, w, r, ep, returnErr)
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.

@@ -39,6 +39,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
"golang.org/x/sync/singleflight"
)

// BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -47,6 +48,7 @@ type BucketMetadataSys struct {

sync.RWMutex
initialized bool
group *singleflight.Group
metadataMap map[string]BucketMetadata
}

@@ -62,6 +64,7 @@ func (sys *BucketMetadataSys) Count() int {
func (sys *BucketMetadataSys) Remove(buckets ...string) {
sys.Lock()
for _, bucket := range buckets {
sys.group.Forget(bucket)
delete(sys.metadataMap, bucket)
globalBucketMonitor.DeleteBucket(bucket)
}
@@ -121,6 +124,7 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
meta.PolicyConfigUpdatedAt = updatedAt
case bucketNotificationConfig:
meta.NotificationConfigXML = configData
meta.NotificationConfigUpdatedAt = updatedAt
case bucketLifecycleConfig:
meta.LifecycleConfigXML = configData
meta.LifecycleConfigUpdatedAt = updatedAt
@@ -150,12 +154,13 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
if err != nil {
return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err)
}
meta.BucketTargetsConfigUpdatedAt = updatedAt
meta.BucketTargetsConfigMetaUpdatedAt = updatedAt
default:
return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}

err = sys.save(ctx, meta)
return updatedAt, err
return updatedAt, sys.save(ctx, meta)
}

func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) error {
@@ -262,6 +267,21 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve
return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
}

// GetBucketPolicy returns configured bucket policy
func (sys *BucketMetadataSys) GetBucketPolicy(bucket string) (*policy.BucketPolicy, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.policyConfig == nil {
return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
}
return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
}

// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
@@ -392,9 +412,7 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
}
if reloaded {
globalBucketTargetSys.set(BucketInfo{
Name: bucket,
}, meta)
globalBucketTargetSys.set(bucket, meta)
}
return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
}
@@ -413,9 +431,7 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}
if reloaded {
globalBucketTargetSys.set(BucketInfo{
Name: bucket,
}, meta)
globalBucketTargetSys.set(bucket, meta)
}
return meta.bucketTargetConfig, nil
}
@@ -455,13 +471,20 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
if ok {
return meta, reloaded, nil
}
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
// bucket metadata not yet initialized
return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized

val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
// bucket metadata not yet initialized
return newBucketMetadata(bucket), errBucketMetadataNotInitialized
}
}
return meta, reloaded, err
return meta, err
})
meta, _ = val.(BucketMetadata)
if err != nil {
return meta, false, err
}
sys.Lock()
sys.metadataMap[bucket] = meta
@@ -471,7 +494,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
}

// Init - initializes bucket metadata system for all buckets.
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []string, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
@@ -484,7 +507,7 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
}

// concurrently load bucket metadata to speed up loading bucket metadata.
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, failedBuckets map[string]struct{}) {
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []string) {
g := errgroup.WithNErrs(len(buckets))
bucketMetas := make([]BucketMetadata, len(buckets))
for index := range buckets {
@@ -494,8 +517,8 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
// herd upon start up sequence.
time.Sleep(25*time.Millisecond + time.Duration(rand.Int63n(int64(100*time.Millisecond))))

_, _ = sys.objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index].Name)
_, _ = sys.objAPI.HealBucket(ctx, buckets[index], madmin.HealOpts{Recreate: true})
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index])
if err != nil {
return err
}
@@ -508,7 +531,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
for index, err := range errs {
if err != nil {
internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
"load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
"load-bucket-metadata-"+buckets[index], logger.WarningKind)
}
}

@@ -519,16 +542,12 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
if errs[i] != nil {
continue
}
sys.metadataMap[buckets[i].Name] = meta
sys.metadataMap[buckets[i]] = meta
}
sys.Unlock()

for i, meta := range bucketMetas {
if errs[i] != nil {
if failedBuckets == nil {
failedBuckets = make(map[string]struct{})
}
failedBuckets[buckets[i].Name] = struct{}{}
continue
}
globalEventNotifier.set(buckets[i], meta) // set notification targets
@@ -536,7 +555,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
}
}

func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, failedBuckets map[string]struct{}) {
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
const bucketMetadataRefresh = 15 * time.Minute

sleeper := newDynamicSleeper(2, 150*time.Millisecond, false)
@@ -548,7 +567,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
case <-ctx.Done():
return
case <-t.C:
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{NoMetadata: true})
if err != nil {
internalLogIf(ctx, err, logger.WarningKind)
break
@@ -566,7 +585,10 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
for i := range buckets {
wait := sleeper.Timer(ctx)

meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
bucket := buckets[i].Name
updated := false

meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket)
if err != nil {
internalLogIf(ctx, err, logger.WarningKind)
wait() // wait to proceed to next entry.
@@ -574,14 +596,16 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
}

sys.Lock()
sys.metadataMap[buckets[i].Name] = meta
// Update if the bucket metadata in the memory is older than on-disk one
if lu := sys.metadataMap[bucket].lastUpdate(); lu.Before(meta.lastUpdate()) {
updated = true
sys.metadataMap[bucket] = meta
}
sys.Unlock()

// Initialize the failed buckets
if _, ok := failedBuckets[buckets[i].Name]; ok {
globalEventNotifier.set(buckets[i], meta)
globalBucketTargetSys.set(buckets[i], meta)
delete(failedBuckets, buckets[i].Name)
if updated {
globalEventNotifier.set(bucket, meta)
globalBucketTargetSys.set(bucket, meta)
}

wait() // wait to proceed to next entry.
@@ -600,15 +624,14 @@ func (sys *BucketMetadataSys) Initialized() bool {
}

// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
failedBuckets := make(map[string]struct{})
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) {
count := globalEndpoints.ESCount() * 10
for {
if len(buckets) < count {
sys.concurrentLoad(ctx, buckets, failedBuckets)
sys.concurrentLoad(ctx, buckets)
break
}
sys.concurrentLoad(ctx, buckets[:count], failedBuckets)
sys.concurrentLoad(ctx, buckets[:count])
buckets = buckets[count:]
}

@@ -617,16 +640,14 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
sys.Unlock()

if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
go sys.refreshBucketsMetadataLoop(ctx)
}
}

// Reset the state of the BucketMetadataSys.
func (sys *BucketMetadataSys) Reset() {
sys.Lock()
for k := range sys.metadataMap {
delete(sys.metadataMap, k)
}
clear(sys.metadataMap)
sys.Unlock()
}

@@ -634,5 +655,6 @@ func (sys *BucketMetadataSys) Reset() {
func NewBucketMetadataSys() *BucketMetadataSys {
return &BucketMetadataSys{
metadataMap: make(map[string]BucketMetadata),
group: &singleflight.Group{},
}
}

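Editor's note: the `BucketMetadataSys` changes above wire in `golang.org/x/sync/singleflight` so that concurrent cache misses for the same bucket trigger exactly one `loadBucketMetadata` call. A standalone sketch of the pattern, with illustrative names; `singleflight.Group.Do` deduplicates in-flight calls sharing a key:

```go
// Concurrent callers asking for the same bucket share one load instead of
// issuing duplicate reads; Forget(key) later invalidates the dedup slot.
package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

func loadMetadata(bucket string) (string, error) {
	time.Sleep(50 * time.Millisecond) // stand-in for a disk/network read
	return "metadata-for-" + bucket, nil
}

func getMetadata(bucket string) (string, error) {
	v, err, shared := group.Do(bucket, func() (interface{}, error) {
		return loadMetadata(bucket)
	})
	if err != nil {
		return "", err
	}
	fmt.Println("result shared with other callers:", shared)
	return v.(string), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _, _ = getMetadata("photos") }()
	}
	wg.Wait()
}
```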
@@ -81,14 +81,19 @@ type BucketMetadata struct {
ReplicationConfigXML []byte
BucketTargetsConfigJSON []byte
BucketTargetsConfigMetaJSON []byte
PolicyConfigUpdatedAt time.Time
ObjectLockConfigUpdatedAt time.Time
EncryptionConfigUpdatedAt time.Time
TaggingConfigUpdatedAt time.Time
QuotaConfigUpdatedAt time.Time
ReplicationConfigUpdatedAt time.Time
VersioningConfigUpdatedAt time.Time
LifecycleConfigUpdatedAt time.Time

PolicyConfigUpdatedAt time.Time
ObjectLockConfigUpdatedAt time.Time
EncryptionConfigUpdatedAt time.Time
TaggingConfigUpdatedAt time.Time
QuotaConfigUpdatedAt time.Time
ReplicationConfigUpdatedAt time.Time
VersioningConfigUpdatedAt time.Time
LifecycleConfigUpdatedAt time.Time
NotificationConfigUpdatedAt time.Time
BucketTargetsConfigUpdatedAt time.Time
BucketTargetsConfigMetaUpdatedAt time.Time
// Add a new UpdatedAt field and update lastUpdate function

// Unexported fields. Must be updated atomically.
policyConfig *policy.BucketPolicy
@@ -120,6 +125,46 @@ func newBucketMetadata(name string) BucketMetadata {
}
}

// Return the last update of this bucket metadata, which
// means, the last update of any policy document.
func (b BucketMetadata) lastUpdate() (t time.Time) {
if b.PolicyConfigUpdatedAt.After(t) {
t = b.PolicyConfigUpdatedAt
}
if b.ObjectLockConfigUpdatedAt.After(t) {
t = b.ObjectLockConfigUpdatedAt
}
if b.EncryptionConfigUpdatedAt.After(t) {
t = b.EncryptionConfigUpdatedAt
}
if b.TaggingConfigUpdatedAt.After(t) {
t = b.TaggingConfigUpdatedAt
}
if b.QuotaConfigUpdatedAt.After(t) {
t = b.QuotaConfigUpdatedAt
}
if b.ReplicationConfigUpdatedAt.After(t) {
t = b.ReplicationConfigUpdatedAt
}
if b.VersioningConfigUpdatedAt.After(t) {
t = b.VersioningConfigUpdatedAt
}
if b.LifecycleConfigUpdatedAt.After(t) {
t = b.LifecycleConfigUpdatedAt
}
if b.NotificationConfigUpdatedAt.After(t) {
t = b.NotificationConfigUpdatedAt
}
if b.BucketTargetsConfigUpdatedAt.After(t) {
t = b.BucketTargetsConfigUpdatedAt
}
if b.BucketTargetsConfigMetaUpdatedAt.After(t) {
t = b.BucketTargetsConfigMetaUpdatedAt
}

return
}

// Versioning returns true if versioning is enabled
func (b BucketMetadata) Versioning() bool {
return b.LockEnabled || (b.versioningConfig != nil && b.versioningConfig.Enabled()) || (b.objectLockConfig != nil && b.objectLockConfig.Enabled())
@@ -440,6 +485,18 @@ func (b *BucketMetadata) defaultTimestamps() {
if b.LifecycleConfigUpdatedAt.IsZero() {
b.LifecycleConfigUpdatedAt = b.Created
}

if b.NotificationConfigUpdatedAt.IsZero() {
b.NotificationConfigUpdatedAt = b.Created
}

if b.BucketTargetsConfigUpdatedAt.IsZero() {
b.BucketTargetsConfigUpdatedAt = b.Created
}

if b.BucketTargetsConfigMetaUpdatedAt.IsZero() {
b.BucketTargetsConfigMetaUpdatedAt = b.Created
}
}

// Save config to supplied ObjectLayer api.
@@ -498,7 +555,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, key, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
if err != nil {
return output, metabytes, err

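Editor's note: `lastUpdate` above folds eleven timestamps with repeated `After` checks, which is why the struct carries the reminder "Add a new UpdatedAt field and update lastUpdate function". An equivalent, slice-based fold would keep that maintenance to a single list; this is an illustrative alternative, not the code MinIO uses:

```go
// Same result as the chain of ifs: the zero time loses to any set
// timestamp, and the latest one wins. Adding a field means appending
// one element here instead of writing a new if block.
package main

import (
	"fmt"
	"time"
)

func latest(ts ...time.Time) (t time.Time) {
	for _, u := range ts {
		if u.After(t) {
			t = u
		}
	}
	return t
}

func main() {
	a := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	b := time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(latest(a, b)) // prints the June timestamp
}
```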
@@ -156,6 +156,24 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
return
}
case "NotificationConfigUpdatedAt":
z.NotificationConfigUpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
return
}
case "BucketTargetsConfigUpdatedAt":
z.BucketTargetsConfigUpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
return
}
case "BucketTargetsConfigMetaUpdatedAt":
z.BucketTargetsConfigMetaUpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -169,9 +187,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 22
// map header, size 25
// write "Name"
err = en.Append(0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
err = en.Append(0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
@@ -390,15 +408,45 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
return
}
// write "NotificationConfigUpdatedAt"
err = en.Append(0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.NotificationConfigUpdatedAt)
if err != nil {
err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
return
}
// write "BucketTargetsConfigUpdatedAt"
err = en.Append(0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.BucketTargetsConfigUpdatedAt)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
return
}
// write "BucketTargetsConfigMetaUpdatedAt"
err = en.Append(0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.BucketTargetsConfigMetaUpdatedAt)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 22
// map header, size 25
// string "Name"
o = append(o, 0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = append(o, 0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@@ -463,6 +511,15 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
// string "LifecycleConfigUpdatedAt"
o = append(o, 0xb8, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.LifecycleConfigUpdatedAt)
// string "NotificationConfigUpdatedAt"
o = append(o, 0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.NotificationConfigUpdatedAt)
// string "BucketTargetsConfigUpdatedAt"
o = append(o, 0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.BucketTargetsConfigUpdatedAt)
// string "BucketTargetsConfigMetaUpdatedAt"
o = append(o, 0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.BucketTargetsConfigMetaUpdatedAt)
return
}

@@ -616,6 +673,24 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "LifecycleConfigUpdatedAt")
return
}
case "NotificationConfigUpdatedAt":
z.NotificationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NotificationConfigUpdatedAt")
return
}
case "BucketTargetsConfigUpdatedAt":
z.BucketTargetsConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt")
return
}
case "BucketTargetsConfigMetaUpdatedAt":
z.BucketTargetsConfigMetaUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -630,6 +705,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize
s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize + 28 + msgp.TimeSize + 29 + msgp.TimeSize + 34 + msgp.TimeSize
return
}

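Editor's note: the generated encoder above bumps the header bytes from `0xde 0x00 0x16` to `0xde 0x00 0x19`. In MessagePack, `0xde` marks a map16 followed by a big-endian 16-bit entry count, so the struct map grows from 22 to 25 fields to accommodate the three new timestamps, and `0xa4` is the fixstr marker for the 4-byte key "Name". A quick decode of those bytes:

```go
// Decode the generated msgpack header: map16 marker, field count,
// then the first key as a fixstr.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	hdr := []byte{0xde, 0x00, 0x19, 0xa4, 'N', 'a', 'm', 'e'}
	count := binary.BigEndian.Uint16(hdr[1:3]) // 0x0019 = 25 fields
	strLen := hdr[3] & 0x1f                    // low bits of the fixstr marker
	fmt.Printf("map entries: %d, first key: %s\n", count, hdr[4:4+strLen])
	// Output: map entries: 25, first key: Name
}
```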
@@ -295,7 +295,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
}

@@ -305,7 +305,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
if err != nil && !(replica && rMode == "" && rDate.IsZero()) {
if err != nil && (!replica || rMode != "" || !rDate.IsZero()) {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if retentionPermErr != ErrNone {

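Editor's note: the retention-header condition rewrite above is a pure De Morgan transformation — `!(replica && rMode == "" && rDate.IsZero())` is exactly `!replica || rMode != "" || !rDate.IsZero()` — so behavior is unchanged; the new form just avoids the negated conjunction linters flag. A tiny exhaustive check:

```go
// Exhaustive equivalence check: by De Morgan, !(a && b && c) always
// equals (!a || !b || !c) over all eight boolean assignments.
package main

import "fmt"

func main() {
	for i := 0; i < 8; i++ {
		a, b, c := i&1 == 1, i&2 == 2, i&4 == 4
		old := !(a && b && c)
		cur := !a || !b || !c
		fmt.Println(old == cur) // prints true eight times
	}
}
```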
@@ -50,6 +50,9 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func(ctx context.Context) (DataUsageInfo, error) {
if objAPI == nil {
return DataUsageInfo{}, errServerNotInitialized
}
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

@@ -59,7 +62,9 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
}

// GetBucketUsageInfo return bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) (BucketUsageInfo, error) {
func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) BucketUsageInfo {
sys.Init(newObjectLayerFn())

dui, err := bucketStorageCache.GetWithCtx(ctx)
timedout := OperationTimedOut{}
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
@@ -73,10 +78,10 @@ func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string
if len(dui.BucketsUsage) > 0 {
bui, ok := dui.BucketsUsage[bucket]
if ok {
return bui, nil
return bui
}
}
return BucketUsageInfo{}, nil
return BucketUsageInfo{}
}

// parseBucketQuota parses BucketQuota from json
@@ -118,11 +123,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
return BucketQuotaExceeded{Bucket: bucket}
}

bui, err := sys.GetBucketUsageInfo(ctx, bucket)
if err != nil {
return err
}

bui := sys.GetBucketUsageInfo(ctx, bucket)
if bui.Size > 0 && ((bui.Size + uint64(size)) >= quotaSize) {
return BucketQuotaExceeded{Bucket: bucket}
}

@@ -75,7 +75,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true)
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, &validateReplicationDestinationOptions{CheckRemoteBucket: true})
if apiErr != noError {
writeErrorResponse(ctx, w, apiErr, r.URL)
return
@@ -230,7 +230,7 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
stats := globalReplicationStats.getLatestReplicationStats(bucket)
stats := globalReplicationStats.Load().getLatestReplicationStats(bucket)
bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket)
bwMap := bwRpt.BucketStats
for arn, st := range stats.ReplicationStats.Stats {
@@ -286,7 +286,7 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsV2Handler(w http.Respons
w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
stats := globalReplicationStats.getLatestReplicationStats(bucket)
stats := globalReplicationStats.Load().getLatestReplicationStats(bucket)
bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket)
bwMap := bwRpt.BucketStats
for arn, st := range stats.ReplicationStats.Stats {
@@ -422,7 +422,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseW
return
}

if err := globalReplicationPool.resyncer.start(ctx, objectAPI, resyncOpts{
if err := globalReplicationPool.Get().resyncer.start(ctx, objectAPI, resyncOpts{
bucket: bucket,
arn: arn,
resyncID: resetID,
@@ -559,7 +559,7 @@ func (api objectAPIHandlers) ValidateBucketReplicationCredsHandler(w http.Respon
lockEnabled = lcfg.Enabled()
}

sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true)
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, &validateReplicationDestinationOptions{CheckRemoteBucket: true})
if apiErr != noError {
writeErrorResponse(ctx, w, apiErr, r.URL)
return

@@ -119,7 +119,7 @@ func (a *ActiveWorkerStat) update() {
if a == nil {
return
}
a.Curr = globalReplicationPool.ActiveWorkers()
a.Curr = globalReplicationPool.Get().ActiveWorkers()
a.hist.Update(int64(a.Curr))
a.Avg = float32(a.hist.Mean())
a.Max = int(a.hist.Max())

@@ -87,6 +87,9 @@ func (r *ReplicationStats) updateMovingAvg() {

// ActiveWorkers returns worker stats
func (r *ReplicationStats) ActiveWorkers() ActiveWorkerStat {
if r == nil {
return ActiveWorkerStat{}
}
r.wlock.RLock()
defer r.wlock.RUnlock()
w := r.workers.get()
@@ -109,7 +112,6 @@ func (r *ReplicationStats) collectWorkerMetrics(ctx context.Context) {
r.wlock.Lock()
r.workers.update()
r.wlock.Unlock()

}
}
}
@@ -351,6 +353,9 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio
}

func (r *ReplicationStats) getAllLatest(bucketsUsage map[string]BucketUsageInfo) (bucketsReplicationStats map[string]BucketStats) {
if r == nil {
return nil
}
peerBucketStatsList := globalNotificationSys.GetClusterAllBucketStats(GlobalContext)
bucketsReplicationStats = make(map[string]BucketStats, len(bucketsUsage))

@@ -460,6 +465,9 @@ func (r *ReplicationStats) calculateBucketReplicationStats(bucket string, bucket

// get the most current of in-memory replication stats and data usage info from crawler.
func (r *ReplicationStats) getLatestReplicationStats(bucket string) (s BucketStats) {
if r == nil {
return s
}
bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
return r.calculateBucketReplicationStats(bucket, bucketStats)
}
@@ -495,9 +503,14 @@ func (r *ReplicationStats) decQ(bucket string, sz int64, isDelMarker bool, opTyp

// incProxy increments proxy metrics for proxied calls
func (r *ReplicationStats) incProxy(bucket string, api replProxyAPI, isErr bool) {
r.pCache.inc(bucket, api, isErr)
if r != nil {
r.pCache.inc(bucket, api, isErr)
}
}

func (r *ReplicationStats) getProxyStats(bucket string) ProxyMetric {
if r == nil {
return ProxyMetric{}
}
return r.pCache.getBucketStats(bucket)
}

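Editor's note: several `ReplicationStats` methods above gain `if r == nil` guards. In Go, calling a method on a nil pointer receiver is legal — the panic only happens when the body dereferences a field — so these guards let a not-yet-initialized global degrade to a no-op during early startup. A standalone sketch:

```go
// A method on a nil *Stats is a legal call; the explicit guard turns it
// into a safe no-op instead of a panic on field access.
package main

import "fmt"

type Stats struct{ count int }

func (s *Stats) Inc() {
	if s == nil {
		return // not initialized yet: drop the update silently
	}
	s.count++
}

func main() {
	var s *Stats // nil, e.g. before startup wiring completes
	s.Inc()      // safe no-op
	s = &Stats{}
	s.Inc()
	fmt.Println(s.count) // 1
}
```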
@@ -125,16 +125,16 @@ func (ri replicatedInfos) VersionPurgeStatus() VersionPurgeStatusType {
completed := 0
for _, v := range ri.Targets {
switch v.VersionPurgeStatus {
case Failed:
return Failed
case Complete:
case replication.VersionPurgeFailed:
return replication.VersionPurgeFailed
case replication.VersionPurgeComplete:
completed++
}
}
if completed == len(ri.Targets) {
return Complete
return replication.VersionPurgeComplete
}
return Pending
return replication.VersionPurgePending
}

func (ri replicatedInfos) VersionPurgeStatusInternal() string {
@@ -380,7 +380,7 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT
// CompositeVersionPurgeStatus returns overall replication purge status for the permanent delete being replicated.
func (rs *ReplicationState) CompositeVersionPurgeStatus() VersionPurgeStatusType {
switch VersionPurgeStatusType(rs.VersionPurgeStatusInternal) {
case Pending, Complete, Failed: // for backward compatibility
case replication.VersionPurgePending, replication.VersionPurgeComplete, replication.VersionPurgeFailed: // for backward compatibility
return VersionPurgeStatusType(rs.VersionPurgeStatusInternal)
default:
return getCompositeVersionPurgeStatus(rs.PurgeTargets)
@@ -478,16 +478,16 @@ func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) Version
completed := 0
for _, v := range m {
switch v {
case Failed:
return Failed
case Complete:
case replication.VersionPurgeFailed:
return replication.VersionPurgeFailed
case replication.VersionPurgeComplete:
completed++
}
}
if completed == len(m) {
return Complete
return replication.VersionPurgeComplete
}
return Pending
return replication.VersionPurgePending
}

// getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo
@@ -635,28 +635,7 @@ type ResyncTarget struct {
}

// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
type VersionPurgeStatusType string

const (
// Pending - versioned delete replication is pending.
Pending VersionPurgeStatusType = "PENDING"

// Complete - versioned delete replication is now complete, erase version on disk.
Complete VersionPurgeStatusType = "COMPLETE"

// Failed - versioned delete replication failed.
Failed VersionPurgeStatusType = "FAILED"
)

// Empty returns true if purge status was not set.
func (v VersionPurgeStatusType) Empty() bool {
return string(v) == ""
}

// Pending returns true if the version is pending purge.
func (v VersionPurgeStatusType) Pending() bool {
return v == Pending || v == Failed
}
type VersionPurgeStatusType = replication.VersionPurgeStatusType

type replicationResyncer struct {
// map of bucket to their resync status

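Editor's note: the local `VersionPurgeStatusType` definition above is replaced with a type alias into the `replication` package. With an alias (`type A = B`, note the `=`) the two names denote the *same* type, so constants and values interchange with no conversion and existing callers keep compiling while the definitions move. A minimal sketch with illustrative names:

```go
// `type statusAlias = status` is an alias, not a new defined type: values
// and constants of the original type can be used directly, unconverted.
package main

import "fmt"

type status string

const pending status = "PENDING"

type statusAlias = status // same type under a second name

func main() {
	var v statusAlias = pending // no conversion needed
	fmt.Println(v == pending)   // true
}
```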
@@ -915,33 +915,29 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
{
var zb0004 string
zb0004, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
za0004 = VersionPurgeStatusType(zb0004)
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
z.PurgeTargets[za0003] = za0004
}
case "ResetStatusesMap":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0005)
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0005 > 0 {
zb0005--
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 string
za0005, err = dc.ReadString()
@@ -1078,7 +1074,7 @@ func (z *ReplicationState) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
err = en.WriteString(string(za0004))
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
@@ -1154,7 +1150,11 @@ func (z *ReplicationState) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendMapHeader(o, uint32(len(z.PurgeTargets)))
for za0003, za0004 := range z.PurgeTargets {
o = msgp.AppendString(o, za0003)
o = msgp.AppendString(o, string(za0004))
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
}
// string "ResetStatusesMap"
o = append(o, 0xb0, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x4d, 0x61, 0x70)
@@ -1279,35 +1279,31 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
{
var zb0004 string
zb0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
za0004 = VersionPurgeStatusType(zb0004)
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
z.PurgeTargets[za0003] = za0004
}
case "ResetStatusesMap":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0005)
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0005 > 0 {
for zb0004 > 0 {
var za0005 string
var za0006 string
zb0005--
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
@@ -1345,7 +1341,7 @@ func (z *ReplicationState) Msgsize() (s int) {
if z.PurgeTargets != nil {
for za0003, za0004 := range z.PurgeTargets {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(string(za0004))
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
s += 17 + msgp.MapHeaderSize
@@ -2507,55 +2503,3 @@ func (z *TargetReplicationResyncStatus) Msgsize() (s int) {
s = 1 + 3 + msgp.TimeSize + 4 + msgp.TimeSize + 3 + msgp.StringPrefixSize + len(z.ResyncID) + 4 + msgp.TimeSize + 4 + msgp.IntSize + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len(z.Bucket) + 4 + msgp.StringPrefixSize + len(z.Object)
return
}

// DecodeMsg implements msgp.Decodable
func (z *VersionPurgeStatusType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 string
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = VersionPurgeStatusType(zb0001)
}
return
}

// EncodeMsg implements msgp.Encodable
func (z VersionPurgeStatusType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteString(string(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z VersionPurgeStatusType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *VersionPurgeStatusType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = VersionPurgeStatusType(zb0001)
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z VersionPurgeStatusType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}

@@ -18,7 +18,6 @@
package cmd

import (
"context"
"testing"

"github.com/minio/minio/internal/bucket/replication"
@@ -184,7 +183,7 @@ var parseReplicationDecisionTest = []struct {

func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
dsc, err := parseReplicateDecision(context.Background(), "bucket", test.expDsc.String())
dsc, err := parseReplicateDecision(t.Context(), "bucket", test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)

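Editor's note: the test change above swaps `context.Background()` for `t.Context()`, the per-test context added to `testing.T` in Go 1.24. It is canceled just before the test's cleanup functions run, so any work scoped to it cannot silently outlive the test. A minimal sketch of its use:

```go
// t.Context() (Go 1.24) returns a context that is live for the duration
// of the test and canceled right before Cleanup functions run.
package main

import "testing"

func TestWithContext(t *testing.T) {
	ctx := t.Context() // no manual cancel bookkeeping needed
	select {
	case <-ctx.Done():
		t.Fatal("context should still be live during the test")
	default:
		// pass ctx to code under test here
	}
}
```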
@@ -49,11 +49,11 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/once"
"github.com/tinylib/msgp/msgp"
"github.com/zeebo/xxh3"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)

const (
@@ -91,7 +91,10 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati

// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, checkRemote bool) (bool, APIError) {
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, opts *validateReplicationDestinationOptions) (bool, APIError) {
if opts == nil {
opts = &validateReplicationDestinationOptions{}
}
var arns []string
if rCfg.RoleArn != "" {
arns = append(arns, rCfg.RoleArn)
@@ -113,7 +116,7 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
if clnt == nil {
return sameTarget, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
}
if checkRemote { // validate remote bucket
if opts.CheckRemoteBucket { // validate remote bucket
found, err := clnt.BucketExists(ctx, arn.Bucket)
if err != nil {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, err)
@@ -133,23 +136,29 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
}
}
}
// if the remote bucket was already checked, the ready check is unnecessary
if !opts.CheckRemoteBucket && opts.CheckReady {
|
||||
endpoint := clnt.EndpointURL().String()
|
||||
if errInt, ok := opts.checkReadyErr.Load(endpoint); !ok {
|
||||
err = checkRemoteEndpoint(ctx, clnt.EndpointURL())
|
||||
opts.checkReadyErr.Store(endpoint, err)
|
||||
} else {
|
||||
if errInt == nil {
|
||||
err = nil
|
||||
} else {
|
||||
err, _ = errInt.(error)
|
||||
}
|
||||
}
|
||||
switch err.(type) {
|
||||
case BucketRemoteIdenticalToSource:
|
||||
return true, errorCodes.ToAPIErrWithErr(ErrBucketRemoteIdenticalToSource, fmt.Errorf("remote target endpoint %s is self referential", clnt.EndpointURL().String()))
|
||||
default:
|
||||
}
|
||||
}
|
||||
// validate replication ARN against target endpoint
|
||||
c := globalBucketTargetSys.GetRemoteTargetClient(bucket, arnStr)
|
||||
if c != nil {
|
||||
if err := checkRemoteEndpoint(ctx, c.EndpointURL()); err != nil {
|
||||
switch err.(type) {
|
||||
case BucketRemoteIdenticalToSource:
|
||||
return true, errorCodes.ToAPIErrWithErr(ErrBucketRemoteIdenticalToSource, fmt.Errorf("remote target endpoint %s is self referential", c.EndpointURL().String()))
|
||||
default:
|
||||
}
|
||||
}
|
||||
if c.EndpointURL().String() == clnt.EndpointURL().String() {
|
||||
selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
|
||||
if !sameTarget {
|
||||
sameTarget = selfTarget
|
||||
}
|
||||
continue
|
||||
}
|
||||
selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
|
||||
if !sameTarget {
|
||||
sameTarget = selfTarget
|
||||
}
|
||||
}
|
||||
|
||||
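The new options struct memoizes the readiness probe per endpoint in a sync.Map, so a config whose ARNs all point at the same endpoint probes it only once. A standalone sketch of that memoization pattern, with a stand-in probe function (hypothetical, not the MinIO API):

package main

import (
	"fmt"
	"sync"
)

// probe is a stand-in for an expensive readiness check such as
// checkRemoteEndpoint in the diff above.
func probe(endpoint string) error {
	fmt.Println("probing", endpoint)
	return nil
}

// readyCache memoizes probe results per endpoint, mirroring the
// checkReadyErr sync.Map in validateReplicationDestinationOptions.
type readyCache struct{ m sync.Map }

func (c *readyCache) check(endpoint string) error {
	if v, ok := c.m.Load(endpoint); ok {
		err, _ := v.(error) // a successful probe is cached as a nil value
		return err
	}
	err := probe(endpoint)
	c.m.Store(endpoint, err)
	return err
}

func main() {
	var c readyCache
	c.check("https://site-b:9000") // probes
	c.check("https://site-b:9000") // served from cache, no second probe
}
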
@@ -381,7 +390,7 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
 			// can be the case that other cluster is down and duplicate `mc rm --vid`
 			// is issued - this still needs to be replicated back to the other target
 			if !oi.VersionPurgeStatus.Empty() {
-				replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
+				replicate = oi.VersionPurgeStatus == replication.VersionPurgePending || oi.VersionPurgeStatus == replication.VersionPurgeFailed
 				dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
 			}
 			continue
@@ -469,7 +478,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 	lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName)
 	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
 	if err != nil {
-		globalReplicationPool.queueMRFSave(dobj.ToMRFEntry())
+		globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry())
 		sendEvent(eventArgs{
 			BucketName: bucket,
 			Object: ObjectInfo{
@@ -539,7 +548,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 	// to decrement pending count later.
 	for _, rinfo := range rinfos.Targets {
 		if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
-			globalReplicationStats.Update(dobj.Bucket, rinfo, replicationStatus,
+			globalReplicationStats.Load().Update(dobj.Bucket, rinfo, replicationStatus,
 				prevStatus)
 		}
 	}
@@ -547,7 +556,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 	eventName := event.ObjectReplicationComplete
 	if replicationStatus == replication.Failed {
 		eventName = event.ObjectReplicationFailed
-		globalReplicationPool.queueMRFSave(dobj.ToMRFEntry())
+		globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry())
 	}
 	drs := getReplicationState(rinfos, dobj.ReplicationState, dobj.VersionID)
 	if replicationStatus != prevStatus {
@@ -609,7 +618,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
 		return
 	}
-	if dobj.VersionID != "" && rinfo.VersionPurgeStatus == Complete {
+	if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete {
 		return
 	}
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@@ -629,7 +638,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		if dobj.VersionID == "" {
 			rinfo.ReplicationStatus = replication.Failed
 		} else {
-			rinfo.VersionPurgeStatus = Failed
+			rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
 		}
 		return
 	}
@@ -653,7 +662,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		case isErrObjectNotFound(serr), isErrVersionNotFound(serr):
 			// version being purged is already not found on target.
 			if !rinfo.VersionPurgeStatus.Empty() {
-				rinfo.VersionPurgeStatus = Complete
+				rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
 				return
 			}
 		case isErrReadQuorum(serr), isErrWriteQuorum(serr):
@@ -686,7 +695,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		if dobj.VersionID == "" {
 			rinfo.ReplicationStatus = replication.Failed
 		} else {
-			rinfo.VersionPurgeStatus = Failed
+			rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
 		}
 		replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
 		if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@@ -696,7 +705,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		if dobj.VersionID == "" {
 			rinfo.ReplicationStatus = replication.Completed
 		} else {
-			rinfo.VersionPurgeStatus = Complete
+			rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
 		}
 	}
 	return
@@ -765,55 +774,44 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
 	return "", false
 }

-func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) map[string]string {
-	meta := make(map[string]string)
-	cs := oi.decryptChecksums(partNum, h)
-	for k, v := range cs {
-		cksum := hash.NewChecksumString(k, v)
-		if cksum == nil {
-			continue
-		}
-		if cksum.Valid() {
-			meta[cksum.Type.Key()] = v
-		}
-	}
-	return meta
-}
-
-func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, partNum int) (putOpts minio.PutObjectOptions, err error) {
+func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) {
 	meta := make(map[string]string)
 	isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)

 	for k, v := range objInfo.UserDefined {
+		_, isValidSSEHeader := validSSEReplicationHeaders[k]
 		// In case of SSE-C objects copy the allowed internal headers as well
-		if !isSSEC || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
+		if !isSSEC || !isValidSSEHeader {
 			if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
-				if strings.EqualFold(k, ReservedMetadataPrefixLower+"crc") {
-					for k, v := range getCRCMeta(objInfo, partNum, nil) {
-						meta[k] = v
-					}
-				}
 				continue
 			}
 			if isStandardHeader(k) {
 				continue
 			}
 		}
-		if slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
+		if isValidSSEHeader {
 			meta[validSSEReplicationHeaders[k]] = v
 		} else {
 			meta[k] = v
 		}
 	}

+	isMP = objInfo.isMultipart()
 	if len(objInfo.Checksum) > 0 {
 		// Add encrypted CRC to metadata for SSE-C objects.
 		if isSSEC {
 			meta[ReplicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(objInfo.Checksum)
 		} else {
-			for k, v := range getCRCMeta(objInfo, 0, nil) {
+			cs, mp := getCRCMeta(objInfo, 0, nil)
+			// Set object checksum.
+			for k, v := range cs {
 				meta[k] = v
 			}
+			isMP = mp
+			if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
+				// For objects where checksum is full object, it will be the same.
+				// Therefore, we use the cheaper PutObject replication.
+				isMP = false
+			}
 		}
 	}

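The refactor replaces slices.Contains(maps.Keys(m), k), which materializes every key on each lookup, with a direct two-value map index. A small illustrative sketch of the two idioms side by side (the x/exp packages are the same ones the diff removes):

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

func main() {
	valid := map[string]string{"X-Amz-Meta-A": "a"}
	k := "X-Amz-Meta-A"

	// Before: builds a []string of all keys on every call, O(n) time plus an allocation.
	before := slices.Contains(maps.Keys(valid), k)

	// After: constant-time membership test, no allocation.
	_, after := valid[k]

	fmt.Println(before, after) // true true
}
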
@@ -843,7 +841,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
 	if tagTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok {
 		tagTimestamp, err = time.Parse(time.RFC3339Nano, tagTmstampStr)
 		if err != nil {
-			return putOpts, err
+			return putOpts, false, err
 		}
 	}
 	putOpts.Internal.TaggingTimestamp = tagTimestamp
@@ -867,7 +865,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
 	if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
 		rdate, err := amztime.ISO8601Parse(retainDateStr)
 		if err != nil {
-			return putOpts, err
+			return putOpts, false, err
 		}
 		putOpts.RetainUntilDate = rdate
 		// set retention timestamp in opts
@@ -875,7 +873,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
 		if retainTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok {
 			retTimestamp, err = time.Parse(time.RFC3339Nano, retainTmstampStr)
 			if err != nil {
-				return putOpts, err
+				return putOpts, false, err
 			}
 		}
 		putOpts.Internal.RetentionTimestamp = retTimestamp
@@ -887,7 +885,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
 		if lholdTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
 			lholdTimestamp, err = time.Parse(time.RFC3339Nano, lholdTmstampStr)
 			if err != nil {
-				return putOpts, err
+				return putOpts, false, err
 			}
 		}
 		putOpts.Internal.LegalholdTimestamp = lholdTimestamp
@@ -895,6 +893,24 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
 	if crypto.S3.IsEncrypted(objInfo.UserDefined) {
 		putOpts.ServerSideEncryption = encrypt.NewSSE()
 	}

+	if crypto.S3KMS.IsEncrypted(objInfo.UserDefined) {
+		// If KMS key ID replication is enabled (as by default)
+		// we include the object's KMS key ID. In any case, we
+		// always set the SSE-KMS header. If no KMS key ID is
+		// specified, MinIO is supposed to use whatever default
+		// config applies on the site or bucket.
+		var keyID string
+		if kms.ReplicateKeyID() {
+			keyID = objInfo.KMSKeyID()
+		}
+
+		sseEnc, err := encrypt.NewSSEKMS(keyID, nil)
+		if err != nil {
+			return putOpts, false, err
+		}
+		putOpts.ServerSideEncryption = sseEnc
+	}
 	return
 }

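In practice this means replication PUTs carry the SSE-KMS request headers, optionally with the source object's key ID. A hedged sketch of building such options with minio-go (endpoint and key ID are placeholders; encrypt.NewSSEKMS is the same call the diff uses):

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	// An empty key ID asks the target site to apply its default KMS key,
	// mirroring the behaviour described in the comment above.
	keyID := "" // or the source object's key ID when key-ID replication is on

	sse, err := encrypt.NewSSEKMS(keyID, nil)
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.PutObjectOptions{ServerSideEncryption: sse}
	_ = opts // pass to Client.PutObject against the replication target
}
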
@@ -952,7 +968,11 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 	}

 	t, _ := tags.ParseObjectTags(oi1.UserTags)
-	if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2.UserTags, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
+	oi2Map := make(map[string]string)
+	for k, v := range oi2.UserTags {
+		oi2Map[k] = v
+	}
+	if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
 		return replicateMetadata
 	}

@@ -1061,7 +1081,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 			UserAgent: "Internal: [Replication]",
 			Host:      globalLocalNodeName,
 		})
-		globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
+		globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry())
 		return
 	}
 	ctx = lkctx.Context()
@@ -1146,7 +1166,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 	for _, rinfo := range rinfos.Targets {
 		if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
 			rinfo.OpType = opType // update optype to reflect correct operation.
-			globalReplicationStats.Update(bucket, rinfo, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus)
+			globalReplicationStats.Load().Update(bucket, rinfo, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus)
 		}
 	}
 }
@@ -1166,7 +1186,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 		ri.EventType = ReplicateMRF
 		ri.ReplicationStatusInternal = rinfos.ReplicationStatusInternal()
 		ri.RetryCount++
-		globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
+		globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry())
 	}
 }

@@ -1280,7 +1300,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 	// use core client to avoid doing multipart on PUT
 	c := &minio.Core{Client: tgt.Client}

-	putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
+	putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
 	if err != nil {
 		replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
 		sendEvent(eventArgs{
@@ -1306,13 +1326,13 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 		HeaderSize: headerSize,
 	}
 	newCtx := ctx
-	if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
+	if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) && objInfo.Size < minLargeObjSize {
 		var cancel context.CancelFunc
 		newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
 		defer cancel()
 	}
 	r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
-	if objInfo.isMultipart() {
+	if isMP {
 		rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
 	} else {
 		_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
@@ -1441,13 +1461,14 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 		}
 		rinfo.Duration = time.Since(startTime)
 	}()

-	oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, minio.StatObjectOptions{
+	sOpts := minio.StatObjectOptions{
 		VersionID: objInfo.VersionID,
 		Internal: minio.AdvancedGetOptions{
 			ReplicationProxyRequest: "false",
 		},
-	})
+	}
+	sOpts.Set(xhttp.AmzTagDirective, "ACCESS")
+	oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, sOpts)
 	if cerr == nil {
 		rAction = getReplicationAction(objInfo, oi, ri.OpType)
 		rinfo.ReplicationStatus = replication.Completed
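StatObjectOptions embeds GetObjectOptions, so arbitrary request headers can be attached with Set; here the stat asks the target to evaluate tags as part of the HEAD. A hedged sketch of the same call shape (endpoint, credentials, and version ID are placeholders; the header literal is the standard S3 tagging directive assumed to back xhttp.AmzTagDirective):

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	clnt, err := minio.New("replica.example.com:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS", "SECRET", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.StatObjectOptions{VersionID: "example-version-id"}
	// Ask the remote to return tag information with the HEAD response.
	opts.Set("x-amz-tagging-directive", "ACCESS")

	oi, err := clnt.StatObject(context.Background(), "bucket", "object", opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(oi.ETag, oi.UserTagCount)
}
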
@@ -1532,19 +1553,30 @@ applyAction:
 				ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
 			},
 		}
-		if tagTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok {
+		// default timestamps to ModTime unless present in metadata
+		lkMap := caseInsensitiveMap(objInfo.UserDefined)
+		if _, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
+			dstOpts.Internal.LegalholdTimestamp = objInfo.ModTime
+		}
+		if _, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
+			dstOpts.Internal.RetentionTimestamp = objInfo.ModTime
+		}
+		if objInfo.UserTags != "" {
+			dstOpts.Internal.TaggingTimestamp = objInfo.ModTime
+		}
+		if tagTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + TaggingTimestamp); ok {
 			ondiskTimestamp, err := time.Parse(time.RFC3339, tagTmStr)
 			if err == nil {
 				dstOpts.Internal.TaggingTimestamp = ondiskTimestamp
 			}
 		}
-		if retTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok {
+		if retTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockRetentionTimestamp); ok {
 			ondiskTimestamp, err := time.Parse(time.RFC3339, retTmStr)
 			if err == nil {
 				dstOpts.Internal.RetentionTimestamp = ondiskTimestamp
 			}
 		}
-		if lholdTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
+		if lholdTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockLegalHoldTimestamp); ok {
 			ondiskTimestamp, err := time.Parse(time.RFC3339, lholdTmStr)
 			if err == nil {
 				dstOpts.Internal.LegalholdTimestamp = ondiskTimestamp
@@ -1555,8 +1587,7 @@ applyAction:
 			replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
 		}
 	} else {
-		var putOpts minio.PutObjectOptions
-		putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
+		putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
 		if err != nil {
 			replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
 			sendEvent(eventArgs{
@@ -1581,13 +1612,13 @@ applyAction:
 			HeaderSize: headerSize,
 		}
 		newCtx := ctx
-		if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
+		if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) && objInfo.Size < minLargeObjSize {
 			var cancel context.CancelFunc
 			newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
 			defer cancel()
 		}
 		r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
-		if objInfo.isMultipart() {
+		if isMP {
 			rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
 		} else {
 			_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
@@ -1648,7 +1679,6 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob

 	var (
 		hr     *hash.Reader
-		pInfo  minio.ObjectPart
 		isSSEC = crypto.SSEC.IsEncrypted(objInfo.UserDefined)
 	)

@@ -1666,8 +1696,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 		cHeader := http.Header{}
 		cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
 		if !isSSEC {
-			crc := getCRCMeta(objInfo, partInfo.Number, nil) // No SSE-C keys here.
-			for k, v := range crc {
+			cs, _ := getCRCMeta(objInfo, partInfo.Number, nil)
+			for k, v := range cs {
 				cHeader.Add(k, v)
 			}
 		}
@@ -1676,30 +1706,32 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 			CustomHeader: cHeader,
 		}

+		var size int64
 		if isSSEC {
-			objectSize += partInfo.Size
-			pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, popts)
+			size = partInfo.Size
 		} else {
-			objectSize += partInfo.ActualSize
-			pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
+			size = partInfo.ActualSize
 		}
+		objectSize += size
+		pInfo, err := c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, size, popts)
 		if err != nil {
 			return err
 		}
-		if !isSSEC && pInfo.Size != partInfo.ActualSize {
-			return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
+		if pInfo.Size != size {
+			return fmt.Errorf("ssec(%t): Part size mismatch: got %d, want %d", isSSEC, pInfo.Size, size)
 		}
 		uploadedParts = append(uploadedParts, minio.CompletePart{
-			PartNumber:     pInfo.PartNumber,
-			ETag:           pInfo.ETag,
-			ChecksumCRC32:  pInfo.ChecksumCRC32,
-			ChecksumCRC32C: pInfo.ChecksumCRC32C,
-			ChecksumSHA1:   pInfo.ChecksumSHA1,
-			ChecksumSHA256: pInfo.ChecksumSHA256,
+			PartNumber:        pInfo.PartNumber,
+			ETag:              pInfo.ETag,
+			ChecksumCRC32:     pInfo.ChecksumCRC32,
+			ChecksumCRC32C:    pInfo.ChecksumCRC32C,
+			ChecksumSHA1:      pInfo.ChecksumSHA1,
+			ChecksumSHA256:    pInfo.ChecksumSHA256,
+			ChecksumCRC64NVME: pInfo.ChecksumCRC64NVME,
 		})
 	}
 	userMeta := map[string]string{
-		validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"],
+		xhttp.MinIOReplicationActualObjectSize: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"],
 	}
 	if isSSEC && objInfo.UserDefined[ReplicationSsecChecksumHeader] != "" {
 		userMeta[ReplicationSsecChecksumHeader] = objInfo.UserDefined[ReplicationSsecChecksumHeader]
@@ -1708,6 +1740,13 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	// really big value but it's okay on heavily loaded systems. This is just the tail-end timeout.
 	cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
 	defer ccancel()

+	if len(objInfo.Checksum) > 0 {
+		cs, _ := getCRCMeta(objInfo, 0, nil)
+		for k, v := range cs {
+			userMeta[k] = strings.Split(v, "-")[0]
+		}
+	}
 	_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
 		UserMetadata: userMeta,
 		Internal: minio.AdvancedPutOptions{
@@ -1795,23 +1834,27 @@ const (
 )

 var (
-	globalReplicationPool  *ReplicationPool
-	globalReplicationStats *ReplicationStats
+	globalReplicationPool  = once.NewSingleton[ReplicationPool]()
+	globalReplicationStats atomic.Pointer[ReplicationStats]
 )

 // ReplicationPool describes replication pool
 type ReplicationPool struct {
 	// atomic ops:
 	activeWorkers    int32
+	activeLrgWorkers int32
 	activeMRFWorkers int32

-	objLayer   ObjectLayer
-	ctx        context.Context
-	priority   string
-	maxWorkers int
-	mu         sync.RWMutex
-	mrfMU      sync.Mutex
-	resyncer   *replicationResyncer
+	objLayer    ObjectLayer
+	ctx         context.Context
+	priority    string
+	maxWorkers  int
+	maxLWorkers int
+	stats       *ReplicationStats
+
+	mu       sync.RWMutex
+	mrfMU    sync.Mutex
+	resyncer *replicationResyncer

 	// workers:
 	workers []chan ReplicationWorkerOperation
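The two globals move from bare pointers to lazily, atomically published values: once.NewSingleton[T]() (a MinIO internal helper) gates access behind Get/Set, and atomic.Pointer[T] makes the stats handle swappable without locks. A rough standalone approximation of the same shape, assuming a simplified Singleton (the internal once package is not reproduced here):

package main

import (
	"fmt"
	"sync/atomic"
)

// Singleton is a simplified stand-in for minio/internal/once.Singleton:
// Set publishes the instance once; Get returns it (nil before Set).
type Singleton[T any] struct{ p atomic.Pointer[T] }

func (s *Singleton[T]) Set(v *T) { s.p.CompareAndSwap(nil, v) }
func (s *Singleton[T]) Get() *T  { return s.p.Load() }

type ReplicationPool struct{ name string }
type ReplicationStats struct{ ops int }

var (
	pool  = &Singleton[ReplicationPool]{}
	stats atomic.Pointer[ReplicationStats]
)

func main() {
	pool.Set(&ReplicationPool{name: "primary"})
	stats.Store(&ReplicationStats{})

	fmt.Println(pool.Get().name) // callers use Get() everywhere, as in the diff
	if s := stats.Load(); s != nil {
		s.ops++ // nil-check before use, as getNodeQueueStats does further below
	}
}
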
@@ -1854,7 +1897,7 @@ const (
 )

 // NewReplicationPool creates a pool of replication workers of specified size
-func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts) *ReplicationPool {
+func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts, stats *ReplicationStats) *ReplicationPool {
 	var workers, failedWorkers int
 	priority := "auto"
 	maxWorkers := WorkerMaxLimit
@@ -1882,9 +1925,13 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
 	if maxWorkers > 0 && failedWorkers > maxWorkers {
 		failedWorkers = maxWorkers
 	}
+	maxLWorkers := LargeWorkerCount
+	if opts.MaxLWorkers > 0 {
+		maxLWorkers = opts.MaxLWorkers
+	}
 	pool := &ReplicationPool{
 		workers:         make([]chan ReplicationWorkerOperation, 0, workers),
-		lrgworkers:      make([]chan ReplicationWorkerOperation, 0, LargeWorkerCount),
+		lrgworkers:      make([]chan ReplicationWorkerOperation, 0, maxLWorkers),
 		mrfReplicaCh:    make(chan ReplicationWorkerOperation, 100000),
 		mrfWorkerKillCh: make(chan struct{}, failedWorkers),
 		resyncer:        newresyncer(),
@@ -1892,11 +1939,13 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
 		mrfStopCh:       make(chan struct{}, 1),
 		ctx:             ctx,
 		objLayer:        o,
+		stats:           stats,
 		priority:        priority,
 		maxWorkers:      maxWorkers,
+		maxLWorkers:     maxLWorkers,
 	}

-	pool.AddLargeWorkers()
+	pool.ResizeLrgWorkers(maxLWorkers, 0)
 	pool.ResizeWorkers(workers, 0)
 	pool.ResizeFailedWorkers(failedWorkers)
 	go pool.resyncer.PersistToDisk(ctx, o)
@@ -1918,11 +1967,11 @@ func (p *ReplicationPool) AddMRFWorker() {
 		}
 		switch v := oi.(type) {
 		case ReplicateObjectInfo:
-			globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
 			atomic.AddInt32(&p.activeMRFWorkers, 1)
 			replicateObject(p.ctx, v, p.objLayer)
 			atomic.AddInt32(&p.activeMRFWorkers, -1)
-			globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)

 		default:
 			bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
@@ -1950,9 +1999,9 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
 			if opTracker != nil {
 				atomic.AddInt32(opTracker, 1)
 			}
-			globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
 			replicateObject(p.ctx, v, p.objLayer)
-			globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
 			if opTracker != nil {
 				atomic.AddInt32(opTracker, -1)
 			}
@@ -1960,10 +2009,10 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
 			if opTracker != nil {
 				atomic.AddInt32(opTracker, 1)
 			}
-			globalReplicationStats.incQ(v.Bucket, 0, true, v.OpType)
+			p.stats.incQ(v.Bucket, 0, true, v.OpType)
 			replicateDelete(p.ctx, v, p.objLayer)
-			globalReplicationStats.decQ(v.Bucket, 0, true, v.OpType)
+			p.stats.decQ(v.Bucket, 0, true, v.OpType)
 			if opTracker != nil {
 				atomic.AddInt32(opTracker, -1)
 			}
@@ -1975,23 +2024,8 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
 	}
 }

-// AddLargeWorkers adds a static number of workers to handle large uploads
-func (p *ReplicationPool) AddLargeWorkers() {
-	for i := 0; i < LargeWorkerCount; i++ {
-		p.lrgworkers = append(p.lrgworkers, make(chan ReplicationWorkerOperation, 100000))
-		i := i
-		go p.AddLargeWorker(p.lrgworkers[i])
-	}
-	go func() {
-		<-p.ctx.Done()
-		for i := 0; i < LargeWorkerCount; i++ {
-			xioutil.SafeClose(p.lrgworkers[i])
-		}
-	}()
-}
-
 // AddLargeWorker adds a replication worker to the static pool for large uploads.
-func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation) {
+func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation, opTracker *int32) {
 	for {
 		select {
 		case <-p.ctx.Done():
@@ -2002,11 +2036,23 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
 		}
 		switch v := oi.(type) {
 		case ReplicateObjectInfo:
-			globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			if opTracker != nil {
+				atomic.AddInt32(opTracker, 1)
+			}
+			p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
 			replicateObject(p.ctx, v, p.objLayer)
-			globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
+			if opTracker != nil {
+				atomic.AddInt32(opTracker, -1)
+			}
 		case DeletedObjectReplicationInfo:
+			if opTracker != nil {
+				atomic.AddInt32(opTracker, 1)
+			}
 			replicateDelete(p.ctx, v, p.objLayer)
+			if opTracker != nil {
+				atomic.AddInt32(opTracker, -1)
+			}
 		default:
 			bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
 		}
@@ -2014,6 +2060,30 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
 	}
 }

+// ResizeLrgWorkers sets the replication worker pool for large transfers (>=128MiB) to a new size.
+// checkOld can be set to an expected current size; if the worker count changed
+// in the meantime, the resize is skipped.
+func (p *ReplicationPool) ResizeLrgWorkers(n, checkOld int) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if (checkOld > 0 && len(p.lrgworkers) != checkOld) || n == len(p.lrgworkers) || n < 1 {
+		// Either already satisfied or worker count changed while we waited for the lock.
+		return
+	}
+	for len(p.lrgworkers) < n {
+		input := make(chan ReplicationWorkerOperation, 100000)
+		p.lrgworkers = append(p.lrgworkers, input)
+
+		go p.AddLargeWorker(input, &p.activeLrgWorkers)
+	}
+	for len(p.lrgworkers) > n {
+		worker := p.lrgworkers[len(p.lrgworkers)-1]
+		p.lrgworkers = p.lrgworkers[:len(p.lrgworkers)-1]
+		xioutil.SafeClose(worker)
+	}
+}
+
 // ActiveWorkers returns the number of active workers handling replication traffic.
 func (p *ReplicationPool) ActiveWorkers() int {
 	return int(atomic.LoadInt32(&p.activeWorkers))
@@ -2024,6 +2094,11 @@ func (p *ReplicationPool) ActiveMRFWorkers() int {
 	return int(atomic.LoadInt32(&p.activeMRFWorkers))
 }

+// ActiveLrgWorkers returns the number of active workers handling traffic > 128MiB object size.
+func (p *ReplicationPool) ActiveLrgWorkers() int {
+	return int(atomic.LoadInt32(&p.activeLrgWorkers))
+}
+
 // ResizeWorkers sets the replication worker pool to a new size.
 // checkOld can be set to an expected current size; if the worker count changed
 // in the meantime, the resize is skipped.
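The grow path appends a buffered channel and spawns a goroutine; the shrink path pops channels and closes them so the corresponding workers exit after draining. A minimal generic sketch of that grow/shrink pattern (illustrative, not the MinIO type itself):

package main

import (
	"fmt"
	"sync"
	"time"
)

// pool demonstrates the channel-per-worker resize pattern used by
// ResizeLrgWorkers: grow by appending and spawning, shrink by closing.
type pool struct {
	mu      sync.Mutex
	workers []chan int
}

func (p *pool) resize(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.workers) < n {
		ch := make(chan int, 16)
		p.workers = append(p.workers, ch)
		go func(id int, in <-chan int) {
			for job := range in { // exits once the channel is closed and drained
				fmt.Printf("worker %d: job %d\n", id, job)
			}
		}(len(p.workers)-1, ch)
	}
	for len(p.workers) > n {
		last := p.workers[len(p.workers)-1]
		p.workers = p.workers[:len(p.workers)-1]
		close(last) // the draining worker goroutine terminates
	}
}

func main() {
	var p pool
	p.resize(4)
	p.workers[0] <- 1
	p.resize(1)                        // shrink: three workers wind down
	time.Sleep(100 * time.Millisecond) // allow output before exit
}
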
@@ -2049,7 +2124,7 @@ func (p *ReplicationPool) ResizeWorkers(n, checkOld int) {
 }

 // ResizeWorkerPriority sets replication failed workers pool size
-func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) {
+func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers, maxLWorkers int) {
 	var workers, mrfWorkers int
 	p.mu.Lock()
 	switch pri {
@@ -2076,11 +2151,15 @@ func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) {
 	if maxWorkers > 0 && mrfWorkers > maxWorkers {
 		mrfWorkers = maxWorkers
 	}
+	if maxLWorkers <= 0 {
+		maxLWorkers = LargeWorkerCount
+	}
 	p.priority = pri
 	p.maxWorkers = maxWorkers
 	p.mu.Unlock()
 	p.ResizeWorkers(workers, 0)
 	p.ResizeFailedWorkers(mrfWorkers)
+	p.ResizeLrgWorkers(maxLWorkers, 0)
 }

 // ResizeFailedWorkers sets replication failed workers pool size
@@ -2124,9 +2203,18 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
 		h := xxh3.HashString(ri.Bucket + ri.Name)
 		select {
 		case <-p.ctx.Done():
-		case p.lrgworkers[h%LargeWorkerCount] <- ri:
+		case p.lrgworkers[h%uint64(len(p.lrgworkers))] <- ri:
 		default:
-			globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
+			p.queueMRFSave(ri.ToMRFEntry())
+			p.mu.RLock()
+			maxLWorkers := p.maxLWorkers
+			existing := len(p.lrgworkers)
+			p.mu.RUnlock()
+			maxLWorkers = min(maxLWorkers, LargeWorkerCount)
+			if p.ActiveLrgWorkers() < maxLWorkers {
+				workers := min(existing+1, maxLWorkers)
+				p.ResizeLrgWorkers(workers, existing)
+			}
 		}
 		return
 	}
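Tasks for the same bucket/object always hash to the same large-transfer worker, which preserves per-object ordering while spreading load across the pool. A small sketch of that xxh3-based sharding (the worker count is illustrative):

package main

import (
	"fmt"

	"github.com/zeebo/xxh3"
)

func main() {
	workers := 8 // illustrative pool size

	// The same key always lands on the same shard, so operations on one
	// object stay ordered even though the pool as a whole runs in parallel.
	for _, key := range []string{"bucket/a.txt", "bucket/b.txt", "bucket/a.txt"} {
		h := xxh3.HashString(key)
		fmt.Printf("%s -> worker %d\n", key, h%uint64(workers))
	}
}
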
@@ -2148,7 +2236,7 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
 	case healCh <- ri:
 	case ch <- ri:
 	default:
-		globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
+		globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry())
 		p.mu.RLock()
 		prio := p.priority
 		maxWorkers := p.maxWorkers
@@ -2184,7 +2272,7 @@ func queueReplicateDeletesWrapper(doi DeletedObjectReplicationInfo, existingObje
 			doi.ResetID = v.ResetID
 			doi.TargetArn = k

-			globalReplicationPool.queueReplicaDeleteTask(doi)
+			globalReplicationPool.Get().queueReplicaDeleteTask(doi)
 		}
 	}
 }
@@ -2205,7 +2293,7 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
 	case <-p.ctx.Done():
 	case ch <- doi:
 	default:
-		globalReplicationPool.queueMRFSave(doi.ToMRFEntry())
+		p.queueMRFSave(doi.ToMRFEntry())
 		p.mu.RLock()
 		prio := p.priority
 		maxWorkers := p.maxWorkers
@@ -2229,14 +2317,16 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
 }

 type replicationPoolOpts struct {
-	Priority   string
-	MaxWorkers int
+	Priority    string
+	MaxWorkers  int
+	MaxLWorkers int
 }

 func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
-	globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationOpts())
-	globalReplicationStats = NewReplicationStats(ctx, objectAPI)
-	go globalReplicationStats.trackEWMA()
+	stats := NewReplicationStats(ctx, objectAPI)
+	globalReplicationPool.Set(NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationOpts(), stats))
+	globalReplicationStats.Store(stats)
+	go stats.trackEWMA()
 }

 type proxyResult struct {
@@ -2442,7 +2532,7 @@ func scheduleReplication(ctx context.Context, oi ObjectInfo, o ObjectLayer, dsc
 	if dsc.Synchronous() {
 		replicateObject(ctx, ri, o)
 	} else {
-		globalReplicationPool.queueReplicaTask(ri)
+		globalReplicationPool.Get().queueReplicaTask(ri)
 	}
 }

@@ -2570,9 +2660,9 @@ func proxyGetTaggingToRepTarget(ctx context.Context, bucket, object string, opts
 }

 func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectReplicationInfo, o ObjectLayer) {
-	globalReplicationPool.queueReplicaDeleteTask(dv)
+	globalReplicationPool.Get().queueReplicaDeleteTask(dv)
 	for arn := range dv.ReplicationState.Targets {
-		globalReplicationStats.Update(dv.Bucket, replicatedTargetInfo{Arn: arn, Size: 0, Duration: 0, OpType: replication.DeleteReplicationType}, replication.Pending, replication.StatusType(""))
+		globalReplicationStats.Load().Update(dv.Bucket, replicatedTargetInfo{Arn: arn, Size: 0, Duration: 0, OpType: replication.DeleteReplicationType}, replication.Pending, replication.StatusType(""))
 	}
 }

@@ -3002,9 +3092,9 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt
 	if len(tgtArns) == 0 {
 		return fmt.Errorf("arn %s specified for resync not found in replication config", opts.arn)
 	}
-	globalReplicationPool.resyncer.RLock()
-	data, ok := globalReplicationPool.resyncer.statusMap[opts.bucket]
-	globalReplicationPool.resyncer.RUnlock()
+	globalReplicationPool.Get().resyncer.RLock()
+	data, ok := globalReplicationPool.Get().resyncer.statusMap[opts.bucket]
+	globalReplicationPool.Get().resyncer.RUnlock()
 	if !ok {
 		data, err = loadBucketResyncMetadata(ctx, opts.bucket, objAPI)
 		if err != nil {
@@ -3030,9 +3120,9 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt
 		return err
 	}

-	globalReplicationPool.resyncer.Lock()
-	defer globalReplicationPool.resyncer.Unlock()
-	brs, ok := globalReplicationPool.resyncer.statusMap[opts.bucket]
+	globalReplicationPool.Get().resyncer.Lock()
+	defer globalReplicationPool.Get().resyncer.Unlock()
+	brs, ok := globalReplicationPool.Get().resyncer.statusMap[opts.bucket]
 	if !ok {
 		brs = BucketReplicationResyncStatus{
 			Version: resyncMetaVersion,
@@ -3040,8 +3130,8 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt
 		}
 	}
 	brs.TargetsMap[opts.arn] = status
-	globalReplicationPool.resyncer.statusMap[opts.bucket] = brs
-	go globalReplicationPool.resyncer.resyncBucket(GlobalContext, objAPI, false, opts)
+	globalReplicationPool.Get().resyncer.statusMap[opts.bucket] = brs
+	go globalReplicationPool.Get().resyncer.resyncBucket(GlobalContext, objAPI, false, opts)
 	return nil
 }

@@ -3086,7 +3176,7 @@ func (p *ReplicationPool) deleteResyncMetadata(ctx context.Context, bucket strin
 }

 // initResync - initializes bucket replication resync for all buckets.
-func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
+func (p *ReplicationPool) initResync(ctx context.Context, buckets []string, objAPI ObjectLayer) error {
 	if objAPI == nil {
 		return errServerNotInitialized
 	}
@@ -3095,7 +3185,7 @@ func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo,
 	return nil
 }

-func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
+func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []string, objAPI ObjectLayer) {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	// Run the replication resync in a loop
 	for {
@@ -3113,13 +3203,13 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []Buck
 }

 // Loads bucket replication resync statuses into memory.
-func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
+func (p *ReplicationPool) loadResync(ctx context.Context, buckets []string, objAPI ObjectLayer) error {
 	// Make sure only one node running resync on the cluster.
 	ctx, cancel := globalLeaderLock.GetLock(ctx)
 	defer cancel()

 	for index := range buckets {
-		bucket := buckets[index].Name
+		bucket := buckets[index]

 		meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI)
 		if err != nil {
@@ -3273,7 +3363,7 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
 		}
 		for arn, st := range roi.TargetPurgeStatuses {
 			if opts.ARN == "" || opts.ARN == arn {
-				if !opts.Verbose && st == Complete {
+				if !opts.Verbose && st == replication.VersionPurgeComplete {
 					continue
 				}
 				t, ok := tgtsMap[arn]
@@ -3372,8 +3462,8 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
 		// heal delete marker replication failure or versioned delete replication failure
 		if roi.ReplicationStatus == replication.Pending ||
 			roi.ReplicationStatus == replication.Failed ||
-			roi.VersionPurgeStatus == Failed || roi.VersionPurgeStatus == Pending {
-			globalReplicationPool.queueReplicaDeleteTask(dv)
+			roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending {
+			globalReplicationPool.Get().queueReplicaDeleteTask(dv)
 			return
 		}
 		// if replication status is Complete on DeleteMarker and existing object resync required
@@ -3389,12 +3479,12 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
 	switch roi.ReplicationStatus {
 	case replication.Pending, replication.Failed:
 		roi.EventType = ReplicateHeal
-		globalReplicationPool.queueReplicaTask(roi)
+		globalReplicationPool.Get().queueReplicaTask(roi)
 		return
 	}
 	if roi.ExistingObjResync.mustResync() {
 		roi.EventType = ReplicateExisting
-		globalReplicationPool.queueReplicaTask(roi)
+		globalReplicationPool.Get().queueReplicaTask(roi)
 	}
 	return
 }
@@ -3459,8 +3549,8 @@ func (p *ReplicationPool) queueMRFSave(entry MRFReplicateEntry) {
 		return
 	}
 	if entry.RetryCount > mrfRetryLimit { // let scanner catch up if retry count exceeded
-		atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedCount, 1)
-		atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedBytes, uint64(entry.sz))
+		atomic.AddUint64(&p.stats.mrfStats.TotalDroppedCount, 1)
+		atomic.AddUint64(&p.stats.mrfStats.TotalDroppedBytes, uint64(entry.sz))
 		return
 	}

@@ -3473,8 +3563,8 @@ func (p *ReplicationPool) queueMRFSave(entry MRFReplicateEntry) {
 		select {
 		case p.mrfSaveCh <- entry:
 		default:
-			atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedCount, 1)
-			atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedBytes, uint64(entry.sz))
+			atomic.AddUint64(&p.stats.mrfStats.TotalDroppedCount, 1)
+			atomic.AddUint64(&p.stats.mrfStats.TotalDroppedBytes, uint64(entry.sz))
 		}
 	}
 }
@@ -3505,7 +3595,7 @@ func (p *ReplicationPool) persistToDrive(ctx context.Context, v MRFReplicateEntr
 	}

 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()

 	for _, localDrive := range localDrives {
@@ -3523,7 +3613,7 @@ func (p *ReplicationPool) saveMRFEntries(ctx context.Context, entries map[string
 	if !p.initialized() {
 		return
 	}
-	atomic.StoreUint64(&globalReplicationStats.mrfStats.LastFailedCount, uint64(len(entries)))
+	atomic.StoreUint64(&p.stats.mrfStats.LastFailedCount, uint64(len(entries)))
 	if len(entries) == 0 {
 		return
 	}
@@ -3572,7 +3662,7 @@ func (p *ReplicationPool) loadMRF() (mrfRec MRFReplicateEntries, err error) {
 	}

 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()

 	for _, localDrive := range localDrives {
@@ -3660,7 +3750,7 @@ func (p *ReplicationPool) queueMRFHeal() error {
 }

 func (p *ReplicationPool) initialized() bool {
-	return !(p == nil || p.objLayer == nil)
+	return p != nil && p.objLayer != nil
 }

 // getMRF returns MRF entries for this node.
@@ -3693,3 +3783,29 @@ func (p *ReplicationPool) getMRF(ctx context.Context, bucket string) (ch <-chan

 	return mrfCh, nil
 }

+// validateReplicationDestinationOptions configures how validateReplicationDestination
+// checks the remote replication target.
+type validateReplicationDestinationOptions struct {
+	CheckRemoteBucket bool
+	CheckReady        bool
+
+	checkReadyErr sync.Map
+}
+
+func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string, isMP bool) {
+	meta := make(map[string]string)
+	cs, isMP = oi.decryptChecksums(partNum, h)
+	for k, v := range cs {
+		cksum := hash.NewChecksumString(k, v)
+		if cksum == nil {
+			continue
+		}
+		if cksum.Valid() {
+			meta[cksum.Type.Key()] = v
+			meta[xhttp.AmzChecksumType] = cs[xhttp.AmzChecksumType]
+			meta[xhttp.AmzChecksumAlgo] = cksum.Type.String()
+		}
+	}
+	return meta, isMP
+}

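getCRCMeta now propagates the checksum type (COMPOSITE vs FULL_OBJECT) and algorithm alongside the checksum value itself. A rough illustration of emitting such x-amz-checksum-* metadata from a computed CRC32C, using only the standard library (this follows the S3 header convention and is not the MinIO helper):

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("hello world")

	// S3-style checksums are the base64 encoding of the big-endian digest bytes.
	sum := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
	digest := []byte{byte(sum >> 24), byte(sum >> 16), byte(sum >> 8), byte(sum)}

	meta := map[string]string{
		"x-amz-checksum-crc32c":    base64.StdEncoding.EncodeToString(digest),
		"x-amz-checksum-type":      "FULL_OBJECT", // vs COMPOSITE for multipart uploads
		"x-amz-checksum-algorithm": "CRC32C",
	}
	for k, v := range meta {
		fmt.Println(k, "=", v)
	}
}
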
@@ -18,7 +18,6 @@
 package cmd

 import (
-	"context"
 	"fmt"
 	"net/http"
 	"testing"
@@ -86,7 +85,7 @@ var replicationConfigTests = []struct {
 }

 func TestReplicationResync(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	for i, test := range replicationConfigTests {
 		if sync := test.rcfg.Resync(ctx, test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
 			t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync)

@@ -20,6 +20,7 @@ package cmd
 import (
 	"fmt"
 	"math"
 	"sync/atomic"
 	"time"

 	"github.com/minio/madmin-go/v3"
@@ -107,18 +108,18 @@ func (l ReplicationLastHour) merge(o ReplicationLastHour) (merged ReplicationLas

 // Add a new duration data
 func (l *ReplicationLastHour) addsize(sz int64) {
-	min := time.Now().Unix() / 60
-	l.forwardTo(min)
-	winIdx := min % 60
-	l.Totals[winIdx].merge(AccElem{Total: min, Size: sz, N: 1})
-	l.LastMin = min
+	minutes := time.Now().Unix() / 60
+	l.forwardTo(minutes)
+	winIdx := minutes % 60
+	l.Totals[winIdx].merge(AccElem{Total: minutes, Size: sz, N: 1})
+	l.LastMin = minutes
 }

 // Merge all recorded counts of last hour into one
 func (l *ReplicationLastHour) getTotal() AccElem {
 	var res AccElem
-	min := time.Now().Unix() / 60
-	l.forwardTo(min)
+	minutes := time.Now().Unix() / 60
+	l.forwardTo(minutes)
 	for _, elem := range l.Totals[:] {
 		res.merge(elem)
 	}
@@ -127,8 +128,7 @@ func (l *ReplicationLastHour) getTotal() AccElem {

 // forwardTo time t, clearing any entries in between.
 func (l *ReplicationLastHour) forwardTo(t int64) {
-	tMin := t / 60
-	if l.LastMin >= tMin {
+	if l.LastMin >= t {
 		return
 	}
 	if t-l.LastMin >= 60 {
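Callers already pass t in minutes, so the old t / 60 divided twice and the window effectively never advanced; the fix compares minute values directly (renaming min to minutes also avoids shadowing Go 1.21's built-in min, which the pool code above uses). The structure itself is a 60-slot ring keyed by minute % 60. A compact standalone sketch of that sliding-window accumulator:

package main

import (
	"fmt"
	"time"
)

// lastHour accumulates per-minute byte counts in a 60-slot ring.
type lastHour struct {
	totals  [60]int64
	lastMin int64 // Unix time in minutes, like ReplicationLastHour.LastMin
}

// forwardTo clears slots for minutes that elapsed since lastMin.
// t is already in minutes; dividing by 60 again here was the bug fixed above.
func (l *lastHour) forwardTo(t int64) {
	if l.lastMin >= t {
		return
	}
	if t-l.lastMin >= 60 { // the whole window has expired
		l.totals = [60]int64{}
		l.lastMin = t
		return
	}
	for l.lastMin < t {
		l.lastMin++
		l.totals[l.lastMin%60] = 0 // reuse the slot for the new minute
	}
}

func (l *lastHour) add(sz int64) {
	m := time.Now().Unix() / 60
	l.forwardTo(m)
	l.totals[m%60] += sz
}

func (l *lastHour) total() (sum int64) {
	l.forwardTo(time.Now().Unix() / 60)
	for _, v := range l.totals {
		sum += v
	}
	return sum
}

func main() {
	var l lastHour
	l.add(1024)
	l.add(2048)
	fmt.Println(l.total()) // 3072
}
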
@@ -310,10 +310,18 @@ type ReplQNodeStats struct {
 func (r *ReplicationStats) getNodeQueueStats(bucket string) (qs ReplQNodeStats) {
 	qs.NodeName = globalLocalNodeName
 	qs.Uptime = UTCNow().Unix() - globalBootTime.Unix()
-	qs.ActiveWorkers = globalReplicationStats.ActiveWorkers()
+	grs := globalReplicationStats.Load()
+	if grs != nil {
+		qs.ActiveWorkers = grs.ActiveWorkers()
+	} else {
+		qs.ActiveWorkers = ActiveWorkerStat{}
+	}
 	qs.XferStats = make(map[RMetricName]XferStats)
 	qs.QStats = r.qCache.getBucketStats(bucket)
 	qs.TgtXferStats = make(map[string]map[RMetricName]XferStats)
 	qs.MRFStats = ReplicationMRFStats{
 		LastFailedCount: atomic.LoadUint64(&r.mrfStats.LastFailedCount),
 	}

 	r.RLock()
 	defer r.RUnlock()
@@ -399,10 +407,12 @@ func (r *ReplicationStats) getNodeQueueStats(bucket string) (qs ReplQNodeStats)
 func (r *ReplicationStats) getNodeQueueStatsSummary() (qs ReplQNodeStats) {
 	qs.NodeName = globalLocalNodeName
 	qs.Uptime = UTCNow().Unix() - globalBootTime.Unix()
-	qs.ActiveWorkers = globalReplicationStats.ActiveWorkers()
+	qs.ActiveWorkers = globalReplicationStats.Load().ActiveWorkers()
 	qs.XferStats = make(map[RMetricName]XferStats)
 	qs.QStats = r.qCache.getSiteStats()

 	qs.MRFStats = ReplicationMRFStats{
 		LastFailedCount: atomic.LoadUint64(&r.mrfStats.LastFailedCount),
 	}
 	r.RLock()
 	defer r.RUnlock()
 	tx := newXferStats()

@@ -612,7 +612,7 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
 }

 // create minio-go clients for buckets having remote targets
-func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
+func (sys *BucketTargetSys) set(bucket string, meta BucketMetadata) {
 	cfg := meta.bucketTargetConfig
 	if cfg == nil || cfg.Empty() {
 		return
@@ -626,9 +626,9 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
 			continue
 		}
 		sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient}
-		sys.updateBandwidthLimit(bucket.Name, tgt.Arn, tgt.BandwidthLimit)
+		sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit)
 	}
-	sys.targetsMap[bucket.Name] = cfg.Targets
+	sys.targetsMap[bucket] = cfg.Targets
 }

 // Returns a minio-go Client configured to access remote host described in replication target config.

@@ -47,6 +47,7 @@ import (
 	"github.com/minio/console/api/operations"
 	consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
 	consoleCerts "github.com/minio/console/pkg/certs"
+	"github.com/minio/kms-go/kes"
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/set"
@@ -91,9 +92,6 @@ func init() {
 	logger.Init(GOPATH, GOROOT)
 	logger.RegisterError(config.FmtError)

-	globalBatchJobsMetrics = batchJobMetrics{metrics: make(map[string]*batchJobInfo)}
-	go globalBatchJobsMetrics.purgeJobMetrics()
-
 	t, _ := minioVersionToReleaseTime(Version)
 	if !t.IsZero() {
 		globalVersionUnix = uint64(t.Unix())
@@ -138,6 +136,9 @@ func minioConfigToConsoleFeatures() {
 	if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
 		os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value)
 	}
+	if value := env.Get(config.EnvConsoleDebugLogLevel, ""); value != "" {
+		os.Setenv("CONSOLE_DEBUG_LOGLEVEL", value)
+	}
 	// pass the console subpath configuration
 	if globalBrowserRedirectURL != nil {
 		subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
@@ -382,7 +383,7 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
 	}

 	// Check "no-compat" flag from command line argument.
-	ctxt.StrictS3Compat = !(ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat"))
+	ctxt.StrictS3Compat = !ctx.IsSet("no-compat") && !ctx.GlobalIsSet("no-compat")

 	switch {
 	case ctx.IsSet("config-dir"):
@@ -433,7 +434,6 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
 	ctxt.RecvBufSize = ctx.Int("recv-buf-size")
 	ctxt.IdleTimeout = ctx.Duration("idle-timeout")
 	ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
-	ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")

 	if conf := ctx.String("config"); len(conf) > 0 {
 		err = mergeServerCtxtFromConfigFile(conf, ctxt)
@@ -462,25 +462,27 @@ func handleCommonArgs(ctxt serverCtxt) {
 	certsDir := ctxt.CertsDir
 	certsSet := ctxt.certsDirSet

-	if consoleAddr == "" {
-		p, err := xnet.GetFreePort()
-		if err != nil {
-			logger.FatalIf(err, "Unable to get free port for Console UI on the host")
-		}
-		// hold the port
-		l, err := net.Listen("TCP", fmt.Sprintf(":%s", p.String()))
-		if err == nil {
-			defer l.Close()
-		}
-		consoleAddr = net.JoinHostPort("", p.String())
-	}
-
-	if _, _, err := net.SplitHostPort(consoleAddr); err != nil {
-		logger.FatalIf(err, "Unable to start listening on console port")
-	}
-
-	if consoleAddr == addr {
-		logger.FatalIf(errors.New("--console-address cannot be same as --address"), "Unable to start the server")
+	if globalBrowserEnabled {
+		if consoleAddr == "" {
+			p, err := xnet.GetFreePort()
+			if err != nil {
+				logger.FatalIf(err, "Unable to get free port for Console UI on the host")
+			}
+			// hold the port
+			l, err := net.Listen("TCP", fmt.Sprintf(":%s", p.String()))
+			if err == nil {
+				defer l.Close()
+			}
+			consoleAddr = net.JoinHostPort("", p.String())
+		}

+		if _, _, err := net.SplitHostPort(consoleAddr); err != nil {
+			logger.FatalIf(err, "Unable to start listening on console port")
+		}

+		if consoleAddr == addr {
+			logger.FatalIf(errors.New("--console-address cannot be same as --address"), "Unable to start the server")
+		}
 	}

 	globalMinioHost, globalMinioPort = mustSplitHostPort(addr)
@@ -493,7 +495,9 @@ func handleCommonArgs(ctxt serverCtxt) {
 		globalDynamicAPIPort = true
 	}

-	globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr)
+	if globalBrowserEnabled {
+		globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr)
+	}

 	if globalMinioPort == globalMinioConsolePort {
 		logger.FatalIf(errors.New("--console-address port cannot be same as --address port"), "Unable to start the server")
@@ -686,16 +690,6 @@ func loadEnvVarsFromFiles() {
 		}
 	}

-	if env.IsSet(kms.EnvKMSSecretKeyFile) {
-		kmsSecret, err := readFromSecret(env.Get(kms.EnvKMSSecretKeyFile, ""))
-		if err != nil {
-			logger.Fatal(err, "Unable to read the KMS secret key inherited from secret file")
-		}
-		if kmsSecret != "" {
-			os.Setenv(kms.EnvKMSSecretKey, kmsSecret)
-		}
-	}
-
 	if env.IsSet(config.EnvConfigEnvFile) {
 		ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, ""))
 		if err != nil && !os.IsNotExist(err) {
@@ -707,12 +701,16 @@ func loadEnvVarsFromFiles() {
 	}
 }

-func serverHandleEnvVars() {
+func serverHandleEarlyEnvVars() {
 	var err error
 	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
 	if err != nil {
 		logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
 	}
+}
+
+func serverHandleEnvVars() {
+	var err error
 	if globalBrowserEnabled {
 		if redirectURL := env.Get(config.EnvBrowserRedirectURL, ""); redirectURL != "" {
 			u, err := xnet.ParseHTTPURL(redirectURL)
@@ -720,9 +718,7 @@ func serverHandleEnvVars() {
 				logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable")
 			}
 			// Look for if URL has invalid values and return error.
-			if !((u.Scheme == "http" || u.Scheme == "https") &&
-				u.Opaque == "" &&
-				!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
+			if !isValidURLEndpoint((*url.URL)(u)) {
 				err := fmt.Errorf("URL contains unexpected resources, expected URL to be one of http(s)://console.example.com or as a subpath via API endpoint http(s)://minio.example.com/minio format: %v", u)
 				logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value is environment variable")
 			}
@@ -737,9 +733,7 @@ func serverHandleEnvVars() {
 				logger.Fatal(err, "Invalid MINIO_SERVER_URL value in environment variable")
 			}
 			// Look for if URL has invalid values and return error.
-			if !((u.Scheme == "http" || u.Scheme == "https") &&
-				(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
-				!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
+			if !isValidURLEndpoint((*url.URL)(u)) {
 				err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u)
 				logger.Fatal(err, "Invalid MINIO_SERVER_URL value is environment variable")
 			}
@@ -769,7 +763,7 @@ func serverHandleEnvVars() {
 	if len(domains) != 0 {
 		for _, domainName := range strings.Split(domains, config.ValueSeparator) {
 			if _, ok := dns2.IsDomainName(domainName); !ok {
-				logger.Fatal(config.ErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName),
+				logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
 					"Invalid MINIO_DOMAIN value in environment variable")
 			}
 			globalDomainNames = append(globalDomainNames, domainName)
@@ -778,7 +772,7 @@ func serverHandleEnvVars() {
 	lcpSuf := lcpSuffix(globalDomainNames)
 	for _, domainName := range globalDomainNames {
 		if domainName == lcpSuf && len(globalDomainNames) > 1 {
-			logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames),
+			logger.Fatal(config.ErrOverlappingDomainValue(nil).Msgf("Overlapping domains `%s` not allowed", globalDomainNames),
 				"Invalid MINIO_DOMAIN value in environment variable")
 		}
 	}
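The two inline URL predicates collapse into one helper. From the removed conditions, it accepts only plain http(s) URLs with no opaque part, query, or fragment. A sketch of what such a helper plausibly looks like, reconstructed from the removed checks rather than copied from the repo:

package main

import (
	"fmt"
	"net/url"
)

// isValidURLEndpoint mirrors the conditions removed above: the scheme must be
// http or https, and the URL must carry no opaque data, query, or fragment.
func isValidURLEndpoint(u *url.URL) bool {
	return (u.Scheme == "http" || u.Scheme == "https") &&
		u.Opaque == "" &&
		!u.ForceQuery && u.RawQuery == "" && u.Fragment == ""
}

func main() {
	for _, s := range []string{
		"https://minio.example.com",
		"https://minio.example.com/minio", // subpath form is allowed
		"https://minio.example.com/?x=1",  // query string: rejected
	} {
		u, _ := url.Parse(s)
		fmt.Println(s, "->", isValidURLEndpoint(u))
	}
}
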
@@ -835,45 +829,86 @@ func serverHandleEnvVars() {
 		}
 	}
 
 	globalDisableFreezeOnBoot = env.Get("_MINIO_DISABLE_API_FREEZE_ON_BOOT", "") == "true" || serverDebugLog
 	globalEnableSyncBoot = env.Get("MINIO_SYNC_BOOT", config.EnableOff) == config.EnableOn
 }
 
-func loadRootCredentials() {
+func loadRootCredentials() auth.Credentials {
 	// At this point, either both environment variables
 	// are defined or both are not defined.
 	// Check both cases and authenticate them if correctly defined
 	var user, password string
-	var hasCredentials bool
 	var legacyCredentials bool
 	//nolint:gocritic
 	if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		user = env.Get(config.EnvRootUser, "")
 		password = env.Get(config.EnvRootPassword, "")
-		hasCredentials = true
 	} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
 		user = env.Get(config.EnvAccessKey, "")
 		password = env.Get(config.EnvSecretKey, "")
-		hasCredentials = true
 		legacyCredentials = true
 	} else if globalServerCtxt.RootUser != "" && globalServerCtxt.RootPwd != "" {
 		user, password = globalServerCtxt.RootUser, globalServerCtxt.RootPwd
-		hasCredentials = true
 	}
-	if hasCredentials {
-		cred, err := auth.CreateCredentials(user, password)
-		if err != nil {
+	if user == "" || password == "" {
+		return auth.Credentials{}
+	}
+	cred, err := auth.CreateCredentials(user, password)
+	if err != nil {
 		if legacyCredentials {
 			logger.Fatal(config.ErrInvalidCredentials(err),
 				"Unable to validate credentials inherited from the shell environment")
 		} else {
 			logger.Fatal(config.ErrInvalidRootUserCredentials(err),
 				"Unable to validate credentials inherited from the shell environment")
 		}
 	}
-		if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
-			msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
-				" Please use %s and %s",
-				config.EnvAccessKey, config.EnvSecretKey,
-				config.EnvRootUser, config.EnvRootPassword)
-			logger.Info(color.RedBold(msg))
-		}
-		globalActiveCred = cred
-		globalCredViaEnv = true
-	} else {
-		globalActiveCred = auth.DefaultCredentials
-	}
+	if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
+		msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
+			" Please use %s and %s",
+			config.EnvAccessKey, config.EnvSecretKey,
+			config.EnvRootUser, config.EnvRootPassword)
+		logger.Info(color.RedBold(msg))
+	}
+	globalCredViaEnv = true
+	return cred
 }
+
+// autoGenerateRootCredentials generates root credentials deterministically if
+// a KMS is configured, no manual credentials have been specified and if root
+// access is disabled.
+func autoGenerateRootCredentials() auth.Credentials {
+	if GlobalKMS == nil {
+		return globalActiveCred
+	}
+
+	aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
+	if IsErrIgnored(err, kes.ErrNotAllowed, kms.ErrNotSupported, errors.ErrUnsupported, kms.ErrPermission) {
+		// If we don't have permission to compute the HMAC, don't change the cred.
+		return globalActiveCred
+	}
+	if err != nil {
+		logger.Fatal(err, "Unable to generate root access key using KMS")
+	}
+
+	sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
+	if err != nil {
+		// Here, we must have permission. Otherwise, we would have failed earlier.
+		logger.Fatal(err, "Unable to generate root secret key using KMS")
+	}
+
+	accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
+	if err != nil {
+		logger.Fatal(err, "Unable to generate root access key")
+	}
+	secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
+	if err != nil {
+		logger.Fatal(err, "Unable to generate root secret key")
+	}
+
+	logger.Info("Automatically generated root access key and secret key with the KMS")
+	return auth.Credentials{
+		AccessKey: accessKey,
+		SecretKey: secretKey,
+	}
+}
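
autoGenerateRootCredentials, added above (and deleted from cmd/config-current.go further down in this diff), derives stable credentials by MAC-ing two fixed labels with a KMS-held key and feeding the resulting bytes into the key generators as a deterministic randomness source, so every node sharing the KMS key computes the same root credentials. A self-contained sketch of the idea, with a local HMAC standing in for GlobalKMS.MAC:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deriveSeed stands in for GlobalKMS.MAC: the same key and label always yield
// the same bytes, which is what makes the derived credentials deterministic.
func deriveSeed(kmsKey []byte, label string) []byte {
	mac := hmac.New(sha256.New, kmsKey)
	mac.Write([]byte(label))
	return mac.Sum(nil)
}

func main() {
	kmsKey := []byte("example-kms-master-key")     // hypothetical stand-in for the KMS key
	aSeed := deriveSeed(kmsKey, "root access key") // labels mirror the diff
	sSeed := deriveSeed(kmsKey, "root secret key")
	// In the diff these seeds are consumed via bytes.NewReader by
	// auth.GenerateAccessKey(20, ...) and auth.GenerateSecretKey(32, ...).
	fmt.Println("access-key seed:", hex.EncodeToString(aSeed))
	fmt.Println("secret-key seed:", hex.EncodeToString(sSeed))
}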
@@ -905,7 +940,7 @@ func handleKMSConfig() {
 }
 
 func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
-	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
+	if !isFile(getPublicCertFile()) || !isFile(getPrivateKeyFile()) {
 		return nil, nil, false, nil
 	}

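The getTLSConfig change is a mechanical De Morgan rewrite: !(a && b) is equivalent to !a || !b, so TLS setup is still skipped unless both the certificate and the private key file exist. A throwaway property check (not part of the diff) confirming the two forms agree on all inputs:

func TestDeMorganRewrite(t *testing.T) {
	for _, certOk := range []bool{false, true} {
		for _, keyOk := range []bool{false, true} {
			if !(certOk && keyOk) != (!certOk || !keyOk) {
				t.Fatalf("forms differ for certOk=%v keyOk=%v", certOk, keyOk)
			}
		}
	}
}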
@@ -45,7 +45,7 @@ func Test_readFromSecret(t *testing.T) {
 	for _, testCase := range testCases {
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			tmpfile, err := os.CreateTemp("", "testfile")
+			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
 				t.Error(err)
 			}
@@ -157,7 +157,7 @@ MINIO_ROOT_PASSWORD=minio123`,
 	for _, testCase := range testCases {
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			tmpfile, err := os.CreateTemp("", "testfile")
+			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
 				t.Error(err)
 			}

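Both test hunks swap the system-wide temp directory for t.TempDir(), which the testing package creates per test and removes automatically, together with its contents, when the test finishes, so no manual os.Remove cleanup is needed. A minimal illustration (hypothetical test, not from the diff):

func TestTempFileCleanup(t *testing.T) {
	// The file lives under t.TempDir(), so it is deleted when the test ends.
	tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
	if err != nil {
		t.Fatal(err)
	}
	defer tmpfile.Close()
}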
@@ -18,23 +18,18 @@
 package cmd
 
 import (
-	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"strings"
 	"sync"
 
-	"github.com/minio/kms-go/kes"
-	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/config/browser"
-	"github.com/minio/minio/internal/kms"
 
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/config/api"
 	"github.com/minio/minio/internal/config/batch"
 	"github.com/minio/minio/internal/config/cache"
 	"github.com/minio/minio/internal/config/callhome"
 	"github.com/minio/minio/internal/config/compress"
 	"github.com/minio/minio/internal/config/dns"
@@ -80,7 +75,6 @@ func initHelp() {
 		config.CallhomeSubSys: callhome.DefaultKVS,
 		config.DriveSubSys:    drive.DefaultKVS,
 		config.ILMSubSys:      ilm.DefaultKVS,
-		config.CacheSubSys:    cache.DefaultKVS,
 		config.BatchSubSys:    batch.DefaultKVS,
 		config.BrowserSubSys:  browser.DefaultKVS,
 	}
@@ -101,7 +95,7 @@ func initHelp() {
 		config.HelpKV{
 			Key:         config.SubnetSubSys,
 			Type:        "string",
-			Description: "register the cluster to MinIO SUBNET",
+			Description: "register Enterprise license for the cluster",
 			Optional:    true,
 		},
 		config.HelpKV{
@@ -229,12 +223,6 @@ func initHelp() {
 			Key:         config.EtcdSubSys,
 			Description: "persist IAM assets externally to etcd",
 		},
-		config.HelpKV{
-			Key:         config.CacheSubSys,
-			Type:        "string",
-			Description: "enable cache plugin on MinIO for GET/HEAD requests",
-			Optional:    true,
-		},
 		config.HelpKV{
 			Key:         config.BrowserSubSys,
 			Description: "manage Browser HTTP specific features, such as Security headers, etc.",
@@ -291,7 +279,6 @@ func initHelp() {
 		config.SubnetSubSys:   subnet.HelpSubnet,
 		config.CallhomeSubSys: callhome.HelpCallhome,
 		config.DriveSubSys:    drive.HelpDrive,
-		config.CacheSubSys:    cache.Help,
 		config.BrowserSubSys:  browser.Help,
 		config.ILMSubSys:      ilm.Help,
 	}
@@ -409,10 +396,6 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
 		if _, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
 			return err
 		}
-	case config.CacheSubSys:
-		if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport); err != nil {
-			return err
-		}
 	case config.PolicyOPASubSys:
 		// In case legacy OPA config is being set, we treat it as if the
 		// AuthZPlugin is being set.
@@ -583,7 +566,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
 		}
 
 		globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
-		autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
 		setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
 	case config.CompressionSubSys:
 		cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
@@ -710,13 +692,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
 				configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
 			}
 		}
-	case config.CacheSubSys:
-		cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport)
-		if err != nil {
-			configLogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
-		} else {
-			globalCacheConfig.Update(cacheCfg)
-		}
 	case config.BrowserSubSys:
 		browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default])
 		if err != nil {
@@ -749,47 +724,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
 	return nil
 }
 
-// autoGenerateRootCredentials generates root credentials deterministically if
-// a KMS is configured, no manual credentials have been specified and if root
-// access is disabled.
-func autoGenerateRootCredentials() {
-	if GlobalKMS == nil {
-		return
-	}
-	if globalAPIConfig.permitRootAccess() || !globalActiveCred.Equal(auth.DefaultCredentials) {
-		return
-	}
-
-	aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
-	if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
-		return // If we don't have permission to compute the HMAC, don't change the cred.
-	}
-	if err != nil {
-		logger.Fatal(err, "Unable to generate root access key using KMS")
-	}
-
-	sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
-	if err != nil {
-		// Here, we must have permission. Otherwise, we would have failed earlier.
-		logger.Fatal(err, "Unable to generate root secret key using KMS")
-	}
-
-	accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
-	if err != nil {
-		logger.Fatal(err, "Unable to generate root access key")
-	}
-	secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
-	if err != nil {
-		logger.Fatal(err, "Unable to generate root secret key")
-	}
-
-	logger.Info("Automatically generated root access key and secret key with the KMS")
-	globalActiveCred = auth.Credentials{
-		AccessKey: accessKey,
-		SecretKey: secretKey,
-	}
-}
-
 // applyDynamicConfig will apply dynamic config values.
 // Dynamic systems should be in config.SubSystemsDynamic as well.
 func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {

@@ -26,7 +26,7 @@ import (
 )
 
 func TestServerConfig(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 
 	objLayer, fsDir, err := prepareFS(ctx)
@@ -56,7 +56,7 @@ func TestServerConfig(t *testing.T) {
 		t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
 	}
 
-	if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
+	if err := saveServerConfig(t.Context(), objLayer, globalServerConfig); err != nil {
 		t.Fatalf("Unable to save updated config file %s", err)
 	}

@@ -25,11 +25,11 @@ import (
 	"sync/atomic"
 
 	"github.com/minio/madmin-go/v3"
+	"github.com/minio/madmin-go/v3/logger/log"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/logger/target/console"
 	"github.com/minio/minio/internal/logger/target/types"
 	"github.com/minio/minio/internal/pubsub"
-	"github.com/minio/pkg/v3/logger/message/log"
 	xnet "github.com/minio/pkg/v3/net"
 )

@@ -104,6 +104,22 @@ func (p *scannerMetrics) log(s scannerMetric, paths ...string) func(custom map[s
 	}
 }
 
+// time n scanner actions.
+// Use for s < scannerMetricLastRealtime
+func (p *scannerMetrics) timeN(s scannerMetric) func(n int) func() {
+	startTime := time.Now()
+	return func(n int) func() {
+		return func() {
+			duration := time.Since(startTime)
+
+			atomic.AddUint64(&p.operations[s], uint64(n))
+			if s < scannerMetricLastRealtime {
+				p.latency[s].add(duration)
+			}
+		}
+	}
+}
+
 // time a scanner action.
 // Use for s < scannerMetricLastRealtime
 func (p *scannerMetrics) time(s scannerMetric) func() {
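
The new timeN is a curried variant of time just above it: the clock starts when timeN is called, the operation count is supplied later, and invoking the innermost closure records both the count and the elapsed latency. Hypothetical usage (s is some scannerMetric; processBatch is a stand-in for the timed work):

stop := p.timeN(s)  // clock starts here
n := processBatch() // ... timed work that handles n items ...
stop(n)()           // adds n to p.operations[s] and records the latency once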
Some files were not shown because too many files have changed in this diff.